diff --git a/.github/workflows/aws_corda_deploy.yaml b/.github/workflows/aws_corda_deploy.yaml new file mode 100644 index 00000000000..35035556654 --- /dev/null +++ b/.github/workflows/aws_corda_deploy.yaml @@ -0,0 +1,162 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +############################################################################################## +# Workflow: Deploy Hyperledger Bevel's CORDA DLT Platform to an EKS Cluster. + +# Prerequisites: +# 1. An accessible EKS Cluster +# 2. A Vault instance accessible from GitHub Runner +# 3. A completed network.yaml file stored in GitHub Secrets + +# Workflow Overview: +# 1. This GitHub Actions workflow automates the seamless deployment of "BEVEL's CORDA" platform to an EKS cluster. +# 2. Utilizing secure environment variables, the workflow manages sensitive information related to AWS, Docker, Cluster, Vault, and Git. +# 3. The workflow dynamically customizes a network configuration file by substituting placeholders with values derived from environment variables. +# 4. It uses tool Ansible to deploy the platform. +############################################################################################## + +# Name of the workflow +name: Deploy or Reset Corda Network to an EKS Cluster + +# Triggers for the workflow +on: + # Manually trigger the workflow through the GitHub Actions UI + workflow_dispatch: + inputs: + action: + description: 'Choose action: Deploy or Reset' + required: true + default: 'deploy' + type: choice + options: + - 'deploy' + - 'reset' + paths-ignore: + - 'docs/**' + - '**/charts/**' + - '**/releases/**' + +# Jobs to be executed +jobs: + deployment: + runs-on: ubuntu-latest + permissions: + contents: write + environment: Bevel-AWS-Deployment + env: + AWS_ACCESS_KEY_ID: "${{ secrets.AWS_ACCESS_KEY_ID }}" # AWS Access Key ID + AWS_SECRET_ACCESS_KEY: "${{ secrets.AWS_SECRET_ACCESS_KEY }}" # AWS Secret Access Key + AWS_REGION: "${{ secrets.AWS_REGION }}" # EKS cluster zone + CLUSTER_CONTEXT: "${{ secrets.CLUSTER_CONTEXT }}" # Context name for the EKS cluster + KUBECONFIG: "${{ secrets.ENCODED_KUBECONFIG }}" # Provide Kubernetes configuration file in encoded base64 format + DOCKER_URL: "${{ secrets.DOCKER_URL }}" # URL of the Docker registry + DOCKER_USERNAME: "${{ secrets.DOCKER_USERNAME }}" # Docker registry username + DOCKER_PASSWORD: "${{ secrets.DOCKER_PASSWORD }}" # Docker registry password + EXTERNAL_URL_SUFFIX: "${{ secrets.EXTERNAL_URL_SUFFIX }}" # Suffix for external URLs + GIT_USER_NAME: "${{ secrets.GIT_USER_NAME }}" # Git username for Git operations + GIT_EMAIL_ADDR: "${{ secrets.GIT_EMAIL_ADDR }}" # Git email address for Git operations + GIT_TOKEN: "${{ secrets.GIT_TOKEN }}" # Git token with required permissions for authentication + GIT_BRANCH: "${{ vars.GIT_BRANCH }}" # Git branch to be used in the deployment + GIT_PRIVATE_SSH_KEY: "${{ secrets.GIT_PRIVATE_SSH_KEY }}" # Private SSH key for Git authentication in encoded base64 format + VAULT_ADDR: "${{ secrets.VAULT_ADDR }}" # Vault Server DNS name + VAULT_TOKEN: "${{ secrets.VAULT_TOKEN }}" # Token for authentication with Vault + + # Steps to be executed within the job + steps: + # Checkout the repository code + - name: Checkout Repository + uses: actions/checkout@v2.4.0 + + # Java installation + - name: Install java + 
uses: actions/setup-java@v2 + with: + distribution: 'adopt' + java-version: '8' + + # Configure AWS credentials + - name: AWS Setup + uses: aws-actions/configure-aws-credentials@v2 + with: + aws-access-key-id: "${{ env.AWS_ACCESS_KEY_ID }}" + aws-secret-access-key: "${{ env.AWS_SECRET_ACCESS_KEY }}" + aws-region: "${{ env.AWS_REGION }}" + + # Set up BEVEL's Corda network configuration file + - name: BEVEL's Corda Network Configuration file Setup + run: | + # Prepare network configuration file for deployment + mkdir -p build/ + + cp "./platforms/r3-corda/configuration/samples/workflow/network-proxy-corda.yaml" "build/network-corda.yaml" + + NETWORK_CONF_FILE="build/network-corda.yaml" + + # Decode and store private SSH key + echo "${{ env.GIT_PRIVATE_SSH_KEY }}" | base64 --decode > /home/runner/private_ssh_key + + # Define placeholder values for the network configuration file + declare -A placeholders=( + ["NETWORK_VERSION"]="4.9" + ["FLUX_SUFFIX"]="corda" + ["PORT_RANGE_FROM"]=15010 + ["PORT_RANGE_TO"]=15090 + ["DOCKER_URL"]="${{ env.DOCKER_URL }}" + ["DOCKER_USERNAME"]="${{ env.DOCKER_USERNAME }}" + ["DOCKER_PASSWORD"]="${{ env.DOCKER_PASSWORD }}" + ["USER_DIRECTORY"]="$(pwd)" + ["EXTERNAL_URL_SUFFIX"]="${{ env.EXTERNAL_URL_SUFFIX }}" + ["AWS_ACCESS_KEY"]="${{ env.AWS_ACCESS_KEY_ID }}" + ["AWS_SECRET_KEY"]="${{ env.AWS_SECRET_ACCESS_KEY }}" + ["AWS_REGION"]="${{ env.AWS_REGION}}" + ["CLUSTER_CONTEXT"]="${{ env.CLUSTER_CONTEXT }}" + ["CLUSTER_CONFIG"]="/home/runner/.kube/build_config/kubeconfig" + ["VAULT_ADDR"]="${{ env.VAULT_ADDR }}" + ["VAULT_ROOT_TOKEN"]="${{ env.VAULT_TOKEN }}" + ["GIT_USERNAME"]="${{ env.GIT_USER_NAME }}" + ["GIT_TOKEN"]="${{ env.GIT_TOKEN }}" + ["GIT_EMAIL_ADDR"]="${{ env.GIT_EMAIL_ADDR }}" + ["GIT_BRANCH"]="${{ env.GIT_BRANCH }}" + ["PRIVATE_KEY_PATH"]="/home/runner/private_ssh_key" + ) + + # Replace placeholders in the network configuration file + for placeholder in "${!placeholders[@]}"; do + sed -i "s#${placeholder}#${placeholders[$placeholder]}#g" "$NETWORK_CONF_FILE" + done + + # Deploy BEVEL's Corda Platform + - name: Deploy BEVEL's Corda Platform + run: | + # Setup Kubernetes configuration + mkdir -p /home/runner/.kube/build_config + echo "${{ env.KUBECONFIG }}" | base64 --decode > /home/runner/.kube/build_config/kubeconfig + export KUBECONFIG="/home/runner/.kube/build_config/kubeconfig" + + # Configure Git user settings + git config --global user.email "${{ env.GIT_EMAIL_ADDR }}" + git config --global user.name "${{ env.GIT_USER_NAME }}" + + # Install required tools and Ansible collections + mkdir -p ~/bin + export PATH=$PATH:~/bin + pip3 install openshift=='0.13.1' + pip install ansible jmespath jinja2-time + ansible-galaxy collection install -r platforms/shared/configuration/requirements.yaml + + # Set reset variable + if [ "${{ github.event.inputs.action }}" == "reset" ]; then + reset=true + else + reset=false + fi + + # Deploy the BEVEL's corda DLT platform + ansible-playbook platforms/shared/configuration/site.yaml \ + -i platforms/shared/inventory/ansible_provisioners \ + -e @build/network-corda.yaml \ + -e 'ansible_python_interpreter=/usr/bin/python3' -e "reset=$reset" diff --git a/.gitignore b/.gitignore index 7216c3812de..f11044ec844 100644 --- a/.gitignore +++ b/.gitignore @@ -40,4 +40,5 @@ *_custom.tpl **/charts/*.tgz **/files/*.json +**/files/*.crt requirements.lock diff --git a/Dockerfile b/Dockerfile index efd13264b1e..5041ac0ab6b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -50,6 +50,10 @@ RUN rm /etc/apt/apt.conf.d/docker-clean RUN mkdir 
/etc/ansible/ RUN /bin/echo -e "[ansible_provisioners:children]\nlocal\n[local]\nlocalhost ansible_connection=local" > /etc/ansible/hosts +RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.27.0/bin/linux/amd64/kubectl +RUN chmod +x ./kubectl +RUN mv ./kubectl /usr/local/bin + # Install krew for bevel-operator-fabric RUN (set -x; cd "$(mktemp -d)" && \ OS="$(uname | tr '[:upper:]' '[:lower:]')" && \ diff --git a/Dockerfile.jdk8 b/Dockerfile.jdk8 index 15ff3426b98..0520a4797de 100644 --- a/Dockerfile.jdk8 +++ b/Dockerfile.jdk8 @@ -3,13 +3,11 @@ # # SPDX-License-Identifier: Apache-2.0 ############################################################################################## - # USAGE: # docker build . -t bevel-build # docker run -v $(pwd):/home/bevel/ bevel-build FROM ubuntu:20.04 - # Create working directory WORKDIR /home/ ENV OPENSHIFT_VERSION='0.13.1' @@ -37,13 +35,17 @@ RUN apt-get update && apt-get install -y \ apt-get clean && \ ln -s /usr/bin/python3 /usr/bin/python && \ rm -rf /var/lib/apt/lists/* -RUN npm install -g ajv-cli +RUN npm install -g ajv-cli RUN apt-get update && apt-get install -y python3-venv RUN rm /etc/apt/apt.conf.d/docker-clean RUN mkdir /etc/ansible/ RUN /bin/echo -e "[ansible_provisioners:children]\nlocal\n[local]\nlocalhost ansible_connection=local" > /etc/ansible/hosts +RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.27.0/bin/linux/amd64/kubectl +RUN chmod +x ./kubectl +RUN mv ./kubectl /usr/local/bin + # Copy the provisional script to build container COPY ./run.sh /home COPY ./reset.sh /home @@ -58,6 +60,4 @@ ENV PATH=/root/bin:/root/.local/bin/:$PATH #path to mount the repo VOLUME /home/bevel/ - - CMD ["/home/run.sh"] diff --git a/README.md b/README.md index 0162b24d02c..0c2ac74f403 100644 --- a/README.md +++ b/README.md @@ -16,33 +16,34 @@ - [Hyperledger Indy](#hyperledger-indy) - [Quorum](#quorum) - [Hyperledger Besu](#hyperledger-besu) + - [Substrate](#substrate) - [Contact](#contact) - [Contributing](#contributing) - [Initial Committers](#initial-committers) - [Sponsor](#sponsor) ## Short Description -An automation framework for rapidly and consistently deploying production-ready Distributed Ledger Technology (DLT) platforms. +An automation framework and helm charts for rapidly and consistently deploying production-ready Distributed Ledger Technology (DLT) platforms. ## Scope of Project -Hyperledger Bevel delivers an automation framework for rapidly and consistently deploying production-ready DLT platforms to cloud infrastructure. +Hyperledger Bevel is an automation framework for rapidly and consistently deploying production-ready DLT platforms to cloud infrastructure. ![What is Hyperledger Bevel?](./docs/images/hyperledger-bevel-overview.png "What is Hyperledger Bevel?") Hyperledger Bevel is an accelerator/tool that helps developers rapidly set up and deploy secure, scalable and production-ready DLT network(s) that also allows new organizations to be easily on-boarded on the network. Bevel facilitates a safe and secure way of deploying and operating different DLT platforms. It includes: -- Helm charts to deploy different DLT nodes and to generate the related crypto/identities. -- Helm charts for various operational features like adding new nodes, and deploying smart contracts. -- Helm charts to deploy Hyperledger Cacti connectors for Fabric, Quorum and Besu networks. -- Ansible playbooks and modular role definitions to automate the deployment of Helm charts. 
-- Ansible playbooks and roles to automate deployment of Hyperledger fabric using bevel-operator-fabric(Kubernetes operator for managing Hyperledger Fabric networks). -- Integrated CD using GitOps so that once the network is set up, all changes can be done via git PRs/merges. -- Configuration for Ambassador Edge Stack, HAProxy (for Hyperledger Fabric) and Isto Ingress (for Substrate) to act as Ingress Controller. +- Helm charts to **deploy** different DLT nodes and to generate the related crypto/identities. +- Helm charts for various **operational features** like adding new nodes, and deploying smart contracts. +- Helm charts to deploy Hyperledger **Cacti connectors** for Fabric, Quorum and Besu networks. +- **Ansible playbooks** and modular role definitions to automate the deployment of Helm charts. +- Ansible playbooks and roles to automate deployment of Hyperledger fabric using **bevel-operator-fabric** (Kubernetes operator for managing Hyperledger Fabric networks). +- Integrated CD using **GitOps** so that once the network is set up, all changes can be done via git PRs/merges. +- Configuration for Ambassador Edge Stack, HAProxy (for Hyperledger Fabric) and Istio Ingress (for Substrate) to act as Ingress Controller. Hyperledger Bevel currently supports R3 Corda OS and Enterprise, Hyperledger Fabric, Hyperledger Indy, Hyperledger Besu, Quorum and Substrate. Other DLT platforms can easily be added. -### Getting Started +## Getting Started To get started with the framework quickly, follow our [Getting Started guidelines](https://hyperledger-bevel.readthedocs.io/en/latest/gettingstarted.html). @@ -50,40 +51,62 @@ Detailed operator and developer documentation is available on [our ReadTheDocs s The documentation can also be built locally be following instructions in the `docs` folder. -### Hyperledger Fabric -For Hyperledger Fabric, we use the official Docker containers provided by that project. A number of different Ansible scripts will allow you to either create a new network (across clouds) or join an existing network. +## Hyperledger Fabric +For Hyperledger Fabric, there are two ways to deploy the network. -![Hyperledger Bevel - Fabric](./docs/images/hyperledger-bevel-fabric.png "Hyperledger Bevel for Hyperledger Fabric") +- Using `helm install`: Follow the [Fabric Charts readme](./platforms/hyperledger-fabric/charts/README.md). +- Using Ansible: A number of different Ansible scripts will allow you to either create a new network (across clouds) or join an existing network. -### Corda Enterprise -For Corda Enterprise, we build Docker containers from the Corda source with licensed jars. A number of different Ansible scripts will allow you to either create a new network (across clouds) or join an existing network. + ![Hyperledger Bevel - Fabric](./docs/images/hyperledger-bevel-fabric.png "Hyperledger Bevel for Hyperledger Fabric") -![Hyperledger Bevel - Corda Enterprise](./docs/images/hyperledger-bevel-corda-ent.png "Hyperledger Bevel for Corda Enterprise") +## Corda Enterprise +For Corda Enterprise, there are two ways to deploy the network. -### Corda Opensource -For Corda Opensource, we build Docker containers from the Corda source. A number of different Ansible scripts will allow you to either create a new network (across clouds) or join an existing network. +- Using `helm install`: Follow the [Corda Enterprise Charts readme](./platforms/r3-corda-ent/charts/README.md). 
+- Using Ansible: A number of different Ansible scripts will allow you to either create a new network (across clouds) or join an existing network. -![Hyperledger Bevel - Corda](./docs/images/hyperledger-bevel-corda.png "Hyperledger Bevel for Corda") + ![Hyperledger Bevel - Corda Enterprise](./docs/images/hyperledger-bevel-corda-ent.png "Hyperledger Bevel for Corda Enterprise") -### Hyperledger Indy -For Hyperledger Indy, we build Docker containers from our source code. A number of different Ansible scripts will allow you to create a new network (across clouds). +## Corda Opensource +For Corda Opensource, there are two ways to deploy the network. + +- Using `helm install`: Follow the [Corda Charts readme](./platforms/r3-corda/charts/README.md). +- Using Ansible: A number of different Ansible scripts will allow you to either create a new network (across clouds) or join an existing network. + + ![Hyperledger Bevel - Corda](./docs/images/hyperledger-bevel-corda.png "Hyperledger Bevel for Corda") + +## Hyperledger Indy +For Hyperledger Indy, there are two ways to deploy the network. + +- Using `helm install`: Follow the [Indy Charts readme](./platforms/hyperledger-indy/charts/README.md). +- Using Ansible: A number of different Ansible scripts will allow you to create a new network (across clouds). ![Hyperledger Bevel - Indy](./docs/images/hyperledger-bevel-indy.png "Hyperledger Bevel for Hyperledger Indy") -### Quorum -For Quorum, we use the official Docker containers provided by Quorum. A number of different Ansible scripts will allow you to either create a new network (across clouds) with choice of Consensus (between IBFT and RAFT) and a transaction Manager. +## Quorum +For Quorum, there are two ways to deploy the network. + +- Using `helm install`: Follow the [Quorum Charts readme](./platforms/quorum/charts/README.md). +- Using Ansible: A number of different Ansible scripts will allow you to either create a new network (across clouds) with choice of Consensus and a transaction Manager. + + ![Hyperledger Bevel - Quorum](./docs/images/hyperledger-bevel-quorum.png "Hyperledger Bevel for Quorum") + +## Hyperledger Besu +For Hyperledger Besu, there are two ways to deploy the network. + +- Using `helm install`: Follow the [Besu Charts readme](./platforms/hyperledger-besu/charts/README.md). +- Using Ansible: A number of different Ansible scripts will allow you to create a new network (across clouds). -![Hyperledger Bevel - Quorum](./docs/images/hyperledger-bevel-quorum.png "Hyperledger Bevel for Quorum") + ![Hyperledger Bevel - Besu](./docs/images/hyperledger-bevel-besu.png "Hyperledger Bevel for Hyperledger Besu") -### Hyperledger Besu -For Hyperledger Besu, we use the official Docker containers provided by that project. A number of different Ansible scripts will allow you to create a new network (across clouds). +## Substrate +For Substrate, there are two ways to deploy the network. -![Hyperledger Bevel - Besu](./docs/images/hyperledger-bevel-besu.png "Hyperledger Bevel for Hyperledger Besu") +- Using `helm install`: Follow the [Substrate Charts readme](./platforms/substrate/charts/README.md). +- Using Ansible: A number of different Ansible scripts will allow you to create a new network (across clouds). -### Substrate -For Substrate, we use the official Docker containers provided by that project. A number of different Ansible scripts will allow you to create a new network (across clouds). 
+ ![Hyperledger Bevel - Substrate](./docs/images/hyperledger-bevel-substrate.png "Hyperledger Bevel for Substrate") -![Hyperledger Bevel - Substrate](./docs/images/hyperledger-bevel-substrate.png "Hyperledger Bevel for Substrate") ## Contact We welcome your questions & feedback on our [Discord channel](https://discord.com/channels/905194001349627914/941739691336679454). [Please join our Discord first](https://discord.gg/hyperledger). @@ -92,8 +115,8 @@ We welcome contributions to Hyperledger Bevel in many forms, and there’s alway Please review [contributing](./CONTRIBUTING.md) guidelines to get started. -# Build -If you are not using the provided Jenkins automation scripts, you can run the provisioning scripts within a docker runtime independent from your target Kubernetes cluster. +## Build +If you are not using the provided Jenkins automation scripts, you can run the provisioning scripts within a docker runtime independent of your target Kubernetes cluster. ``` # Build provisioning image docker build . -t ghcr.io/hyperledger/bevel-build diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 5ddde366b8e..7c498c06490 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -92,10 +92,10 @@ nav: - Concepts: - Sequence Diagram: concepts/sequence-diagram.md - Features: concepts/features.md - - Ansible: concepts/ansible.md - - Gitops: concepts/gitops.md - Helm: concepts/helm.md - Kubernetes: concepts/kubernetes.md + - Ansible: concepts/ansible.md + - Gitops: concepts/gitops.md - Vault: concepts/vault.md - Getting Started: - Pre-requisites: getting-started/prerequisites.md @@ -103,7 +103,7 @@ nav: - Configure pre-requisites: getting-started/configure-prerequisites.md - Running: getting-started/run-bevel.md - Tutorials: - - Tutorial List: tutorials/index.md + - Tutorials: tutorials/index.md - Developer pre-requisites: tutorials/dev-prereq.md - Deploy using Docker: tutorials/docker-deploy.md - Deploy using Machine: tutorials/machine-deploy.md @@ -136,7 +136,6 @@ nav: - guides/fabric/upgrade-network-1.4.x-2.2.x.md - guides/fabric/upgrade-network.md - Corda Operations: - - guides/corda/add-cenm-console.md - guides/corda/add-cordapps.md - guides/corda/add-new-notary.md - guides/corda/add-new-org.md diff --git a/docs/source/_static/NetworkYamlFabric1.png b/docs/source/_static/NetworkYamlFabric1.png deleted file mode 100644 index 9b9c3f8faa0..00000000000 Binary files a/docs/source/_static/NetworkYamlFabric1.png and /dev/null differ diff --git a/docs/source/concepts/ansible.md b/docs/source/concepts/ansible.md index 6d7bc5e8e85..9cfa42c3fdb 100644 --- a/docs/source/concepts/ansible.md +++ b/docs/source/concepts/ansible.md @@ -3,10 +3,14 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) -# **Ansible** +# Ansible [Ansible](https://docs.ansible.com/ansible/latest/index.html) is an open-source automation tool that simplifies the process of managing IT infrastructure and automating repetitive tasks. It is designed to make complex configuration and deployment processes easier, faster, and more consistent. Ansible is agentless, meaning it doesn't require any software to be installed on the managed hosts, making it lightweight and easy to set up. +!!! tip + + With 1.1 Release, Ansible is optional and can be used only for large deployment automation. + With Ansible, you can define your infrastructure as code using simple, human-readable YAML files called "playbooks." 
Playbooks describe the desired state of your systems, specifying which tasks should be performed and in what order. These tasks can range from simple tasks like copying files or installing packages to more complex tasks like configuring services or managing cloud resources. Here's how Ansible works in a nutshell:
diff --git a/docs/source/concepts/features.md b/docs/source/concepts/features.md
index b7fee17cf36..7d2bd45297b 100644
--- a/docs/source/concepts/features.md
+++ b/docs/source/concepts/features.md
@@ -19,14 +19,20 @@ The setup of a Distributed Ledger Technology (DLT) network doesn't hinge on mana
## One touch/command deployment
With just one Ansible playbook — aptly named [site.yaml](https://github.com/hyperledger/bevel/tree/main/platforms/shared/configuration/site.yaml), you can orchestrate the creation of an entire Distributed Ledger Technology (DLT) network. Brace yourself for efficiency gains as this streamlined process significantly slashes the time typically spent configuring and managing the network components of Corda, Besu, Fabric or other supported DLT networks.
-## Security through Vault
-In the realm of identity-based security, HashiCorp Vault takes centre stage within Hyperledger Bevel. Especially in the complex terrain of managing secrets across multiple clouds, the dynamic capabilities of HashiCorp Vault shine through. With Vault at its core, Hyperledger Bevel ensures the secure storage and precise control of access to critical elements like tokens, passwords, certificates, and encryption keys. This robust approach safeguards machines, applications, and sensitive data within a multi-cloud environment.
+## Security through Vault (Optional)
+In the realm of identity-based security, HashiCorp Vault takes centre stage within Hyperledger Bevel. Especially in the complex terrain of managing secrets across multiple clouds, the dynamic capabilities of HashiCorp Vault shine through. With Vault at its core, Hyperledger Bevel ensures the secure storage and precise control of access to critical elements like tokens, passwords, certificates, and encryption keys. This robust approach safeguards machines, applications, and sensitive data within a multi-cloud environment. Now **optional** for development environments.
## Sharing a Network.yaml file without disclosing any confidentiality
Unlocking a new level of efficiency, Hyperledger Bevel empowers organizations to initiate a Distributed Ledger Technology (DLT) or Blockchain network swiftly. Leveraging a configured [network.yaml](../guides/networkyaml-fabric.md) file, the setup process is not only streamlined but sets the stage for seamless collaboration. Here's the game-changer: this [network.yaml](../guides/networkyaml-fabric.md) file can be easily shared with new organizations looking to join the DLT/Blockchain network. The brilliance lies in the ability to reuse this file without compromising the confidentiality of the initial organization's sensitive data.
+## Helm Chart Support
+Simplifies deployment of DLT networks with Helm charts. Especially for development environments, `helm install` commands alone can be used to set up a DLT network in a few minutes.
+
+## GitOps Optionality
+Provides flexibility by making GitOps deployment optional for development environments. This gives developers faster access to the DLT environment without the complexities of configuring GitOps.
+
## How is it different from other BaaS?
- Hyperledger Bevel deployment scripts can be reused across cloud providers like AWS, Azure, GCP, DigitalOcean and OpenShift
- Can deploy networks and smart contracts across different DLT/Blockchain platforms
diff --git a/docs/source/concepts/gitops.md b/docs/source/concepts/gitops.md
index 0f25f57db58..562f1a663ac 100644
--- a/docs/source/concepts/gitops.md
+++ b/docs/source/concepts/gitops.md
@@ -7,6 +7,10 @@
[GitOps](https://www.weave.works/technologies/gitops/) is a modern approach to managing and deploying applications in Kubernetes clusters. It leverages the version control system Git as the source of truth for defining the desired state of your infrastructure and applications. Flux is a popular tool used to implement GitOps workflows, enabling automatic synchronization of changes from Git repositories to Kubernetes clusters.
+!!! tip
+
+    With 1.1 Release, GitOps is optional for small development environments.
+
## Features
1. Source of Truth: In GitOps, the Git repository serves as the single source of truth for your infrastructure and application configurations. All desired states are declared and versioned in Git.
diff --git a/docs/source/concepts/sequence-diagram.md b/docs/source/concepts/sequence-diagram.md
index 989fa885a39..5301649a122 100644
--- a/docs/source/concepts/sequence-diagram.md
+++ b/docs/source/concepts/sequence-diagram.md
@@ -1,10 +1,10 @@
# Bevel Sequence Diagram
-It is important to understand the sequence and flow for Bevel as this will determine how you confgure your networking.
+When using Ansible automation in Bevel, it is important to understand the sequence and flow as this will determine how you configure your networking.
!!! tip
-    Do not use 127.0.0.1 or localhost to configure any services like Kubernetes or Vault
+    Do not use 127.0.0.1 or localhost to configure any services like Kubernetes or Vault.
``` mermaid
sequenceDiagram
diff --git a/docs/source/concepts/vault.md b/docs/source/concepts/vault.md
index 965b468ada2..16d0cc39bf4 100644
--- a/docs/source/concepts/vault.md
+++ b/docs/source/concepts/vault.md
@@ -7,6 +7,10 @@
[HashiCorp Vault](https://www.vaultproject.io/) is an open-source tool designed to manage secrets and protect sensitive data in modern computing environments. It provides a secure and centralized way to store, access, and distribute secrets such as passwords, API keys, encryption keys, and other confidential information. Vault is especially useful in cloud-native and distributed systems where securing secrets is crucial.
+!!! tip
+
+    With 1.1 Release, Hashicorp Vault is optional for development environments, and Cloud KMS integration is on the roadmap.
+
Hyperledger Bevel relies on Hashicorp Vault for managing all secrets like public and private certificates, passwords to repos or databases etc. which are used in a DLT/Blockchain network during the lifecycle of a deployment, and it is a prerequisite that the Vault is installed and unsealed prior to deployment of a DLT/Blockchain network.
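As a rough illustration of that prerequisite (not taken from the Bevel docs), initialising and unsealing a fresh Vault server and enabling a KV version 2 secrets engine could look like the sketch below; the server address, key shares and threshold are placeholders, and the `secretsv2` path simply mirrors the `secret_path` value used in Bevel's sample network.yaml files.

```bash
# Point the Vault CLI at your Vault server (placeholder address).
export VAULT_ADDR="https://your-vault-server:8200"

# Initialise the Vault server; this prints the unseal keys and the initial root token.
vault operator init -key-shares=5 -key-threshold=3

# Unseal with any three of the five unseal keys printed above.
vault operator unseal <unseal-key-1>
vault operator unseal <unseal-key-2>
vault operator unseal <unseal-key-3>

# Authenticate, then enable a KV v2 secrets engine at the path the sample
# network.yaml files refer to as secret_path (secretsv2).
vault login <initial-root-token>
vault secrets enable -version=2 -path=secretsv2 kv
```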
## Core Features diff --git a/docs/source/getting-started/configure-prerequisites.md b/docs/source/getting-started/configure-prerequisites.md index 9068e48f6e4..939fb539fbe 100644 --- a/docs/source/getting-started/configure-prerequisites.md +++ b/docs/source/getting-started/configure-prerequisites.md @@ -6,10 +6,9 @@ # Configure Common Pre-requisites - [GitOps Authentication](#gitops-authentication) -- [Vault Initialization and unseal](#vaultunseal) -- [Docker Images](#docker) +- [Unseal Hashicorp Vault](#unseal-hashicorp-vault) +- [Docker Images](#docker-images) - ## GitOps Authentication For synchronizing the Git repo with the cluster, Hyperledger Bevel configures Flux for each cluster. The authentication can be via SSH or HTTPS. @@ -34,7 +33,6 @@ The above command generates an SSH key-pair: **gitops** (private key) and **gito And add the public key contents (starts with **ssh-rsa**) as an Access Key (with read-write permissions) in your Github repository by following [this guide](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/adding-a-new-ssh-key-to-your-github-account). - ## Unseal Hashicorp Vault The [Hashicorp Vault](../concepts/vault.md) must be initialised and unsealed. Complete the following steps to unseal and access the Vault. @@ -43,7 +41,7 @@ The [Hashicorp Vault](../concepts/vault.md) must be initialised and unsealed. Co !!! important - Vault version should be > **1.13.1** + Vault version should be > **1.13.1** and <= **1.15.2** * Set the environment Variable **VAULT_ADDR** as the Vault service. @@ -83,8 +81,6 @@ The [Hashicorp Vault](../concepts/vault.md) must be initialised and unsealed. Co It is recommended to use Vault auto-unseal using Cloud KMS for Production Systems. And also, rotate the root token regularly. - - ## Docker Images Hyperledger Bevel provides pre-built docker images which are available on [GitHub Repo](https://github.com/orgs/hyperledger/packages?repo_name=bevel). Ensure that the versions/tags you need are available. If not, [ask a question](../contributing/asking-a-question.md). diff --git a/docs/source/getting-started/prerequisites-machine.md b/docs/source/getting-started/prerequisites-machine.md index a1d9a384f84..e27e05040f1 100644 --- a/docs/source/getting-started/prerequisites-machine.md +++ b/docs/source/getting-started/prerequisites-machine.md @@ -11,7 +11,7 @@ ## Ansible -Hyperledger Bevel configuration is essentially Ansible scripts, so install Ansible on the machine from which you will deploy the DLT/Blockchain network. This can be a local machine as long as Ansible commands can run on it. +Hyperledger Bevel automation is essentially Ansible scripts, so install Ansible on the machine from which you will deploy the DLT/Blockchain network. This can be a local machine as long as Ansible commands can run on it. As per our [sequence diagram](../concepts/sequence-diagram.md), this machine (also called **Ansible Controller**) should have connectivity to the Kubernetes cluster(s) and the Hashicorp Vault service(s). And it is essential to install the [git client](https://git-scm.com/download) on the Ansible Controller. diff --git a/docs/source/getting-started/prerequisites.md b/docs/source/getting-started/prerequisites.md index 8ca46ee70e9..8e0b04e4e92 100644 --- a/docs/source/getting-started/prerequisites.md +++ b/docs/source/getting-started/prerequisites.md @@ -7,53 +7,65 @@ Following are the common prerequisite software/client/platforms etc. 
needed before you can start deploying/operating blockchain networks using Hyperledger Bevel.
-## Git Repository
-[GitOps](../concepts/gitops.md) is a key concept for Hyperledger Bevel, so a Git repository is needed for Bevel (this can be a [GitHub](https://github.com/) repository as well).
-Fork or import the [Bevel GitHub repo](https://github.com/hyperledger/bevel) to this Git repository.
+## Helm
-The Operator should have a user created on this repo with read-write access to the Git Repository.
+[Helm](../concepts/helm.md) is a crucial tool for managing Kubernetes applications, simplifying the deployment and management of Kubernetes manifests. For Hyperledger Bevel, Helm charts are used to streamline the deployment of DLT networks, ensuring consistency and efficiency.
-!!! tip
+To install Helm, follow the official [Helm installation guide](https://helm.sh/docs/intro/install/). Ensure the version is compatible with your Kubernetes setup.
-    Install Git Client Version > **2.31.0**
+!!! tip
+    Install Helm Version > **v3.6.2**
## Kubernetes
Hyperledger Bevel deploys the DLT/Blockchain network on [Kubernetes](https://kubernetes.io/) clusters; hence, at least one Kubernetes cluster should be available.
-Bevel recommends one Kubernetes cluster per organization for production-ready projects.
-Also, a user needs to make sure that the Kubernetes clusters can support the number of pods and persistent volumes that will be created by Bevel.
+
+Bevel recommends one Kubernetes cluster per organization for production-ready projects. Also, a user needs to make sure that the Kubernetes clusters can support the number of pods and persistent volumes that will be created by Bevel.
!!! tip
-    For the current release Bevel has been tested on Amazon EKS with Kubernetes version **1.23**.
+    For the current release Bevel has been tested on Amazon EKS with Kubernetes version **1.28**.
-    Bevel has been tested on Kubernetes >= 1.19 and <= 1.23
+    Bevel has been tested on Kubernetes >= 1.19 and <= 1.28
!!! warning Icon Filename
-    Also, install kubectl Client version as per Kubernetes version **v1.23.0**
+    Also, install kubectl Client version as per Kubernetes version **v1.28.0**
-Please follow respective Cloud provider instructions, like [ for Amazon](https://aws.amazon.com/eks/getting-started/) to set-up your required Kubernetes cluster(s).
+Please follow respective Cloud provider instructions, like [for Amazon](https://aws.amazon.com/eks/getting-started/) to set up your required Kubernetes cluster(s).
To connect to Kubernetes cluster(s), you will also need kubectl Command Line Interface (CLI). Refer [here](https://kubernetes.io/docs/tasks/tools/install-kubectl/) for installation instructions, although Hyperledger Bevel configuration code (Ansible scripts) installs this automatically.
+## Git Repository
+
+Release 1.1 onwards, [GitOps](../concepts/gitops.md) is an optional concept for Hyperledger Bevel. However, for production-ready deployments with Ansible automation, GitOps is still required.
+
+Fork or import the [Bevel GitHub repo](https://github.com/hyperledger/bevel) to your own Git repository. The Operator should have a user created on this repo with read-write access to the Git Repository.
+
+!!! tip
+
+    Install Git Client Version > **2.31.0**
+
## HashiCorp Vault
-In this current release, [Hashicorp Vault](https://www.vaultproject.io/) is mandatory for Hyperledger Bevel as the certificate and key storage solution; hence, at least one Vault server should be available. Bevel recommends one Vault per organization for production-ready projects.
+
+Release 1.1 onwards, [Hashicorp Vault](https://www.vaultproject.io/) is optional for Hyperledger Bevel as the certificate and key storage solution. However, for production-ready deployments with Ansible automation, at least one Vault server should be available. Bevel recommends one Vault per organization for production-ready projects.
Follow [official instructions](https://developer.hashicorp.com/vault/docs/install) to deploy Vault in your environment.
!!! tip
+
    The recommended approach is to create one Vault deployment on one VM and configure the backend as cloud storage.
!!! warning
-    Vault version should be **1.13.1**
+    Vault version should be <= **1.15.2**
## Internet Domain
+
Hyperledger Bevel uses [Ambassador Edge Stack](https://www.getambassador.io/products/edge-stack/api-gateway) or [HAProxy Ingress Controller](https://haproxy-ingress.github.io/) for inter-cluster communication. So, for the Kubernetes services to be available outside the specific cluster, at least one DNS Domain is required. This domain name can then be sub-divided across multiple clusters and the domain-resolution configured for each. Although for production implementations, each organization (and thereby each cluster), must have one domain name.
-!!! tip
+!!! note
    If single cluster is being used for all organizations in a dev/POC environment, then domain name is not needed.
diff --git a/docs/source/getting-started/run-bevel.md b/docs/source/getting-started/run-bevel.md
index 1f96d735a5c..4c07fb89cee 100644
--- a/docs/source/getting-started/run-bevel.md
+++ b/docs/source/getting-started/run-bevel.md
@@ -4,19 +4,32 @@
[//]: # (##############################################################################################)
# Run Bevel
-Once your [pre-requisites](./prerequisites.md) are [configured](./configure-prerequisites.md), it's time to take the next step. Fork the [Hyperledger Bevel GitHub](https://github.com/hyperledger/bevel) repository and unlock the potential of this powerful tool for your Distributed Ledger Technology (DLT) deployment.
+Once your [pre-requisites](./prerequisites.md) are [configured](./configure-prerequisites.md), it's time to take the next step.
-Now, let's explore two user-friendly methods for using Hyperledger Bevel:
+There are three user-friendly methods for using Hyperledger Bevel:
-- [Using the **bevel-build** Docker container as Ansible controller.](#bevel-build)
-- [Using your own machine as Ansible controller.](#own-machine)
+- [Using Helm Charts](#using-helm-charts)
+- [Using the **bevel-build** Docker container as Ansible controller.](#using-docker-container)
+- [Using your own machine as Ansible controller.](#using-own-machine)
+
+## Using Helm Charts
+
+Release 1.1 onwards, Bevel can be used without Ansible automation. If you want to create a small development network, using the Helm charts will be simpler and faster. For production-ready networks or complex networks with multiple organisations, the two options below are recommended.
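As a loose sketch of what the Helm-only route involves (the chart path, release name and values file below are placeholders, not documented Bevel chart names; always follow the platform-specific charts README linked after this sketch), the flow is roughly clone, resolve chart dependencies, then install:

```bash
# Clone the Bevel repository to get the platform charts locally.
git clone https://github.com/hyperledger/bevel.git && cd bevel

# Resolve subchart dependencies for the chart you intend to install.
# <platform> and <chart-name> are placeholders; use a real chart directory
# from platforms/<platform>/charts as described in its README.
helm dependency update ./platforms/<platform>/charts/<chart-name>

# Install the chart into a namespace for your organisation, overriding
# defaults with your own values file.
helm install <release-name> ./platforms/<platform>/charts/<chart-name> \
  --namespace <org-namespace> --create-namespace \
  --values ./my-values.yaml
```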
+ +Follow the respective Helm chart documentation to setup your network: + +* [R3 Corda Opensource Charts](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda/charts) +* [R3 Corda Enterprise Charts](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts) +* [Hyperledger Fabric Charts](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/charts) +* [Hyperledger Indy Charts](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-indy/charts) +* [Quorum Charts](https://github.com/hyperledger/bevel/tree/main/platforms/quorum/charts) +* [Hyperledger Besu Charts](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-besu/charts) +* [Substrate Charts](https://github.com/hyperledger/bevel/tree/main/platforms/substrate/charts) - ## Using Docker container Follow [this tutorial](../tutorials/docker-deploy.md) for how to deploy from the docker container. - ## Using Own machine Using own machine as Ansible Controller needs these [additional pre-requisites](./prerequisites-machine.md). diff --git a/docs/source/guides/besu/add-new-member-org.md b/docs/source/guides/besu/add-new-member-org.md index 3c6689df34f..05c58aa78d4 100644 --- a/docs/source/guides/besu/add-new-member-org.md +++ b/docs/source/guides/besu/add-new-member-org.md @@ -35,137 +35,13 @@ The `network.yaml` file should contain the specific `network.organization` detai Make sure that the genesis flie is provided in base64 encoding. Also, if you are adding node to the same cluster as of another node, make sure that you add the ambassador ports of the existing node present in the cluster to the network.yaml -For reference, sample `network.yaml` file looks like below for IBFT consensus (but always check the latest network-besu-new-memberorg.yaml at `platforms/hyperledger-besu/configuration/samples`): +For reference, sample `network-besu-new-memberorg.yaml` file [here](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-besu/configuration/samples/network-besu-new-memberorg.yaml) +```yaml +--8<-- "platforms/hyperledger-besu/configuration/samples/network-besu-new-memberorg.yaml:1:152" ``` ---- -# This is a sample configuration file for Hyperledger Besu network which has 4 nodes. -# All text values are case-sensitive -network: - # Network level configuration specifies the attributes required for each organization - # to join an existing network. - type: besu - version: 22.10.2 #this is the version of Besu docker image that will be deployed. - - #Environment section for Kubernetes setup - env: - type: "dev" # tag for the environment. Important to run multiple flux on single cluster - proxy: ambassador # value has to be 'ambassador' as 'haproxy' has not been implemented for besu - ## Any additional Ambassador ports can be given below, this is valid only if proxy='ambassador' - # These ports are enabled per cluster, so if you have multiple clusters you do not need so many ports - # This sample uses a single cluster, so we have to open 4 ports for each Node. These ports are again specified for each organization below - ambassadorPorts: - portRange: # For a range of ports - from: 15010 - to: 15043 - # ports: 15020,15021 # For specific ports - retry_count: 20 # Retry count for the checks on Kubernetes cluster - external_dns: enabled # Should be enabled if using external-dns for automatic route configuration - - # Docker registry details where images are stored. 
This will be used to create k8s secrets - # Please ensure all required images are built and stored in this registry. - # Do not check-in docker_password. - docker: - url: "ghcr.io/hyperledger" - username: "docker_username" - password: "docker_password" - - # Following are the configurations for the common Besu network - config: - consensus: "ibft" # Options are "ibft", "ethash" and "clique". - ## Certificate subject for the root CA of the network. - # This is for development usage only where we create self-signed certificates and the truststores are generated automatically. - # Production systems should generate proper certificates and configure truststores accordingly. - subject: "CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB" - transaction_manager: "tessera" # Transaction manager is "tessera" - # This is the version of "tessera" docker image that will be deployed - tm_version: "21.7.3" - # TLS can be True or False for the tessera tm - tm_tls: True - # Tls trust value - tm_trust: "ca-or-tofu" # Options are: "ca-or-tofu", "ca", "tofu" - ## File location where the base64 encoded genesis file is located. - genesis: "/home/user/bevel/build/besu_genesis" # Location where genesis file will be fetched - ## Transaction Manager nodes public addresses should be provided. - # - "https://node.test.besu.blockchain-develop.com" - # The above domain name is formed by the (http or https)://(peer.name).(org.external_url_suffix):(ambassador tessera node port) - tm_nodes: - - "https://carrier.test.besu.blockchaincloudpoc-develop.com" - - "https://manufacturer.test.besu.blockchaincloudpoc-develop.com" - - "https://store.test.besu.blockchaincloudpoc-develop.com" - - "https://warehouse.test.besu.blockchaincloudpoc-develop.com" - # Besu rpc public address list for existing validator and member nodes - # - "http://noderpc.test.besu.blockchaincloudpoc-develop.com" - # The above domain name is formed by the (http)://(peer.name)rpc.(org.external_url_suffix):(ambassador node rpc port) - besu_nodes: - - "http://validatorrpc.test.besu.blockchaincloudpoc-develop.com" - - "http://carrierrpc.test.besu.blockchaincloudpoc-develop.com" - - "http://manufacturerrpc.test.besu.blockchaincloudpoc-develop.com" - - "http://storerpc.test.besu.blockchaincloudpoc-develop.com" - - # Allows specification of one or many organizations that will be connecting to a network. - organizations: - # Specification for the 1st organization. Each organization should map to a VPC and a separate k8s cluster for production deployments - - organization: - name: neworg - type: member - # Provide the url suffix that will be added in DNS recordset. Must be different for different clusters - # This is not used for Besu as Besu does not support DNS hostnames currently. Here for future use - external_url_suffix: test.besu.blockchaincloudpoc-develop.com - cloud_provider: aws # Options: aws, azure, gcp - aws: - access_key: "aws_access_key" # AWS Access key, only used when cloud_provider=aws - secret_key: "aws_secret_key" # AWS Secret key, only used when cloud_provider=aws - region: "aws_region" # AWS Region where cluster and EIPs are created - # Kubernetes cluster deployment variables. The config file path and name has to be provided in case - # the cluster has already been created. - k8s: - context: "cluster_context" - config_file: "cluster_config" - # Hashicorp Vault server address and root-token. Vault should be unsealed. 
- # Do not check-in root_token - vault: - url: "vault_addr" - root_token: "vault_root_token" - secret_path: "secretsv2" - # Git Repo details which will be used by GitOps/Flux. - # Do not check-in git_access_token - gitops: - git_protocol: "https" # Option for git over https or ssh - git_url: "https://github.com//bevel.git" # Gitops https or ssh url for flux value files - branch: "develop" # Git branch where release is being made - release_dir: "platforms/hyperledger-besu/releases/dev" # Relative Path in the Git repo for flux sync per environment. - chart_source: "platforms/hyperledger-besu/charts" # Relative Path where the Helm charts are stored in Git repo - git_repo: "github.com//bevel.git" # Gitops git repository URL for git push - username: "git_username" # Git Service user who has rights to check-in in all branches - password: "git_access_token" # Git Server user access token (Optional for ssh; Required for https) - email: "git_email" # Email to use in git config - private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) - # The participating nodes are named as peers - services: - peers: - - peer: - name: newOrg - subject: "O=Neworg,OU=Neworg,L=51.50/-0.13/London,C=GB" # This is the node subject. L=lat/long is mandatory for supplychain sample app - geth_passphrase: 12345 # Passphrase to be used to generate geth account - lock: false # Sets Besu node to lock or unlock mode. Can be true or false - p2p: - port: 30303 - ambassador: 15020 #Port exposed on ambassador service (use one port per org if using single cluster) - rpc: - port: 8545 - ambassador: 15021 #Port exposed on ambassador service (use one port per org if using single cluster) - ws: - port: 8546 - tm_nodeport: - port: 15022 # Port exposed on ambassador service must be same - ambassador: 15022 - tm_clientport: - port: 8888 - -``` -Three new sections are added to the network.yaml +Three new sections are added to the network.yaml | Field | Description | |-------------|----------------------------------------------------------| @@ -185,3 +61,4 @@ ansible-playbook platforms/shared/configuration/add-new-organization.yaml --extr ## Verify network deployment For instructions on how to troubleshoot network, read [our troubleshooting guide](../../references/troubleshooting.md) + diff --git a/docs/source/guides/besu/add-new-validator-node.md b/docs/source/guides/besu/add-new-validator-node.md index fa02bd57517..49c0aa9511f 100644 --- a/docs/source/guides/besu/add-new-validator-node.md +++ b/docs/source/guides/besu/add-new-validator-node.md @@ -30,183 +30,12 @@ The `network.yaml` file should contain the specific `network.organization` detai **NOTE**: Make sure that the genesis flie is provided in base64 encoding. 
Also, if you are adding node to the same cluster as of another node, make sure that you add the ambassador ports of the existing node present in the cluster to the network.yaml --- -For reference, sample `network.yaml` file looks like below for IBFT consensus (but always check the latest network-besu-new-validatornode.yaml at `platforms/hyperledger-besu/configuration/samples`): +For reference, sample `network-besu-new-validatornode.yaml` file [here](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-besu/configuration/samples/network-besu-new-validatornode.yaml) +```yaml +--8<-- "platforms/hyperledger-besu/configuration/samples/network-besu-new-validatornode.yaml:1:201" ``` ---- -# This is a sample configuration file to add a new validator node to existing network. -# This DOES NOT support proxy=none -# All text values are case-sensitive -network: -# Network level configuration specifies the attributes required for each organization to join an existing network. - type: besu - version: 21.10.6 #this is the version of Besu docker image that will be deployed. - -#Environment section for Kubernetes setup - env: - type: "dev" # tag for the environment. Important to run multiple flux on single cluster - proxy: ambassador # value has to be 'ambassador' as 'haproxy' has not been implemented for besu - ## Any additional Ambassador ports can be given below, this is valid only if proxy='ambassador' - # These ports are enabled per cluster, so if you have multiple clusters you do not need so many ports - # This sample uses a single cluster, so we have to open 4 ports for each Node. - # These ports are again specified for each organization below - ambassadorPorts: - portRange: # For a range of ports - from: 15010 - to: 15043 - # ports: 15020,15021 # For specific ports - retry_count: 20 # Retry count for the checks on Kubernetes cluster - external_dns: enabled # Should be enabled if using external-dns for automatic route configuration - - # Docker registry details where images are stored. This will be used to create k8s secrets - # Please ensure all required images are built and stored in this registry. - # Do not check-in docker_password. - docker: - url: "ghcr.io/hyperledger" - username: "docker_username" - password: "docker_password" - - # Following are the configurations for the common Besu network - config: - consensus: "ibft" # Options are "ibft", "ethash" and "clique". - ## Certificate subject for the root CA of the network. - # This is for development usage only where we create self-signed certificates - # and the truststores are generated automatically. - # Production systems should generate proper certificates and configure truststores accordingly. - subject: "CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB" - transaction_manager: "tessera" # Transaction manager is "tessera" - # This is the version of "tessera" docker image that will be deployed - tm_version: "21.7.3" - # TLS can be True or False for the tessera tm - tm_tls: True - # Tls trust value - tm_trust: "ca-or-tofu" # Options are: "ca-or-tofu", "ca", "tofu" - ## File location where the base64 encoded genesis file is located. - genesis: "/home/user/bevel/build/besu_genesis" - ## Transaction Manager nodes public addresses should be provided. 
- # - "https://node.test.besu.blockchain-develop.com" - # The above domain name is formed by the (http or https)://(peer.name).(org.external_url_suffix):(ambassador tm node port) - tm_nodes: - - "https://carrier.test.besu.blockchaincloudpoc-develop.com" - - "https://manufacturer.test.besu.blockchaincloudpoc-develop.com" - - "https://store.test.besu.blockchaincloudpoc-develop.com" - - "https://warehouse.test.besu.blockchaincloudpoc-develop.com" - # Besu rpc public address list for existing validator and member nodes - # - "http://noderpc.test.besu.blockchaincloudpoc-develop.com" - # The above domain name is formed by the (http)://(peer.name)rpc.(org.external_url_suffix):(ambassador node rpc port) - besu_nodes: - - "http://validator1rpc.test.besu.blockchaincloudpoc-develop.com" - - "http://validator2rpc.test.besu.blockchaincloudpoc-develop.com" - - "http://validator3rpc.test.besu.blockchaincloudpoc-develop.com" - - "http://validator4rpc.test.besu.blockchaincloudpoc-develop.com" - - "https://carrierrpc.test.besu.blockchaincloudpoc-develop.com" - - "https://manufacturerrpc.test.besu.blockchaincloudpoc-develop.com" - - "https://storerpc.test.besu.blockchaincloudpoc-develop.com" - - "https://warehouserpc.test.besu.blockchaincloudpoc-develop.com" - - # Allows specification of one or many organizations that will be connecting to a network. - organizations: - # Specification for the 1st organization. Each organization should map to a VPC and a separate k8s cluster for production deployments - - organization: - name: supplychain - type: validator - # Provide the url suffix that will be added in DNS recordset. Must be different for different clusters - # This is not used for Besu as Besu does not support DNS hostnames currently. Here for future use - external_url_suffix: test.besu.blockchaincloudpoc-develop.com - cloud_provider: aws # Options: aws, azure, gcp - aws: - access_key: "aws_access_key" # AWS Access key, only used when cloud_provider=aws - secret_key: "aws_secret_key" # AWS Secret key, only used when cloud_provider=aws - region: "aws_region" # AWS Region where cluster and EIPs are created - # Kubernetes cluster deployment variables. The config file path and name has to be provided in case - # the cluster has already been created. - k8s: - context: "cluster_context" - config_file: "cluster_config" - # Hashicorp Vault server address and root-token. Vault should be unsealed. - # Do not check-in root_token - vault: - url: "vault_addr" - root_token: "vault_root_token" - secret_path: "secretsv2" - # Git Repo details which will be used by GitOps/Flux. - # Do not check-in git_access_token - gitops: - git_protocol: "https" # Option for git over https or ssh - git_url: "https://github.com//bevel.git" # Gitops https or ssh url for flux value files - branch: "develop" # Git branch where release is being made - release_dir: "platforms/hyperledger-besu/releases/dev" # Relative Path in the Git repo for flux sync per environment. 
- chart_source: "platforms/hyperledger-besu/charts" # Relative Path where the Helm charts are stored in Git repo - git_repo: "github.com//bevel.git" # Gitops git repository URL for git push - username: "git_username" # Git Service user who has rights to check-in in all branches - password: "git_access_token" # Git Server user access token (Optional for ssh; Required for https) - email: "git_email" # Email to use in git config - private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) - # As this is a validator org, it is hosting a few validators as services - services: - validators: - - validator: - name: validator1 - status: existing # needed to know which validator node exists - bootnode: true # true if the validator node is used also a bootnode for the network - p2p: - port: 30303 - ambassador: 15020 #Port exposed on ambassador service (use one port per org if using single cluster) - rpc: - port: 8545 - ambassador: 15021 #Port exposed on ambassador service (use one port per org if using single cluster) - ws: - port: 8546 - - validator: - name: validator2 - status: existing # needed to know which validator node exists - bootnode: true # true if the validator node is used also a bootnode for the network - p2p: - port: 30303 - ambassador: 15012 #Port exposed on ambassador service (use one port per org if using single cluster) - rpc: - port: 8545 - ambassador: 15013 #Port exposed on ambassador service (use one port per org if using single cluster) - ws: - port: 8546 - - validator: - name: validator3 - status: existing # needed to know which validator node exists - bootnode: false # true if the validator node is used also a bootnode for the network - p2p: - port: 30303 - ambassador: 15014 #Port exposed on ambassador service (use one port per org if using single cluster) - rpc: - port: 8545 - ambassador: 15015 #Port exposed on ambassador service (use one port per org if using single cluster) - ws: - port: 8546 - - validator: - name: validator4 - status: existing # needed to know which validator node exists - bootnode: false # true if the validator node is used also a bootnode for the network - p2p: - port: 30303 - ambassador: 15016 #Port exposed on ambassador service (use one port per org if using single cluster) - rpc: - port: 8545 - ambassador: 15017 #Port exposed on ambassador service (use one port per org if using single cluster) - ws: - port: 8546 - - validator: - name: validator5 - status: new # needed to know which validator node exists - bootnode: false # true if the validator node is used also a bootnode for the network - p2p: - port: 30303 - ambassador: 15018 #Port exposed on ambassador service (use one port per org if using single cluster) - rpc: - port: 8545 - ambassador: 15019 #Port exposed on ambassador service (use one port per org if using single cluster) - ws: - port: 8546 -``` Three new sections are added to the network.yaml | Field | Description | @@ -224,3 +53,4 @@ The [add-validator.yaml](https://github.com/hyperledger/bevel/tree/main/platform ``` ansible-playbook platforms/hyperledger-besu/configuration/add-validator.yaml --extra-vars "@path-to-network.yaml" ``` + diff --git a/docs/source/guides/besu/add-new-validator-org.md b/docs/source/guides/besu/add-new-validator-org.md index 4f0483b0134..21bae10c072 100644 --- a/docs/source/guides/besu/add-new-validator-org.md +++ b/docs/source/guides/besu/add-new-validator-org.md @@ -30,234 +30,12 @@ The `network.yaml` file should contain the 
specific `network.organization` detai **NOTE**: Make sure that the genesis flie is provided in base64 encoding. Also, if you are adding node to the same cluster as of another node, make sure that you add the ambassador ports of the existing node present in the cluster to the network.yaml --- -For reference, sample `network.yaml` file looks like below for IBFT consensus (but always check the latest network-besu-new-validatororg.yaml at `platforms/hyperledger-besu/configuration/samples`): +For reference, sample `network-besu-new-validatororg.yaml` file [here](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-besu/configuration/samples/network-besu-new-validatororg.yaml) +```yaml +--8<-- "platforms/hyperledger-besu/configuration/samples/network-besu-new-validatororg.yaml:1:155" ``` ---- -# This is a sample configuration file to add a new validator organization to existing network. -# This DOES NOT support proxy=none -# All text values are case-sensitive -network: -# Network level configuration specifies the attributes required for each organization to join an existing network. - type: besu - version: 21.10.6 #this is the version of Besu docker image that will be deployed. - -#Environment section for Kubernetes setup - env: - type: "dev" # tag for the environment. Important to run multiple flux on single cluster - proxy: ambassador # value has to be 'ambassador' as 'haproxy' has not been implemented for besu - ## Any additional Ambassador ports can be given below, this is valid only if proxy='ambassador' - # These ports are enabled per cluster, so if you have multiple clusters you do not need so many ports - # This sample uses a single cluster, so we have to open 4 ports for each Node. - # These ports are again specified for each organization below - ambassadorPorts: - portRange: # For a range of ports - from: 15010 - to: 15043 - # ports: 15020,15021 # For specific ports - retry_count: 20 # Retry count for the checks on Kubernetes cluster - external_dns: enabled # Should be enabled if using external-dns for automatic route configuration - - # Docker registry details where images are stored. This will be used to create k8s secrets - # Please ensure all required images are built and stored in this registry. - # Do not check-in docker_password. - docker: - url: "ghcr.io/hyperledger" - username: "docker_username" - password: "docker_password" - - # Following are the configurations for the common Besu network - config: - consensus: "ibft" # Options are "ibft", "ethash" and "clique". - ## Certificate subject for the root CA of the network. - # This is for development usage only where we create self-signed certificates and the truststores are generated automatically. - # Production systems should generate proper certificates and configure truststores accordingly. - subject: "CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB" - transaction_manager: "tessera" # Transaction manager is "tessera" - # This is the version of "tessera" docker image that will be deployed - tm_version: "21.7.3" - # TLS can be True or False for the tessera tm - tm_tls: True - # Tls trust value - tm_trust: "ca-or-tofu" # Options are: "ca-or-tofu", "ca", "tofu" - ## File location where the base64 encoded genesis file is located. - genesis: "/home/user/bevel/build/besu_genesis" - ## Transaction Manager nodes public addresses should be provided. 
- # - "https://node.test.besu.blockchain-develop.com" - # The above domain name is formed by the (http or https)://(peer.name).(org.external_url_suffix):(ambassador tessera node port) - tm_nodes: - - "https://carrier.test.besu.blockchaincloudpoc-develop.com" - - "https://manufacturer.test.besu.blockchaincloudpoc-develop.com" - - "https://store.test.besu.blockchaincloudpoc-develop.com" - - "https://warehouse.test.besu.blockchaincloudpoc-develop.com" - # Besu rpc public address list for existing validator and member nodes - # - "http://noderpc.test.besu.blockchaincloudpoc-develop.com" - # The above domain name is formed by the (http)://(peer.name)rpc.(org.external_url_suffix):(ambassador node rpc port) - besu_nodes: - - "http://validator1rpc.test.besu.blockchaincloudpoc-develop.com" - - "http://validator2rpc.test.besu.blockchaincloudpoc-develop.com" - - "http://validator3rpc.test.besu.blockchaincloudpoc-develop.com" - - "http://validator4rpc.test.besu.blockchaincloudpoc-develop.com" - - "https://carrierrpc.test.besu.blockchaincloudpoc-develop.com" - - "https://manufacturerrpc.test.besu.blockchaincloudpoc-develop.com" - - "https://storerpc.test.besu.blockchaincloudpoc-develop.com" - - "https://warehouserpc.test.besu.blockchaincloudpoc-develop.com" - - # Allows specification of one or many organizations that will be connecting to a network. - organizations: - # Specification for the 1st organization. Each organization should map to a VPC and a separate k8s cluster for production deployments - - organization: - name: supplychain - type: validator - # Provide the url suffix that will be added in DNS recordset. Must be different for different clusters - # This is not used for Besu as Besu does not support DNS hostnames currently. Here for future use - external_url_suffix: test.besu.blockchaincloudpoc-develop.com - cloud_provider: aws # Options: aws, azure, gcp - aws: - access_key: "aws_access_key" # AWS Access key, only used when cloud_provider=aws - secret_key: "aws_secret_key" # AWS Secret key, only used when cloud_provider=aws - region: "aws_region" # AWS Region where cluster and EIPs are created - # Kubernetes cluster deployment variables. The config file path and name has to be provided in case - # the cluster has already been created. - k8s: - context: "cluster_context" - config_file: "cluster_config" - # Hashicorp Vault server address and root-token. Vault should be unsealed. - # Do not check-in root_token - vault: - url: "vault_addr" - root_token: "vault_root_token" - secret_path: "secretsv2" - # Git Repo details which will be used by GitOps/Flux. - # Do not check-in git_access_token - gitops: - git_protocol: "https" # Option for git over https or ssh - git_url: "https://github.com//bevel.git" # Gitops https or ssh url for flux value files - branch: "develop" # Git branch where release is being made - release_dir: "platforms/hyperledger-besu/releases/dev" # Relative Path in the Git repo for flux sync per environment. 
- chart_source: "platforms/hyperledger-besu/charts" # Relative Path where the Helm charts are stored in Git repo - git_repo: "github.com//bevel.git" # Gitops git repository URL for git push - username: "git_username" # Git Service user who has rights to check-in in all branches - password: "git_access_token" # Git Server user access token (Optional for ssh; Required for https) - email: "git_email" # Email to use in git config - private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) - # As this is a validator org, it is hosting a few validators as services - services: - validators: - - validator: - name: validator1 - status: existing # needed to know which validator node exists - bootnode: true # true if the validator node is used also a bootnode for the network - p2p: - port: 30303 - ambassador: 15020 #Port exposed on ambassador service (use one port per org if using single cluster) - rpc: - port: 8545 - ambassador: 15021 #Port exposed on ambassador service (use one port per org if using single cluster) - ws: - port: 8546 - - validator: - name: validator2 - status: existing # needed to know which validator node exists - bootnode: true # true if the validator node is used also a bootnode for the network - p2p: - port: 30303 - ambassador: 15012 #Port exposed on ambassador service (use one port per org if using single cluster) - rpc: - port: 8545 - ambassador: 15013 #Port exposed on ambassador service (use one port per org if using single cluster) - ws: - port: 8546 - - validator: - name: validator3 - status: existing # needed to know which validator node exists - bootnode: false # true if the validator node is used also a bootnode for the network - p2p: - port: 30303 - ambassador: 15014 #Port exposed on ambassador service (use one port per org if using single cluster) - rpc: - port: 8545 - ambassador: 15015 #Port exposed on ambassador service (use one port per org if using single cluster) - ws: - port: 8546 - - validator: - name: validator4 - status: existing # needed to know which validator node exists - bootnode: false # true if the validator node is used also a bootnode for the network - p2p: - port: 30303 - ambassador: 15016 #Port exposed on ambassador service (use one port per org if using single cluster) - rpc: - port: 8545 - ambassador: 15017 #Port exposed on ambassador service (use one port per org if using single cluster) - ws: - port: 8546 - - - organization: - name: supplychain2 - type: validator - # Provide the url suffix that will be added in DNS recordset. Must be different for different clusters - external_url_suffix: test.besu.blockchaincloudpoc-develop.com - - cloud_provider: aws # Options: aws, azure, gcp - aws: - access_key: "aws_access_key" # AWS Access key, only used when cloud_provider=aws - secret_key: "aws_secret_key" # AWS Secret key, only used when cloud_provider=aws - region: "aws_region" # AWS Region where cluster and EIPs are created - # Kubernetes cluster deployment variables. The config file path and name has to be provided in case - # the cluster has already been created. - k8s: - context: "cluster_context" - config_file: "cluster_config" - # Hashicorp Vault server address and root-token. Vault should be unsealed. - # Do not check-in root_token - vault: - url: "vault_addr" - root_token: "vault_root_token" - secret_path: "secretsv2" - # Git Repo details which will be used by GitOps/Flux. 
- # Do not check-in git_access_token - gitops: - git_protocol: "https" # Option for git over https or ssh - git_url: "https://github.com//bevel.git" # Gitops https or ssh url for flux value files - branch: "develop" # Git branch where release is being made - release_dir: "platforms/hyperledger-besu/releases/dev" # Relative Path in the Git repo for flux sync per environment. - chart_source: "platforms/hyperledger-besu/charts" # Relative Path where the Helm charts are stored in Git repo - git_repo: "github.com//bevel.git" # Gitops git repository URL for git push - username: "git_username" # Git Service user who has rights to check-in in all branches - password: "git_access_token" # Git Server user password/token (Optional for ssh; Required for https) - email: "git@email.com" # Email to use in git config - private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) - # As this is a validator org, it is hosting a few validators as services - services: - validators: - - validator: - name: validator5 - status: new # needed to know which validator node exists - bootnode: true # true if the validator node is used also a bootnode for the network - p2p: - port: 30303 - ambassador: 15026 #Port exposed on ambassador service (use one port per org if using single cluster) - rpc: - port: 8545 - ambassador: 15027 #Port exposed on ambassador service (use one port per org if using single cluster) - ws: - port: 8546 - - validator: - name: validator6 - status: new # needed to know which validator node exists - bootnode: true # true if the validator node is used also a bootnode for the network - p2p: - port: 30303 - ambassador: 15028 #Port exposed on ambassador service (use one port per org if using single cluster) - rpc: - port: 8545 - ambassador: 15029 #Port exposed on ambassador service (use one port per org if using single cluster) - ws: - port: 8546 - -``` Three new sections are added to the network.yaml | Field | Description | @@ -276,4 +54,3 @@ The [add-validator.yaml](https://github.com/hyperledger/bevel/tree/main/platform ansible-playbook platforms/hyperledger-besu/configuration/add-validator.yaml --extra-vars "@path-to-network.yaml" --extra-vars "add_new_org='true'" ``` - diff --git a/docs/source/guides/besu/setup-cactus-connector.md b/docs/source/guides/besu/setup-cactus-connector.md index 920090c9d23..4d78287c3c9 100644 --- a/docs/source/guides/besu/setup-cactus-connector.md +++ b/docs/source/guides/besu/setup-cactus-connector.md @@ -27,33 +27,14 @@ Refer [this guide](../networkyaml-besu.md) for details on editing the configurat When editing the configuration file (`network.yaml`) to deploy the cactus connector, both validators and peers from validator and member organizations should have the `cactus_connector` field. To enable the cactus connector for a peer or validator, set the value as `enabled`. If a particular peer or validator does not want to support the cactus connector feature, set the `cactus_connector` field as `disabled`. A sample for the same is shared below: - network: - organizations: - - organization: supplychain - type: validator - .. - .. - services: - validators: - - validator: - name: validator1 - .. - .. - cactus_connector: enabled # set to enabled to create a cactus connector for Besu otherwise set it to disabled - - - organization: carrier - type: member - .. - .. - services: - peers: - - peer: - name: carrier - .. - .. 
- cactus_connector: disabled # set to enabled to create a cactus connector for Besu otherwise set it to disabled - -For reference, see `network-besu-v22.yaml` file [here](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-besu/configuration/samples/network-besu-v22.yaml). +```yaml +--8<-- "platforms/hyperledger-besu/configuration/samples/network-besu.yaml:127:132" + .. +--8<-- "platforms/hyperledger-besu/configuration/samples/network-besu.yaml:187:193" + .. +``` + +For reference, see `network-besu.yaml` file [here](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-besu/configuration/samples/network-besu.yaml). @@ -64,3 +45,4 @@ The [setup-cactus-connector.yaml](https://github.com/hyperledger/bevel/blob/deve ``` ansible-playbook platforms/hyperledger-besu/configuration/setup-cactus-connector.yaml --extra-vars "@path-to-network.yaml" ``` + diff --git a/docs/source/guides/besu/setup-onchain-permissioning.md b/docs/source/guides/besu/setup-onchain-permissioning.md index dba17a6afb9..1b658fd3aae 100644 --- a/docs/source/guides/besu/setup-onchain-permissioning.md +++ b/docs/source/guides/besu/setup-onchain-permissioning.md @@ -24,18 +24,9 @@ 2. To enable and use onchain permissioning, set the `network.permissioning.enabled` parameter to `true` in the Besu network configuration file. Below is a sample configuration for reference: - ```yaml - network: - type: besu - version: 21.10.6 - permissioning: - enabled: true # Set to false if onchain permissioning is not required - env:... - docker:... - config:... - organizations:... - ``` - +```yaml +--8<-- "platforms/hyperledger-besu/configuration/samples/network-besu.yaml:11:18" +``` For reference, use sample configuration defined in the [network-besu.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-besu/configuration/samples/network-besu.yaml) file. **Step 2: Deploy Besu network.** @@ -98,3 +89,5 @@ truffle migrate --reset --network besu By following these steps, we will be able to successfully deploy a Besu Onchain Permissioning Network. Post network bootstrap permissioing smartcontract can be installed. Smartcontract installation steps can be found [here](https://besu.hyperledger.org/en/stable/private-networks/tutorials/permissioning/onchain/#11-clone-the-contracts-and-install-dependencies) + + diff --git a/docs/source/guides/corda/add-cenm-console.md b/docs/source/guides/corda/add-cenm-console.md deleted file mode 100644 index fd6be99444a..00000000000 --- a/docs/source/guides/corda/add-cenm-console.md +++ /dev/null @@ -1,149 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) -[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# Adding a CENM Management Console in R3 Corda - -- [Prerequisites](#prerequisites) -- [Modify configuration file](#modify-configuration-file) - - -## Prerequisites -To add CENM management console, Auth service that has been setup with atleast one user (an admin user), Zone service and Gateway services should already be installed and running. - -The Helm Chart for Auth service is available [here](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/auth). -The Helm Chart for Zone service is available [here](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/zone). 
-The Helm Chart for Gateway service is available [here](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/gateway). - ---- -**NOTE**: Addition of a cenm management console has been tested on an existing network which is created by Bevel. Networks created using other methods may be suitable but this has not been tested by Bevel team. - ---- - - -## Modify Configuration File - -Refer [this guide](../networkyaml-corda.md) for details on editing the configuration file. - -The `network.yaml` file should contain the specific `services.auth`, `services.zone` and `services.gateway` details along with the network service information about the networkmap and doorman service. - ---- - -For reference, sample `network.yaml` file looks like below (but always check the latest at `platforms/r3-corda/configuration/samples`): - -``` -network: - # Network level configuration specifies the attributes required for each organization - # to join an existing network. - type: corda - version: 4.0 - #enabled flag is frontend is enabled for nodes - frontend: enabled - - #Environment section for Kubernetes setup - env: - type: "env_type" # tag for the environment. Important to run multiple flux on single cluster - proxy: ambassador # value has to be 'ambassador' as 'haproxy' has not been implemented for Corda - ambassadorPorts: # Any additional Ambassador ports can be given here, this is valid only if proxy='ambassador' - portRange: # For a range of ports - from: 15010 - to: 15043 - # ports: 15020,15021 # For specific ports - retry_count: 20 # Retry count for the checks - external_dns: enabled # Should be enabled if using external-dns for automatic route configuration - - # Docker registry details where images are stored. This will be used to create k8s secrets - # Please ensure all required images are built and stored in this registry. - # Do not check-in docker_password. - docker: - url: "docker_url" - username: "docker_username" - password: "docker_password" - - # Remote connection information for doorman and networkmap (will be blank or removed for hosting organization) - network_service: - - service: - type: doorman - uri: https://doorman.test.corda.blockchaincloudpoc.com:8443 - certificate: home_dir/platforms/r3-corda/configuration/build/corda/doorman/tls/ambassador.crt - - service: - type: networkmap - uri: https://networkmap.test.corda.blockchaincloudpoc.com:8443 - certificate: home_dir/platforms/r3-corda/configuration/build/corda/networkmap/tls/ambassador.crt - - # Allows specification of one or many organizations that will be connecting to a network. - # If an organization is also hosting the root of the network (e.g. doorman, membership service, etc), - # then these services should be listed in this section as well. - organizations: - # Specification for the new organization. Each organization maps to a VPC and a separate k8s cluster - - organization: - name: neworg - country: US - state: New York - location: New York - subject: "O=Neworg,OU=Neworg,L=New York,C=US" - type: node - external_url_suffix: test.corda.blockchaincloudpoc.com - - cloud_provider: aws # Options: aws, azure, gcp - aws: - access_key: "aws_access_key" # AWS Access key, only used when cloud_provider=aws - secret_key: "aws_secret_key" # AWS Secret key, only used when cloud_provider=aws - - # Kubernetes cluster deployment variables. The config file path and name has to be provided in case - # the cluster has already been created. 
- k8s: - region: "cluster_region" - context: "cluster_context" - config_file: "cluster_config" - - # Hashicorp Vault server address and root-token. Vault should be unsealed. - # Do not check-in root_token - vault: - url: "vault_addr" - root_token: "vault_root_token" - - # Git Repo details which will be used by GitOps/Flux. - # Do not check-in git_access_token - gitops: - git_protocol: "https" # Option for git over https or ssh - git_url: "gitops_ssh_url" # Gitops https or ssh url for flux value files like "https://github.com/hyperledger/bevel.git" - branch: "gitops_branch" # Git branch where release is being made - release_dir: "gitops_release_dir" # Relative Path in the Git repo for flux sync per environment. - chart_source: "gitops_charts" # Relative Path where the Helm charts are stored in Git repo - git_repo: "gitops_repo_url" # Gitops git repository URL for git push like "github.com/hyperledger/bevel.git" - username: "git_username" # Git Service user who has rights to check-in in all branches - password: "git_access_token" # Git Server user access token (Optional for ssh; Required for https) - email: "git_email" # Email to use in git config - private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) - - services: - zone: - name: zone - type: cenm-zone - ports: - enm: 25000 - admin: 12345 - auth: - name: auth - subject: "CN=Test TLS Auth Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - type: cenm-auth - port: 8081 - username: admin - userpwd: p4ssWord - gateway: - name: gateway - subject: "CN=Test TLS Gateway Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - type: cenm-gateway - ports: - servicePort: 8080 - ambassadorPort: 15008 - -``` - - -## Access CENM Management Console - -The detailed steps to access the CENM Management console is given [here](https://docs.r3.com/en/platform/corda/1.5/cenm/cenm-console.html) diff --git a/docs/source/guides/corda/add-new-notary.md b/docs/source/guides/corda/add-new-notary.md index a65ce4e8c69..b1be3387aba 100644 --- a/docs/source/guides/corda/add-new-notary.md +++ b/docs/source/guides/corda/add-new-notary.md @@ -10,7 +10,6 @@ To overcome this, we have created an Ansible playbook. The playbook will update `run flagDay` command must be run after the network parameters update deadline is over (+10 minutes by default). And this command must be run during downtime as it will trigger Corda node restart. - - [Prerequisites](#prerequisites) - [Deploy new Notary Service](#deploy-new-notary-service) - [Run playbook](#run-playbook) @@ -20,20 +19,22 @@ To overcome this, we have created an Ansible playbook. The playbook will update ## Prerequisites To add a new Notary organization, Corda Idman and Networkmap services should already be running. The public certificates and NetworkTrustStore from Idman and Networkmap should be available and specified in the configuration file. ---- -**NOTE**: Addition of a new Notary organization has been tested on an existing network which is created by Bevel. Networks created using other methods may be suitable but this has not been tested by Bevel team. - ---- +!!! note + Addition of a new Notary organization has been tested on an existing network which is created by Bevel. Networks created using other methods may be suitable but this has not been tested by Bevel team. 
## Deploy new Notary Service

-Deploy the additional notary/notaries as separate organizations by following the guidance on [how to add new organizations here](./add-new-org.md). A sample network.yaml for adding new notary orgs can be found [here](https://github.com/hyperledger/bevel/tree/develop/platforms/r3-corda-ent/configuration/samples).
+Deploy the additional notary/notaries as separate organizations by following the guidance on [how to add new organizations here](./add-new-org.md). A sample network.yaml for adding new notary orgs can be found [here](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/configuration/samples/network-addNotary.yaml).
+
+```yaml
+--8<-- "platforms/r3-corda-ent/configuration/samples/network-addNotary.yaml:1:306"
+```

## Run Playbook

-After the new notary is running, execute the playbook `platforms/r3-corda-ent/configuration/add-notaries.yaml` with the same configuration file as used in previous step.
+After the new notary is running, execute the playbook [add-notaries.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/configuration/add-notaries.yaml) with the same configuration file as used in the previous step.
This can be done using the following command

```
ansible-playbook platforms/r3-corda-ent/configuration/add-notaries.yaml --extra-vars "@path-to-new-network.yaml"
diff --git a/docs/source/guides/corda/add-new-org.md b/docs/source/guides/corda/add-new-org.md
index 1adc6be8dd3..4eee1358f43 100644
--- a/docs/source/guides/corda/add-new-org.md
+++ b/docs/source/guides/corda/add-new-org.md
@@ -14,10 +14,8 @@ ## Prerequisites
To add a new organization, Corda Doorman/Idman and Networkmap services should already be running. The public certificates from Doorman/Idman and Networkmap should be available and specified in the configuration file.

----
-**NOTE**: Addition of a new organization has been tested on an existing network which is created by Bevel. Networks created using other methods may be suitable but this has not been tested by Bevel team.
-
----
+!!! note
+    Addition of a new organization has been tested on an existing network which is created by Bevel. Networks created using other methods may be suitable but this has not been tested by Bevel team.

## Create Configuration File

@@ -26,138 +24,23 @@ Refer [this guide](../networkyaml-corda.md) for details on editing the configura
The `network.yaml` file should contain the specific `network.organization` details along with the network service information about the networkmap and doorman service.

----
-**NOTE**: Make sure the doorman and networkmap service certificates are in plain text and not encoded in base64 or any other encoding scheme, along with correct paths to them mentioned in network.yaml.
-
----
-For reference, sample `network.yaml` file looks like below (but always check the latest at `platforms/r3-corda/configuration/samples`):
-
-```
-network:
-  # Network level configuration specifies the attributes required for each organization
-  # to join an existing network.
-  type: corda
-  version: 4.0
-  #enabled flag is frontend is enabled for nodes
-  frontend: enabled
-
-  #Environment section for Kubernetes setup
-  env:
-    type: "env_type"              # tag for the environment.
Important to run multiple flux on single cluster - proxy: ambassador # value has to be 'ambassador' as 'haproxy' has not been implemented for Corda - ambassadorPorts: # Any additional Ambassador ports can be given here, this is valid only if proxy='ambassador' - portRange: # For a range of ports - from: 15010 - to: 15043 - # ports: 15020,15021 # For specific ports - retry_count: 20 # Retry count for the checks - external_dns: enabled # Should be enabled if using external-dns for automatic route configuration - - # Docker registry details where images are stored. This will be used to create k8s secrets - # Please ensure all required images are built and stored in this registry. - # Do not check-in docker_password. - docker: - url: "docker_url" - username: "docker_username" - password: "docker_password" - - # Remote connection information for doorman and networkmap (will be blank or removed for hosting organization) - network_service: - - service: - type: doorman - uri: https://doorman.test.corda.blockchaincloudpoc.com:8443 - certificate: home_dir/platforms/r3-corda/configuration/build/corda/doorman/tls/ambassador.crt - - service: - type: networkmap - uri: https://networkmap.test.corda.blockchaincloudpoc.com:8443 - certificate: home_dir/platforms/r3-corda/configuration/build/corda/networkmap/tls/ambassador.crt - - # Allows specification of one or many organizations that will be connecting to a network. - # If an organization is also hosting the root of the network (e.g. doorman, membership service, etc), - # then these services should be listed in this section as well. - organizations: - # Specification for the new organization. Each organization maps to a VPC and a separate k8s cluster - - organization: - name: neworg - country: US - state: New York - location: New York - subject: "O=Neworg,OU=Neworg,L=New York,C=US" - type: node - external_url_suffix: test.corda.blockchaincloudpoc.com - - cloud_provider: aws # Options: aws, azure, gcp - aws: - access_key: "aws_access_key" # AWS Access key, only used when cloud_provider=aws - secret_key: "aws_secret_key" # AWS Secret key, only used when cloud_provider=aws - - # Kubernetes cluster deployment variables. The config file path and name has to be provided in case - # the cluster has already been created. - k8s: - region: "cluster_region" - context: "cluster_context" - config_file: "cluster_config" - - # Hashicorp Vault server address and root-token. Vault should be unsealed. - # Do not check-in root_token - vault: - url: "vault_addr" - root_token: "vault_root_token" +!!! note + Make sure the doorman and networkmap service certificates are in plain text and not encoded in base64 or any other encoding scheme, along with correct paths to them mentioned in network.yaml. - # Git Repo details which will be used by GitOps/Flux. - # Do not check-in git_access_token - gitops: - git_protocol: "https" # Option for git over https or ssh - git_url: "gitops_ssh_url" # Gitops https or ssh url for flux value files like "https://github.com/hyperledger/bevel.git" - branch: "gitops_branch" # Git branch where release is being made - release_dir: "gitops_release_dir" # Relative Path in the Git repo for flux sync per environment. 
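+
+Concretely, this note applies to the `network_service` entries of the configuration: each `certificate` value must point to a plain-text certificate file readable from where the playbook runs. A minimal sketch of those entries is shown below; the URIs and paths are illustrative only and mirror the sample configuration referenced further down.
+
+```yaml
+  network_service:
+    - service:
+        type: doorman
+        uri: https://doorman.test.corda.blockchaincloudpoc.com:8443
+        # plain-text certificate file, not base64 encoded
+        certificate: home_dir/platforms/r3-corda/configuration/build/corda/doorman/tls/ambassador.crt
+    - service:
+        type: networkmap
+        uri: https://networkmap.test.corda.blockchaincloudpoc.com:8443
+        # plain-text certificate file, not base64 encoded
+        certificate: home_dir/platforms/r3-corda/configuration/build/corda/networkmap/tls/ambassador.crt
+```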
- chart_source: "gitops_charts" # Relative Path where the Helm charts are stored in Git repo - git_repo: "gitops_repo_url" # Gitops git repository URL for git push like "github.com/hyperledger/bevel.git" - username: "git_username" # Git Service user who has rights to check-in in all branches - password: "git_access_token" # Git Server user access token (Optional for ssh; Required for https) - email: "git_email" # Email to use in git config - private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) - - services: - peers: - - peer: - name: neworg - subject: "O=Neworg,OU=Neworg,L=New York,C=US" - type: node - p2p: - port: 10002 - targetPort: 10002 - ambassador: 10070 #Port for ambassador service (use one port per org if using single cluster) - rpc: - port: 10003 - targetPort: 10003 - rpcadmin: - port: 10005 - targetPort: 10005 - dbtcp: - port: 9101 - targetPort: 1521 - dbweb: - port: 8080 - targetPort: 81 - springboot: - targetPort: 20001 - port: 20001 - expressapi: - targetPort: 3000 - port: 3000 +For reference, sample `network.yaml` file [here] (https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda/configuration/samples/network-cordav2.yaml) but always check the latest `network.yaml` file. +```yaml +--8<-- "platforms/r3-corda/configuration/samples/network-cordav2.yaml:1:223" ``` ## Run playbook -The [add-new-organization.yaml](https://github.com/hyperledger/bevel/tree/main/platforms/shared/configuration/add-new-organization.yaml) playbook is used to add a new organization to the existing network. This can be done using the following command +The [add-new-organization.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/shared/configuration/add-new-organization.yaml) playbook is used to add a new organization to the existing network. This can be done using the following command ``` ansible-playbook platforms/shared/configuration/add-new-organization.yaml --extra-vars "@path-to-network.yaml" ``` ---- -**NOTE:** If you have CorDapps and applications, please deploy them as well. - +!!! note + If you have CorDapps and applications, please deploy them as well. diff --git a/docs/source/guides/fabric/add-cli.md b/docs/source/guides/fabric/add-cli.md index 46aad8fecde..9845b60d2f0 100644 --- a/docs/source/guides/fabric/add-cli.md +++ b/docs/source/guides/fabric/add-cli.md @@ -3,66 +3,86 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) - -# Adding cli to Hyperledger Fabric +# Add CLI to a Peer -- [Prerequisites](#prerequisites) -- [Modifying configuration file](#create_config_file) -- [Running playbook to deploy Hyperledger Fabric network](#run_network) +This guide explains how to add a CLI to an existing Hyperledger Fabric network using two methods: +1. Using the `add-cli.yaml` playbook: This method involves running an Ansible playbook that automates the process of adding a CLI to the network. - -## Prerequisites -To add cli a fully configured Fabric network must be present already, i.e. a Fabric network which has Orderers, Peers, Channels (with all Peers already in the channels). The corresponding crypto materials should also be present in their respective Hashicorp Vault. - ---- -**NOTE**: Addition of cli has been tested on an existing network which is created by Bevel. Networks created using other methods may be suitable but this has not been tested by Bevel team. 
- ---- - - -## Modifying Configuration File - -Refer [this guide](../networkyaml-fabric.md) for details on editing the configuration file. - -While modifying the configuration file(`network.yaml`) for adding cli, all the existing organizations should have `org_status` tag as `existing` and the new organization should have `org_status` tag as `new` under `network.channels` e.g. - - network: - channels: - - channel: - .. - .. - participants: - - organization: - .. - .. - org_status: new # new for new organization(s) - - organization: - .. - .. - org_status: existing # existing for old organization(s) +1. Using `helm install`: This method involves using the helm install command to directly install the CLI chart. -and under `network.organizations` as - - network: - organizations: - - organization: - .. - .. - org_status: new # new for new organization(s) - - organization: - .. - .. - org_status: existing # existing for old organization(s) - -The `network.yaml` file should contain the specific `network.organization` details along with the orderer information. - - - -## Run playbook - -The [add-cli.yaml](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/configuration/add-cli.yaml) playbook is used to add cli to the existing network. This can be done using the following command +## Prerequisites -``` -ansible-playbook platforms/shared/configuration/add-cli.yaml --extra-vars "@path-to-network.yaml" -``` \ No newline at end of file +- A fully configured Fabric network with Orderers and Peers. +- Corresponding crypto materials present in Hashicorp Vault or Kubernetes secrets. +- Hyperledger Bevel configured. + +## Method 1: Using the `add-cli.yaml` playbook + +1. **Update Configuration File** + + - Edit the `network.yaml` file to include the new organization with the following details: + - `org_status: new` + - Organization details (name, MSP ID, etc.) + - Orderer information + - Existing organizations should have `org_status: existing` + - Refer to the [networkyaml-fabric.md](../networkyaml-fabric.md) guide for details on editing the configuration file. + + Snippet from `network.channels` section below: + ```yaml + --8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabric-add-organization.yaml:65:139" + ``` + + and from `network.organizations` section below: + + ```yaml + --8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabric-add-organization.yaml:143:155" + .. + .. + --8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabric-add-organization.yaml:406:414" + .. + .. + ``` + +1. **Run Playbook** + + Execute the following command to run the `add-cli.yaml` playbook: + + ``` + ansible-playbook platforms/hyperledger-fabric/configuration/add-cli.yaml --extra-vars "@path-to-network.yaml" + ``` + Replace `path-to-network.yaml` with the actual path to your updated `network.yaml` file. + + This will add the CLI to the specified organization in the existing Fabric network. + +## Method 2: Using `helm install` + +1. **Update the fabric-cli values.yaml file** + + The `values.yaml` file allows you to configure various aspects of the CLI, including: + + - The peer to which the CLI should connect. + - The storage class and size for the CLI's persistent volume claim. + - The local MSP ID of the organization. + - The TLS status of the peer. + - The GRPC Port of the peer. + - The Orderer Address to which the CLI should connect. 
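+
+   The list above corresponds to overrides in the chart's `values.yaml`. A rough sketch of such an override file is shown below; the key names and values are illustrative assumptions only, so always take the authoritative names from the chart's own `values.yaml`.
+
+   ```yaml
+   # Illustrative overrides for the fabric-cli chart -- verify key names against the chart's values.yaml
+   metadata:
+     namespace: org1-net                              # namespace of the peer the CLI connects to
+   storage:
+     class: gp2                                       # storage class for the CLI persistent volume claim
+     size: 256Mi                                      # storage size for the CLI persistent volume claim
+   peer:
+     name: peer0                                      # peer the CLI should connect to
+     localMspId: org1MSP                              # local MSP ID of the organization
+     tlsStatus: true                                  # whether TLS is enabled on the peer
+     address: peer0.org1-net:7051                     # peer GRPC address and port
+     ordererAddress: orderer1.supplychain-net:7050    # orderer the CLI should connect to
+   ```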
+
+   Refer to the [fabric-cli chart documentation](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/charts/fabric-cli) for a complete list of available configuration options.
+
+1. **Install the CLI Chart**
+
+   Execute the following command to install the CLI chart:
+   ```bash
+   # From platforms/hyperledger-fabric/charts directory
+   helm install <release-name> ./fabric-cli --namespace <namespace> --values <values-file>
+   ```
+   Replace the following placeholders:
+
+   - `<release-name>`: The desired name for the CLI release.
+   - `<namespace>`: The Kubernetes namespace where the CLI should be deployed.
+   - `<values-file>`: The path to a YAML file containing the CLI configuration values.
+
+## Additional Notes
+- The `add-cli.yaml` playbook and `helm install` methods have been tested on networks created by Bevel. Networks created using other methods may be suitable, but this has not been tested by the Bevel team.
+- Ensure that the `network.yaml` file contains the specific `network.organization` details along with the orderer information.
diff --git a/docs/source/guides/fabric/add-new-channel.md b/docs/source/guides/fabric/add-new-channel.md
index 5d7e2480f64..b075a0a366d 100644
--- a/docs/source/guides/fabric/add-new-channel.md
+++ b/docs/source/guides/fabric/add-new-channel.md
@@ -3,69 +3,180 @@ [//]: # (##############################################################################################)
[//]: # (SPDX-License-Identifier: Apache-2.0)
[//]: # (##############################################################################################)
-
-# Adding a new channel in Hyperledger Fabric
+# Add a new channel

-- [Prerequisites](#prerequisites)
-- [Modifying configuration file](#create_config_file)
-- [Running playbook to deploy Hyperledger Fabric network](#run_network)
+This guide explains how to add a new channel in a Hyperledger Fabric network using two methods:
+1. Using the `add-new-channel.yaml` playbook: This method involves running an Ansible playbook that automates the process of adding a new channel to the network.
+
+2. Using `helm install`: This method involves using the `helm install` commands to directly add a new channel to the network.
-
## Prerequisites
-To add a new channel a fully configured Fabric network must be present already, i.e. a Fabric network which has Orderers, Peers, Channels (with all Peers already in the channels). The corresponding crypto materials should also be present in their respective Hashicorp Vault.
+- A fully configured Fabric network with Orderers, Peers, and a Peer Organization.
+- Corresponding crypto materials present in Hashicorp Vault or Kubernetes secrets.
+- Hyperledger Bevel configured.
+
+!!! important
+
+    Do not try to add a new organization as a part of this operation. Use only existing organizations for new channel creation.
+
+## Method 1: Using the `add-new-channel.yaml` playbook
+
+1. **Add a defined channel with genesis or channeltx generated in basic deployment**
+
+   **Update Configuration File**
+
+   - Edit the `network.yaml` file to include a channel with the following details:
+     - Organization details (name, CA address, MSP ID, etc.)
+     - Orderer information
+   - Refer to the [networkyaml-fabric.md](../networkyaml-fabric.md) guide for details on editing the configuration file.
+ + Snippet from `network.channels` section below: + + ```yaml + --8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:63:165" + ``` + + **Run Playbook** + + Execute the following command to run the `add-new-channel.yaml` playbook: + + ``` + ansible-playbook platforms/hyperledger-fabric/configuration/add-new-channel.yaml --extra-vars "@path-to-network.yaml" + ``` + Replace `path-to-network.yaml` with the actual path to your updated `network.yaml` file. + + This will add a channel to the existing Fabric network. + +2. **Add a new channel by generating a new genesis or channeltx in an existing network** + + **Update Configuration File** + + - Edit the `network.yaml` file to include a new channel with the following details: + - `channel_status: new` + - Organization details (name, CA address, MSP ID, etc.) + - Orderer information + - Remove existing channels or use `channel_status: existing` + - Refer to the [networkyaml-fabric.md](../networkyaml-fabric.md) guide for details on editing the configuration file. + + Snippet from `network.channels` section below: + + ```yaml + --8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabric-add-new-channel.yaml:63:227" + ``` + + !!! tip + + For reference, see sample [network-fabric-add-channel.yaml](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/configuration/samples/network-fabric-add-new-channel.yaml) file. + + **Run Playbook** + + Execute the following command to run the `add-new-channel.yaml` playbook: + + ``` + ansible-playbook platforms/hyperledger-fabric/configuration/add-new-channel.yaml --extra-vars "@path-to-network.yaml" -e genererate_configtx=true + ``` + Replace `path-to-network.yaml` with the actual path to your updated `network.yaml` file. + + This will add a new channel to the existing Fabric network. ---- -**NOTE**: Do not try to add a new organization as a part of this operation. Use only existing organization for new channel addition. +## Method 2: Using `helm install` ---- +1. **Update the fabric-genesis values.yaml file** - -## Modifying Configuration File + Following changes are must in the `values.yaml` file for a new channel to be added to the network: -Refer [this guide](../networkyaml-fabric.md) for details on editing the configuration file. + - `settings.generateGenesis: false` only needed for Fabric 2.2.x to not generate the syschannel genesis block. + - `channels` to include the new channel. + - All other fields as required by your new channel. + Refer to the [fabric-genesis chart documentation](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/charts/fabric-genesis) for a complete list of available configuration options. -While modifying the configuration file(`network.yaml`) for adding new channel, all the existing channel should have `channel_status` tag as `existing` and the new channel should have `channel_status` tag as `new` under `network.channels` e.g. +1. **Generate the new channel artifacts** - network: - channels: - - channel: - channel_status: existing - .. - .. - participants: - - organization: - .. - .. - - organization: - .. - .. - - channel: - channel_status: new - .. - .. - participants: - - organization: - .. - .. - - organization: - .. - .. + First, save the admin MSP and TLS files for the new participants (peers and/or orderers) locally. 
+ ```bash + # Obtain certificates and the configuration file of each peer organization, place in fabric-genesis/files + cd ./platforms/hyperledger-fabric/charts/fabric-genesis/files + kubectl --namespace org1-net get secret admin-msp -o json > org2.json + kubectl --namespace org1-net get configmap peer0-msp-config -o json > org1-config-file.json + #If additional orderer from a different organization is needed in genesis + kubectl --namespace orderer-net get secret orderer4-tls -o json > orderer4-orderer-tls.json + ``` -The `network.yaml` file should contain the specific `network.organization` details along with the orderer information. + Execute the following command to install the Genesis chart to generate the channel artifacts: + ```bash + cd ../.. + helm dependency update ./fabric-genesis + helm install ./fabric-genesis --namespace --values + ``` + Replace the following placeholders: + - ``: The desired name for the channel artifacts release. + - ``: The Kubernetes namespace where the orderer admins are already present. + - ``: The path to a YAML file containing the new channel configuration values from Step 1. -For reference, see `network-fabric-add-channel.yaml` file [here](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/configuration/samples). +1. **Create channel for Hyperledger Fabric 2.5.x** - -## Run playbook + Execute the following command to create the channel for Hyperledger Fabric 2.5.x: + ```bash + # Create channel + helm install ./fabric-osnadmin-channel-create --namespace --values + ``` + Replace the following placeholders: -The [add-new-channel.yaml](https://github.com/hyperledger/bevel/tree/main/platforms/shared/configuration/add-new-channel.yaml) playbook is used to add a new channel to the existing network. This can be done using the following command + - ``: Release name must be the new channel name. + - ``: The Kubernetes namespace where `fabric-genesis` was installed. + - ``: The path to a YAML file containing the new channel configuration values. + Refer to the [fabric-osnadmin-channel-create chart documentation](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create) for a complete list of available configuration options. -``` -ansible-playbook platforms/hyperledger-fabric/configuration/add-new-channel.yaml --extra-vars "@path-to-network.yaml" -``` + Execute the following command for each Peer which is to join the new Channel: + ```bash + helm install ./fabric-channel-join --namespace --values + ``` + Replace the following placeholders: ---- -**NOTE:** Make sure that the `channel_status` label was set as `new` when the network is deployed for the first time. If you have additional applications, please deploy them as well. + - ``: The desired name for the join-channel release. + - ``: The Kubernetes namespace where corresponding peer exists. + - ``: The path to a YAML file containing the join-channel configuration values. + Refer to the [fabric-channel-join chart documentation](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/charts/fabric-channel-join) for a complete list of available configuration options. + +1. 
**Create channel for Hyperledger Fabric 2.2.x** + + Execute the following command to create the channel for Hyperledger Fabric 2.2.x: + ```bash + # Obtain the file channel.tx and place it in fabric-channel-create/files + cd ./fabric-channel-create/files + kubectl --namespace get configmap -channeltx -o jsonpath='{.data.-channeltx_base64}' > channeltx.json + + # Create channel + cd ../.. + helm install ./fabric-channel-create --namespace --values + ``` + Replace the following placeholders: + + - ``: Release name must be the new channel name. + - ``: The Kubernetes namespace where `fabric-genesis` was installed. + - ``: The Kubernetes namespace of the organization creating the new channel. + - ``: The path to a YAML file containing the new channel configuration values. + Refer to the [fabric-channel-create chart documentation](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/charts/fabric-channel-create) for a complete list of available configuration options. + + + Execute the following command for each Peer which is to join the new Channel: + ```bash + cd ./fabric-channel-join/files + kubectl --namespace get configmap --anchortx -o jsonpath='{.data.--anchortx_base64}' > anchortx.json + + # Join channel + cd ../.. + helm install ./fabric-channel-join --namespace --values + ``` + Replace the following placeholders: + + - ``: Release name must be the new channel name. + - ``: The Kubernetes namespace where `fabric-genesis` was installed. + - ``: The participating organization name. + - ``: The desired name for the join-channel release. + - ``: The Kubernetes namespace where corresponding peer exists. + - ``: The path to a YAML file containing the join-channel configuration values. + Refer to the [fabric-channel-join chart documentation](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/charts/fabric-channel-join) for a complete list of available configuration options. diff --git a/docs/source/guides/fabric/add-new-orderer-org.md b/docs/source/guides/fabric/add-new-orderer-org.md index 04f429da068..121760dd237 100644 --- a/docs/source/guides/fabric/add-new-orderer-org.md +++ b/docs/source/guides/fabric/add-new-orderer-org.md @@ -7,9 +7,8 @@ # Adding a new Orderer organization in Hyperledger Fabric - [Prerequisites](#prerequisites) -- [Modifying configuration file](#create_config_file) -- [Running playbook to deploy Hyperledger Fabric network](#run_network) - +- [Modifying Configuration File](#modifying-configuration-file) +- [Run playbook](#run-playbook) ## Prerequisites @@ -17,7 +16,7 @@ To add a new Orderer organization, a fully configured Fabric network must be pre --- **NOTE**: Addition of a new Orderer organization has been tested on an existing network which is created by Bevel. Networks created using other methods may be suitable but this has not been tested by Bevel team. -Addition of new Orderer organization only works with Fabric 2.2.2 and RAFT Service. +Addition of new Orderer organization only works with Fabric 2.2.2, 2.5.4 and RAFT Service. --- @@ -28,38 +27,26 @@ Refer [this guide](../networkyaml-fabric.md) for details on editing the configur While modifying the configuration file(`network.yaml`) for adding new orderer organization, all the existing organizations should have `org_status` tag as `existing` and the new organization should have `org_status` tag as `new` under `network.channels` e.g. - network: - channels: - - channel: - .. - .. - participants: - - organization: - .. - .. 
- org_status: new # new for new organization(s) - - organization: - .. - .. - org_status: existing # existing for old organization(s) + +```yaml +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabric-add-ordererorg.yaml:64:138" +``` and under `network.organizations` as - network: - organizations: - - organization: - .. - .. - org_status: new # new for new organization(s) - - organization: - .. - .. - org_status: existing # existing for old organization(s) +```yaml +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabric-add-ordererorg.yaml:145:154" + .. + .. +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabric-add-ordererorg.yaml:230:239" + .. + .. +``` The `network.yaml` file should contain the specific `network.organization` details along with the orderer information. -For reference, see `network-fabric-add-ordererorg.yaml` file [here](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/configuration/add-orderer-organization.yaml). +For reference, see `network-fabric-add-ordererorg.yaml` file [here](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/configuration/samples/network-fabric-add-ordererorg.yaml). ## Run playbook diff --git a/docs/source/guides/fabric/add-new-orderer-peer.md b/docs/source/guides/fabric/add-new-orderer-peer.md index 47b433dfe5e..8316a7e71f5 100644 --- a/docs/source/guides/fabric/add-new-orderer-peer.md +++ b/docs/source/guides/fabric/add-new-orderer-peer.md @@ -6,10 +6,9 @@ # Adding a new RAFT orderer to existing Orderer organization in Hyperledger Fabric - - [Prerequisites](#prerequisites) - - [Modifying Configuration File](#modifying-configuration-file) - - [Run playbook](#run-playbook) - - [Chaincode Installation](#chaincode-installation) +- [Prerequisites](#prerequisites) +- [Modifying Configuration File](#modifying-configuration-file) +- [Run playbook](#run-playbook) @@ -31,24 +30,21 @@ For generic instructions on the Fabric configuration file, refer [this guide](.. While modifying the configuration file(`network.yaml`) for adding new peer, all the existing orderers should have `status` tag as `existing` and the new orderers should have `status` tag as `new` under `network.organizations` as - network: - organizations: - - organization: - org_status: existing # org_status must be existing when adding peer - .. - .. - services: - orderers: - - orderer: - .. - .. - status: new # new for new peers(s) - - orderer: - .. - .. - status: existing # existing for existing peers(s) - +```yaml +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2-raft-add-orderer.yaml:126:135" + .. + .. +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2-raft-add-orderer.yaml:174:174" +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2-raft-add-orderer.yaml:185:220" + +``` +and under `network.orderers` the new orderer must be added. + +```yaml +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2-raft-add-orderer.yaml:42:66" +``` + The `network.yaml` file should contain the specific `network.organization` details. 
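+
+In particular, only the `status` flag distinguishes the orderer being added from the ones already running, while the organization itself keeps `org_status: existing`. A minimal sketch of this pattern is shown below; the organization and orderer names are illustrative placeholders, and the full structure is in the sample file referenced above.
+
+```yaml
+  organizations:
+    - organization:
+        name: supplychain        # illustrative existing orderer organization
+        org_status: existing     # must remain 'existing' when only adding an orderer
+        services:
+          orderers:
+            - orderer:
+                name: orderer1   # orderer that is already running
+                status: existing
+            - orderer:
+                name: orderer5   # orderer being added
+                status: new
+```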
Ensure the following is considered when adding the new orderer on a different cluster: diff --git a/docs/source/guides/fabric/add-new-org.md b/docs/source/guides/fabric/add-new-org.md index 0fe219cdb75..c5abe3baf51 100644 --- a/docs/source/guides/fabric/add-new-org.md +++ b/docs/source/guides/fabric/add-new-org.md @@ -7,8 +7,8 @@ # Adding a new organization in Hyperledger Fabric - [Prerequisites](#prerequisites) -- [Modifying configuration file](#create_config_file) -- [Running playbook to deploy Hyperledger Fabric network](#run_network) +- [Modifying Configuration File](#modifying-configuration-file) +- [Run playbook](#run-playbook) @@ -27,38 +27,26 @@ Refer [this guide](../networkyaml-fabric.md) for details on editing the configur While modifying the configuration file(`network.yaml`) for adding new organization, all the existing organizations should have `org_status` tag as `existing` and the new organization should have `org_status` tag as `new` under `network.channels` e.g. - network: - channels: - - channel: - .. - .. - participants: - - organization: - .. - .. - org_status: new # new for new organization(s) - - organization: - .. - .. - org_status: existing # existing for old organization(s) +```yaml +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabric-add-organization.yaml:65:139" +``` and under `network.organizations` as - network: - organizations: - - organization: - .. - .. - org_status: new # new for new organization(s) - - organization: - .. - .. - org_status: existing # existing for old organization(s) +```yaml +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabric-add-organization.yaml:144:155" + .. + .. +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabric-add-organization.yaml:406:414" + .. + .. + +``` The `network.yaml` file should contain the specific `network.organization` details along with the orderer information. -For reference, see `network-fabric-add-organization.yaml` file [here](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/configuration/samples). +For reference, see `network-fabric-add-organization.yaml` file [here](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/configuration/samples/network-fabric-add-organization.yaml). ## Run playbook diff --git a/docs/source/guides/fabric/add-new-peer.md b/docs/source/guides/fabric/add-new-peer.md index ec79ee4a61e..577202525cf 100644 --- a/docs/source/guides/fabric/add-new-peer.md +++ b/docs/source/guides/fabric/add-new-peer.md @@ -3,105 +3,142 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) - -# Adding a new peer to existing organization in Hyperledger Fabric +# Add a new peer to an existing organization - - [Prerequisites](#prerequisites) - - [Modifying Configuration File](#modifying-configuration-file) - - [Run playbook](#run-playbook) - - [Chaincode Installation](#chaincode-installation) +This guide explains how to add a new **general** (non-anchor) peer to an existing organization in a Hyperledger Fabric network using two methods: +1. Using the `add-peer.yaml` playbook: This method involves running an Ansible playbook that automates the process of adding a new peer to the network. + +1. Using `helm install`: This method involves using the `helm install` commands to directly add a new peer to the network. 
- ## Prerequisites -To add a new peer a fully configured Fabric network must be present already, i.e. a Fabric network which has Orderers, Peers, Channels (with all Peers already in the channels) and the organization to which the peer is being added. The corresponding crypto materials should also be present in their respective Hashicorp Vault. +- A fully configured Fabric network with Orderers, Peers, Peer Organization and the Channel that the new peer will join. +- Corresponding crypto materials present in Hashicorp Vault or Kubernetes secrets. +- Hyperledger Bevel configured. + +## Method 1: Using the `add-peer.yaml` playbook ---- -**NOTE**: Addition of a new peer has been tested on an existing network which is created by Bevel. Networks created using other methods may be suitable but this has not been tested by Bevel team. +1. **Additional Considerations** ---- + Consider the following points when adding the new peer on a different cluster: - -## Modifying Configuration File + - The CA server is accessible publicly or at least from the new cluster. + - The CA server public certificate is stored in a local path and that path provided in `network.yaml`. + - There is a single Hashicorp Vault and both clusters (as well as ansible controller) can access it. + - Admin User certs have been already generated and stored in Vault (this is taken care of by deploy-network.yaml playbook if you are using Bevel to setup the network). + - The `network.env.type` is different for different clusters. + - The GitOps release directory `gitops.release_dir` is different for different clusters. -A Sample configuration file for adding new peer is available [here](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/configuration/samples/network-fabricv-add-peer.yaml). Please go through this file and all the comments there and edit accordingly. +1. **Update Configuration File** -For generic instructions on the Fabric configuration file, refer [this guide](../networkyaml-fabric.md). + - Edit the `network.yaml` file to include the new peer with the following details: + - `peerstatus: new` + - `org_status: existing` + - Organization details (name, CA address, MSP ID, etc.) + - Orderer information, if you are going to install/upgrade the existing chaincodes. + - Existing peer(s) should have `peerstatus: existing` + - Refer to the [networkyaml-fabric.md](../networkyaml-fabric.md) guide for details on editing the configuration file. -While modifying the configuration file(`network.yaml`) for adding new peer, all the existing peers should have `peerstatus` tag as `existing` and the new peers should have `peerstatus` tag as `new` under `network.channels` e.g. + Snippet from `network.channels` section below: + ```yaml + --8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabric-add-peer.yaml:60:87" + ``` - network: - channels: - - channel: + and from `network.organizations` section below: + + ```yaml + --8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabric-add-peer.yaml:94:103" .. .. - participants: - - organization: - peers: - - peer: + --8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabric-add-peer.yaml:144:144" + --8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabric-add-peer.yaml:153:159" .. .. - peerstatus: new # new for new peers(s) - gossipAddress: peer0.xxxx.com # gossip Address must be one existing peer - - peer: + --8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabric-add-peer.yaml:187:192" .. .. 
- peerstatus: existing # existing for existing peers(s) - + ``` + +1. **Run Playbook** + + Execute the following command to run the `add-peer.yaml` playbook: + + ``` + ansible-playbook platforms/hyperledger-fabric/configuration/add-peer.yaml --extra-vars "@path-to-network.yaml" + ``` + Replace `path-to-network.yaml` with the actual path to your updated `network.yaml` file. + + This will add a new peer and the new peer will join the channel provided in the existing Fabric network. + +## Method 2: Using `helm install` + +1. **Update the fabric-peernode values.yaml file** + + The following changes are required in the `values.yaml` file for a new peer to be added to the network: + + - `certs.settings.createConfigMaps: false`, as the ConfigMaps for certs are already generated in the same namespace. + - `certs.settings.addPeerValue: true` is the most important flag for adding a new Peer. + - `peer.gossipPeerAddress: <existing-peer-address>` so that the new peer can gossip with an existing peer. + + Refer to the [fabric-peernode chart documentation](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/charts/fabric-peernode) for a complete list of available configuration options. + +1. **Install the fabric-peernode chart** + + Ensure the Orderer TLS certificate is in `fabric-peernode/files`: + + ```bash + # Get the orderer.crt from Kubernetes + cd ./platforms/hyperledger-fabric/charts/fabric-peernode/files + kubectl --namespace supplychain-net get configmap orderer-tls-cacert -o jsonpath='{.data.cacert}' > orderer.crt + ``` -and under `network.organizations` as + Execute the following command to install the Peer chart: + ```bash + cd ../.. + helm dependency update ./fabric-peernode + helm install <release-name> ./fabric-peernode --namespace <namespace> --values <values-file> + ``` + Replace the following placeholders: - network: - organizations: - - organization: - org_status: existing # org_status must be existing when adding peer - .. - .. - services: - peers: - - peer: - .. - .. - peerstatus: new # new for new peers(s) - gossipAddress: peer0.xxxx.com # gossip Address must be one existing peer - - peer: - .. - .. - peerstatus: existing # existing for existing peers(s) - + - `<release-name>`: The desired name for the Peer release. + - `<namespace>`: The Kubernetes namespace where the Peer should be deployed. + - `<values-file>`: The path to a YAML file containing the new peer configuration values. -The `network.yaml` file should contain the specific `network.organization` details. Orderer information is needed if you are going to install/upgrade the existing chaincodes, otherwise it is not needed. And the `org_status` must be `existing` when adding peer. +1. **Update the fabric-channel-join values.yaml file** -Ensure the following is considered when adding the new peer on a different cluster: -- The CA server is accessible publicly or at least from the new cluster. -- The CA server public certificate is stored in a local path and that path provided in network.yaml. -- There is a single Hashicorp Vault and both clusters (as well as ansible controller) can access it. -- Admin User certs have been already generated and store in Vault (this is taken care of by deploy-network.yaml playbook if you are using Bevel to setup the network). -- The `network.env.type` is different for different clusters. -- The GitOps release directory `gitops.release_dir` is different for different clusters. + After the peer has started, we need to join the channel. The channel should already exist in the network.
+ The following changes are required in the `values.yaml` file for a new peer to join an existing channel: - -## Run playbook + - `peer.name: <new-peer-name>` + - `peer.type: general` + - `peer.address: <new-peer-address>` + - `peer.localMspId: <organization-msp-id>` + - `peer.channelName: <channel-name>` + - `peer.ordererAddress: <orderer-address>`, the Orderer address to which the peer should connect. -The [add-peer.yaml](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/configuration/add-peer.yaml) playbook is used to add a new peer to an existing organization in the existing network. This can be done using the following command + Refer to the [fabric-channel-join chart documentation](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/charts/fabric-channel-join) for a complete list of available configuration options. -``` -ansible-playbook platforms/hyperledger-fabric/configuration/add-peer.yaml --extra-vars "@path-to-network.yaml" -``` +1. **Join the channel** --- -**NOTE:** The `peerstatus` is not required when the network is deployed for the first time but is mandatory for addition of new peer. If you have additional applications, please deploy them as well. + Execute the following command to join the channel: + ```bash + # From platforms/hyperledger-fabric/charts directory + helm install <release-name> ./fabric-channel-join --namespace <namespace> --values <values-file> + ``` + Replace the following placeholders: ---- + - `<release-name>`: The desired name for the join channel release. + - `<namespace>`: The Kubernetes namespace; it must be the same as the namespace of the Peer release. + - `<values-file>`: The path to a YAML file containing the updated join channel configuration values. - -## Chaincode Installation +## Additional Notes + +- The `peerstatus` is _optional_ when the network is deployed for the first time but is _mandatory_ for the addition of a new peer. -Use the same network.yaml if you need to install chaincode on the new peers. +- Currently, only a `general` or non-anchor peer can be added. --- -**NOTE:** With Fabric 2.2 chaincode lifecyle, re-installing chaincode on new peer is not needed as when the blocks are synced, the new peer will have access to already committed chaincode. If still needed, you can upgrade the version of the chaincode and install on all peers. +- Chaincode Installation: Use the same `network.yaml` if you need to install chaincode on the new peers. ---- +- With the Fabric 2.2 and 2.5 chaincode lifecycle, re-installing chaincode on the new peer is not needed; once the blocks are synced, the new peer will have access to the already committed chaincode. If still needed, you can upgrade the version of the chaincode and install on all peers. -Refer [this guide](./chaincode-operations.md) for details on installing chaincode. +- Refer [Install chaincode guide](./chaincode-operations.md) or [Install external chaincode guide](./external-chaincode-operations.md) for details on installing chaincode. diff --git a/docs/source/guides/fabric/chaincode-operations.md b/docs/source/guides/fabric/chaincode-operations.md index fe89a1ca5ff..1cb85444447 100644 --- a/docs/source/guides/fabric/chaincode-operations.md +++ b/docs/source/guides/fabric/chaincode-operations.md @@ -23,44 +23,23 @@ The `network.yaml` file should contain the specific `network.organizations.servi For reference, following snippet shows that section of `network.yaml` -``` ---- -network: - .. - .. - organizations: - - organization: - name: manufacturer +```yaml +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:241:248" + .. .. - .. - services: - peers: - - peer: - name: peer0 - ..
- chaincodes: - - name: "chaincode_name" #This has to be replaced with the name of the chaincode - version: "chaincode_version" # This has to be different than the current version - maindirectory: "chaincode_main" #The main directory where chaincode is needed to be placed - repository: - username: "git_username" # Git Service user who has rights to check-in in all branches - password: "git_access_token" - url: "github.com/hyperledger/bevel.git" - branch: develop - path: "chaincode_src" #The path to the chaincode - arguments: 'chaincode_args' #Arguments to be passed along with the chaincode parameters - endorsements: "" #Endorsements (if any) provided along with the chaincode +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:297:297" +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:304:338" ``` ## Chaincode Operations in Bevel for the deployed Hyperledger Fabric network The playbook [chaincode-ops.yaml](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/configuration/chaincode-ops.yaml) is used to install and instantiate chaincode for the existing fabric network. -For Fabric v2.2 multiple operations such as approve, commit and invoke the chaincode are available in the same playbook. +For Fabric v2.2 and 2.5 multiple operations such as approve, commit and invoke the chaincode are available in the same playbook. This can be done by using the following command ``` - ansible-playbook platforms/hyperledger-fabric/configuration/chaincode-ops.yaml --extra-vars "@path-to-network.yaml" +ansible-playbook platforms/hyperledger-fabric/configuration/chaincode-ops.yaml --extra-vars "@path-to-network.yaml" ``` --- diff --git a/docs/source/guides/fabric/deploy-fabric-operator.md b/docs/source/guides/fabric/deploy-fabric-operator.md index b4d92c2e0c1..6c52a7afed2 100644 --- a/docs/source/guides/fabric/deploy-fabric-operator.md +++ b/docs/source/guides/fabric/deploy-fabric-operator.md @@ -3,51 +3,58 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) - # Deploy Fabric Network using Operator - - [Introduction](#introduction) - - [Modifying Configuration File](#modifying-configuration-file) - - [Run playbook](#run-playbook) - ## Introduction -The [bevel-operator-fabric](https://github.com/hyperledger/bevel-operator-fabric) provides a different approach to deploying the Fabric Network. It uses -the kubernetes operator to deploy CAs, Orderers and Peers. -This release supports bevel-operator-fabric version 1.9.0 and all the Fabric platforms supported by it. Also, chaincode and user/certificate management is not yet supported, there will be separate issues to handle this. Current implementation supports till Channel creation and joining. -Due to open issues with bevel-operator-fabric, it is not recommended for Production workloads yet. +The [bevel-operator-fabric](https://github.com/hyperledger/bevel-operator-fabric) provides a streamlined way to deploy a Fabric network. It leverages the Kubernetes operator to manage the deployment of Certificate Authorities (CAs), Orderers, and Peers. This guide covers the deployment process using _bevel-operator-fabric_ version **1.9.0** and the Fabric platforms it supports. ---- -**NOTE**: The bevel-operator-fabric deployment has been tested only for Fabric 2.5.3 +!!! important ---- + Chaincode and user/certificate management are not yet supported by this Bevel release. 
There will be separate issues to address these features. The current implementation supports channel creation and joining. -## Modifying Configuration File -A Sample configuration file for deploying using bevel-operator-fabric is available [here](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/configuration/samples/network-operator-fabric.yaml). Following are the main changes in this file from previous versions: +!!! note -1. `network.env.type` must be `operator`. This is how Ansible will understand that bevel-operator-fabric will be used. -1. `network.env.proxy` must be `istio` as no other proxy is supported by bevel-operator-fabric. -1. Only `443` is supported as external port because that is what bevel-operator-fabric supports. -1. `vault` and `gitops` sections are removed as they are not applicable. + The bevel-operator-fabric automated deployment has been tested with Fabric 2.5.4. -For generic instructions on the Fabric configuration file, refer [this guide](../networkyaml-fabric.md). +## Understanding the Configuration File - -## Run playbook +A Sample configuration file for deploying using _bevel-operator-fabric_ is available [here](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/configuration/samples/network-operator-fabric.yaml). -After all the configurations are updated in the `network.yaml`, execute the following to create the DLT network -``` -# Run the provisioning scripts -ansible-playbook platforms/shared/configuration/site.yaml -e "@./build/network.yaml" +Here are the key changes from other versions: + +1. **`network.env.type`:** Must be set to `operator`. This tells Ansible to use _bevel-operator-fabric_ for deployment. +1. **`network.env.proxy`:** Must be set to `istio` as _bevel-operator-fabric_ currently only supports Istio as a proxy. +1. **External Port:** Only port `443` is supported for external access. +1. **Removed Sections:** The `vault` and `gitops` sections are removed as they are not applicable to this deployment method. +```yaml +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-operator-fabric.yaml:8:21" + .. + .. ``` -The `site.yaml` playbook, in turn calls various playbooks depending on the configuration file and sets up your DLT/Blockchain network. -The [deploy-fabric-console.yaml](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/configuration/deploy-operator-network.yaml) playbook can be used as well if the pre-requisites like Istio and krew is already installed. This can be done using the following command +For a comprehensive guide on the Fabric configuration file, refer to [this guide](../networkyaml-fabric.md). -``` -ansible-playbook platforms/hyperledger-fabric/configuration/deploy-operator-network.yaml -e "@/path/to/network.yaml" -``` +## Running the Deployment Playbook + +After updating the `network.yaml` file with the necessary configurations, follow these steps to create your DLT network. + +1. Run the provisioning scripts: + ``` + ansible-playbook platforms/shared/configuration/site.yaml -e "@./build/network.yaml" + ``` + + The `site.yaml` playbook will call various other playbooks based on your configuration file and set up your DLT/Blockchain network. + +1. 
Alternative Deployment Method (Pre-requisites installed): + + If you have already installed and configured Istio and krew, you can use the [deploy-operator-network.yaml](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/configuration/deploy-operator-network.yaml) playbook: + + ``` + ansible-playbook platforms/hyperledger-fabric/configuration/deploy-operator-network.yaml -e "@/path/to/network.yaml" + ``` -Refer to [bevel-operator-fabric docs](https://hyperledger.github.io/bevel-operator-fabric/) for details the operator and latest releases. +## Manual Deployment +For detailed information about the operator and latest releases, and also for manual deployment instructions, refer to the [bevel-operator-fabric documentation](https://hyperledger.github.io/bevel-operator-fabric/). diff --git a/docs/source/guides/fabric/deploy-operations-console.md b/docs/source/guides/fabric/deploy-operations-console.md index c3a1b4f403b..8d87483ae9f 100644 --- a/docs/source/guides/fabric/deploy-operations-console.md +++ b/docs/source/guides/fabric/deploy-operations-console.md @@ -29,6 +29,15 @@ If you want to create the JSON files automatically by using our ansible playbook A Sample configuration file for deploying Operations Console is available [here](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/configuration/samples/network-fabricv2-raft.yaml). Main change being addition of a new key `organization.fabric_console` which when `enabled` will deploy the operations console for the organization. +```yaml +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:151:166" + .. + .. +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:340:350" + .. + .. +``` + For generic instructions on the Fabric configuration file, refer [this guide](../networkyaml-fabric.md). diff --git a/docs/source/guides/fabric/external-chaincode-operations.md b/docs/source/guides/fabric/external-chaincode-operations.md index 5a4399f6901..0fe9459d084 100644 --- a/docs/source/guides/fabric/external-chaincode-operations.md +++ b/docs/source/guides/fabric/external-chaincode-operations.md @@ -143,33 +143,7 @@ While modifying the configuration file (`network.yaml`), the following two secti - `crypto_mount_path`: If TLS is enabled, path to mount TLS certs and key in the chaincode server pod ```yaml - network: - channels: - - channel: - .. - .. - participants: - organizations: - - organization: - services: - peers: - name: - type: - gossippeeraddress: - cli: - grpc: - port: - chaincodes: - - name: "assettransfer" #This has to be replaced with the name of the chaincode - version: "1" #This has to be replaced with the version of the chaincode - external_chaincode: true - tls: true - buildpack_path: /home/bevel/fabric-samples/asset-transfer-basic/chaincode-external/sampleBuilder - image: ghcr.io/hyperledger/bevel-samples-example:1.0 - arguments: '\"InitLedger\",\"\"' #Arguments to be passed along with the chaincode parameters - crypto_mount_path: /crypto # OPTIONAL | tls: true | Path where crypto shall be mounted for the chaincode server - .. - .. 
+--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2-external-chaincode.yaml:289:319" ``` ## Execute playbook diff --git a/docs/source/guides/fabric/remove-org.md b/docs/source/guides/fabric/remove-org.md index 2a66fdbe42e..87c9d3de0ad 100644 --- a/docs/source/guides/fabric/remove-org.md +++ b/docs/source/guides/fabric/remove-org.md @@ -6,9 +6,9 @@ # Removing an organization in Hyperledger Fabric - - [Prerequisites](#prerequisites) - - [Modifying Configuration File](#modifying-configuration-file) - - [Run playbook](#run-playbook) +- [Prerequisites](#prerequisites) +- [Modifying Configuration File](#modifying-configuration-file) +- [Run playbook](#run-playbook) @@ -27,43 +27,30 @@ Refer [this guide](../networkyaml-fabric.md) for details on editing the configur While modifying the configuration file(`network.yaml`) for removing an organization, all the existing organizations should have `org_status` tag as `existing` and to be deleted organization should have `org_status` tag as `delete` under `network.channels` e.g. - network: - channels: - - channel: - .. - .. - participants: - - organization: - .. - .. - org_status: delete # delete for to be deleted organization(s) - - organization: - .. - .. - org_status: existing # existing for old organization(s) +```yaml +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabric-remove-organization.yaml:62:112" +``` and under `network.organizations` as - network: - organizations: - - organization: - .. - .. - org_status: delete # delete for to be deleted organization(s) - - organization: - .. - .. - org_status: existing # existing for old organization(s) +```yaml +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabric-remove-organization.yaml:117:128" + .. + .. +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabric-remove-organization.yaml:381:389" + .. + .. +``` The `network.yaml` file should contain the specific `network.organization` details along with the orderer information. -For reference, see `network-fabric-remove-organization.yaml` file [here](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/configuration/samples). +For reference, see `network-fabric-remove-organization.yaml` file [here](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/configuration/samples/network-fabric-remove-organization.yaml). ## Run playbook -The [remove-organization.yaml](https://github.com/hyperledger/bevel/platforms/hyperledger-fabric/configuration/remove-organization.yaml) playbook is used to remove organization(s) from the existing network. This can be done using the following command +The [remove-organization.yaml](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/configuration/remove-organization.yaml) playbook is used to remove organization(s) from the existing network. 
This can be done using the following command ``` ansible-playbook platforms/hyperledger-fabric/configuration/remove-organization.yaml --extra-vars "@path-to-network.yaml" diff --git a/docs/source/guides/fabric/setup-cactus-connector.md b/docs/source/guides/fabric/setup-cactus-connector.md index 6b42500daf8..f404bc84671 100644 --- a/docs/source/guides/fabric/setup-cactus-connector.md +++ b/docs/source/guides/fabric/setup-cactus-connector.md @@ -7,8 +7,8 @@ # Deploy Fabric Cactus connector - [Prerequisites](#prerequisites) -- [Modifying configuration file](#create_config_file) -- [Running playbook to deploy Hyperledger Fabric network](#run_network) +- [Modifying Configuration File](#modifying-configuration-file) +- [Run playbook](#run-playbook) @@ -27,31 +27,17 @@ Refer [this guide](../networkyaml-fabric.md) for details on editing the configur While modifying the configuration file(`network.yaml`)to deploy the cactus connector, all peers in member organizations should have `cactus_connector` tag as `enabled` e.g. - network: - organizations: - - organization: - type: peer +```yaml +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:242:248" + .. + .. +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:297:297" +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:304:312" .. .. - services: - peers: - - peer: - .. - .. - cactus_connector: enabled # set to enabled to create a cactus connector for Fabric - - - organization: - type: peer - .. - .. - services: - peers: - - peer: - .. - .. - cactus_connector: enabled # set to enabled to create a cactus connector for Fabric +``` -For reference, see `network-fabricv2.yaml` file [here](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/configuration/samples). +For reference, see `network-fabricv2.yaml` file [here](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml). diff --git a/docs/source/guides/fabric/upgrade-chaincode.md b/docs/source/guides/fabric/upgrade-chaincode.md index e10ca134835..b01e3c84f56 100644 --- a/docs/source/guides/fabric/upgrade-chaincode.md +++ b/docs/source/guides/fabric/upgrade-chaincode.md @@ -6,12 +6,11 @@ # Upgrading chaincode in Hyperledger Fabric -- [Upgrading chaincode in Hyperledger Fabric](#upgrading-chaincode-in-hyperledger-fabric) - - [Pre-requisites](#pre-requisites) - - [Modifying configuration file](#modifying-configuration-file) - - [Run playbook for Fabric version 1.4.x](#run-playbook-for-fabric-version-14x) - - [Run playbook for Fabric version 2.2.x](#run-playbook-for-fabric-version-22x) - - [Run playbook for Fabric version 2.2.x with external chaincode](#run-playbook-for-fabric-version-22x-with-external-chaincode) +- [Pre-requisites](#pre-requisites) +- [Modifying configuration file](#modifying-configuration-file) +- [Run playbook for Fabric version 1.4.x](#run-playbook-for-fabric-version-14x) +- [Run playbook for Fabric version 2.2.x](#run-playbook-for-fabric-version-22x) +- [Run playbook for Fabric version 2.2.x with external chaincode](#run-playbook-for-fabric-version-22x-with-external-chaincode) ## Pre-requisites @@ -26,35 +25,12 @@ The `network.yaml` file should contain the specific `network.organizations.servi For reference, following snippet shows that section of `network.yaml` -``` ---- -network: - .. - .. 
- organizations: - - organization: - name: manufacturer +```yaml +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:242:248" + .. .. - .. - services: - peers: - - peer: - name: peer0 - .. - chaincodes: - - name: "chaincode_name" #This has to be replaced with the name of the chaincode - version: "chaincode_version" # This has to be greater than the current version, should be an integer. - sequence: "2" # sequence of the chaincode, update this only for chaincode upgrade depending on the last sequence - maindirectory: "chaincode_main" #The main directory where chaincode is needed to be placed - lang: "java" # The chaincode language, optional field with default vaule of 'go'. - repository: - username: "git_username" # Git Service user who has rights to check-in in all branches - password: "git_access_token" - url: "github.com/hyperledger/bevel.git" - branch: develop - path: "chaincode_src" #The path to the chaincode - arguments: 'chaincode_args' #Arguments to be passed along with the chaincode parameters - endorsements: "" #Endorsements (if any) provided along with the chaincode +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:297:297" +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:304:338" ``` When the chaincode is an external service, `network.organizations.services.peers.chaincodes[*].upgrade_chaincode` variable must also be added to change the version. If only the sequence is modified, it isn't necessary to add this field. @@ -63,32 +39,12 @@ The sequence must be incremented in each execution regardless of whether the ver For reference, following snippet shows that section of `network.yaml` -``` ---- -network: - .. - .. - organizations: - - organization: - name: manufacturer +```yaml +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2-external-chaincode.yaml:227:233" + .. .. - .. - services: - peers: - - peer: - name: peer0 - .. - chaincodes: - - name: "chaincode_name" #This has to be replaced with the name of the chaincode - version: "2" #This has to be replaced with the version of the chaincode - sequence: "2" - external_chaincode: true - upgrade_chaincode: true - tls: true - buildpack_path: /home/fabric-samples/asset-transfer-basic/chaincode-external/sampleBuilder # The path where buildpacks are locally stored - image: ghcr.io/hyperledger/bevel-samples-example:1.0 - arguments: '\"InitLedger\",\"\"' # Init Arguments to be passed which will mark chaincode as init-required - crypto_mount_path: /crypto # OPTIONAL | tls: true | Path where crypto shall be mounted for the chaincode server +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2-external-chaincode.yaml:282:282" +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2-external-chaincode.yaml:289:321" ``` diff --git a/docs/source/guides/fabric/upgrade-network-1.4.x-2.2.x.md b/docs/source/guides/fabric/upgrade-network-1.4.x-2.2.x.md index 676f186903f..f7acd80f7a5 100755 --- a/docs/source/guides/fabric/upgrade-network-1.4.x-2.2.x.md +++ b/docs/source/guides/fabric/upgrade-network-1.4.x-2.2.x.md @@ -45,9 +45,10 @@ Update the network network.yaml [here](https://github.com/hyperledger/bevel/tree a. Set the required version tag under `network.version` for upgrading the base images of CA, orderer and peer. b. 
Add the upgrade flag to true For example: - network: - version: 2.2.2 - upgrade: true + +```yaml +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:8:16" +``` Note: The network.yaml should reflect the entire network which requires to be upgraded diff --git a/docs/source/guides/fabric/upgrade-network.md b/docs/source/guides/fabric/upgrade-network.md index 0d28d8b476b..ebcfa71da2d 100644 --- a/docs/source/guides/fabric/upgrade-network.md +++ b/docs/source/guides/fabric/upgrade-network.md @@ -34,17 +34,17 @@ For example, for Fabric v1.4.8, these are the image tags of the supporting docke The network.yaml [here](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml) should be updated with the required version tag under `network.version` for upgrading the base images of CA, orderer and peer. For example: - - network: - version: 1.4.8 +```yaml +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:8:16" +``` 2 files need to be edited in order to support version change for kafka, zookeeper and couchDB | File | Fabric entity | Key | |---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|-------------------------| -| [orderer vars](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/configuration/roles/create/orderers/vars/main.yaml) | kafka | kafka_image_version | -| [orderer vars](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/configuration/roles/create/orderers/vars/main.yaml) | zookeeper | zookeeper_image_version | -| [peer vars](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/configuration/roles/create/peers/vars/main.yaml) | couchDB | couchdb_image_version | +| [orderer vars](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/configuration/roles/helm_component/vars/main.yaml#L35) | kafka | kafka_image | +| [orderer vars](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/configuration/roles/helm_component/vars/main.yaml#L36) | zookeeper | zookeeper_image | +| [peer vars](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/configuration/roles/helm_component/vars/main.yaml#L53) | couchDB | couchdb_image | ## Executing Ansible playbook The playbook [site.yaml](https://github.com/hyperledger/bevel/tree/main/platforms/shared/configuration/site.yaml) ([ReadMe](https://github.com/hyperledger/bevel/tree/main/platforms/shared/configuration/)) can be run after the configuration file (for example: [network.yaml](https://github.com/hyperledger/bevel/tree/main/platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml) for Fabric) has been updated. diff --git a/docs/source/guides/indy-add-new-org.md b/docs/source/guides/indy-add-new-org.md index 5a9c832eb50..d3f54e2547c 100644 --- a/docs/source/guides/indy-add-new-org.md +++ b/docs/source/guides/indy-add-new-org.md @@ -20,7 +20,7 @@ ## Prerequisites To add a new organization in Indy, an existing Indy network should be running, pool and domain genesis files should be available. -!!! note +??? note "add organization" The guide is only for the addition of VALIDATOR Node in existing Indy network. 
@@ -38,238 +38,12 @@ The `network.yaml` file should contain the specific `network.organization` detai If you are adding node to the same cluster as of another node, make sure that you add the ambassador ports of the existing node present in the cluster to the network.yaml -For reference, sample `network.yaml` file looks like below (but always check the latest network-indy-newnode-to-bevel-network.yaml at `platforms/hyperledger-indy/configuration/samples`): +Use this [sample configuration file](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-indy/configuration/samples/network-indy-newnode-to-baf-network.yaml) as a base. +```yaml +--8<-- "platforms/hyperledger-indy/configuration/samples/network-indy-newnode-to-baf-network.yaml" ``` ---- -# This is a sample configuration file for hyperledger indy which can be reused for adding of new org with 1 validator node to a fully Bevel managed network. -# It has 2 organizations: -# 1. existing organization "university" with 1 trustee, 4 stewards and 1 endorser -# 2. new organization "bank" with 1 trustee, 1 steward and 1 endorser - -network: - # Network level configuration specifies the attributes required for each organization - # to join an existing network. - type: indy - version: 1.11.0 # Supported versions 1.11.0 and 1.12.1 - - #Environment section for Kubernetes setup - env: - type: indy # tag for the environment. Important to run multiple flux on single cluster - proxy: ambassador # value has to be 'ambassador' as 'haproxy' has not been implemented for Indy - ambassadorPorts: - portRange: # For a range of ports - from: 9711 - to: 9720 - loadBalancerSourceRanges: # (Optional) Default value is '0.0.0.0/0', this value can be changed to any other IP adres or list (comma-separated without spaces) of IP adresses, this is valid only if proxy='ambassador' - retry_count: 40 # Retry count for the checks - external_dns: enabled # Should be enabled if using external-dns for automatic route configuration - - # Docker registry details where images are stored. This will be used to create k8s secrets - # Please ensure all required images are built and stored in this registry. - # Do not check-in docker_password. - docker: - url: "ghcr.io/hyperledger" - username: "docker_username" - password: "docker_password" - - # It's used as the Indy network name (has impact e.g. on paths where the Indy nodes look for crypto files on their local filesystem) - name: bevel - - # Information about pool transaction genesis and domain transactions genesis - # All the fields below in the genesis section are MANDATORY - genesis: - state: present # must be present when add_new_org is true - pool: /path/to/pool_transactions_genesis # path where pool_transactions_genesis from existing network has been stored locally - domain: /path/to/domain_transactions_genesis # path where domain_transactions_genesis from existing has been stored locally - - # Allows specification of one or many organizations that will be connecting to a network. - organizations: - - organization: - name: university - type: peer - org_status: existing # Status of the organization for the existing network, can be new / existing - cloud_provider: aws - external_url_suffix: indy.blockchaincloudpoc.com # Provide the external dns suffix. Only used when Indy webserver/Clients are deployed. - - aws: - access_key: "aws_access_key" # AWS Access key - secret_key: "aws_secret_key" # AWS Secret key - encryption_key: "encryption_key_id" # AWS encryption key. 
If present, it's used as the KMS key id for K8S storage class encryption. - zone: "availability_zone" # AWS availability zone - region: "region" # AWS region - - publicIps: ["3.221.78.194"] # List of all public IP addresses of each availability zone from all organizations in the same k8s cluster - - # Kubernetes cluster deployment variables. The config file path has to be provided in case - # the cluster has already been created. - k8s: - config_file: "/path/to/cluster_config" - context: "kubernetes-admin@kubernetes" - - # Hashicorp Vault server address and root-token. Vault should be unsealed. - # Do not check-in root_token - vault: - url: "vault_addr" - root_token: "vault_root_token" - - # Git Repo details which will be used by GitOps/Flux. - # Do not check-in git_access_token - gitops: - git_protocol: "https" # Option for git over https or ssh - git_url: "https://github.com//bevel.git" # Gitops https or ssh url for flux value files - branch: "develop" # Git branch where release is being made - release_dir: "platforms/hyperledger-indy/releases/dev" # Relative Path in the Git repo for flux sync per environment. - chart_source: "platforms/hyperledger-indy/charts" # Relative Path where the Helm charts are stored in Git repo - git_repo: "github.com//bevel.git" # Gitops git repository URL for git push - username: "git_username" # Git Service user who has rights to check-in in all branches - password: "git_access_token" # Git Server user password - email: "git@email.com" # Email to use in git config - private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) - - # Services maps to the pods that will be deployed on the k8s cluster - # This sample has trustee, 2 stewards and endoorser - services: - trustees: - - trustee: - name: university-trustee - genesis: true - stewards: - - steward: - name: university-steward-1 - type: VALIDATOR - genesis: true - publicIp: 3.221.78.194 # IP address of current organization in current availability zone - node: - port: 9713 - targetPort: 9713 - ambassador: 9713 # Port for ambassador service - client: - port: 9714 - targetPort: 9714 - ambassador: 9714 # Port for ambassador service - - steward: - name: university-steward-2 - type: VALIDATOR - genesis: true - publicIp: 3.221.78.194 # IP address of current organization in current availability zone - node: - port: 9715 - targetPort: 9715 - ambassador: 9715 # Port for ambassador service - client: - port: 9716 - targetPort: 9716 - ambassador: 9716 # Port for ambassador service - - steward: - name: university-steward-3 - type: VALIDATOR - genesis: true - publicIp: 3.221.78.194 # IP address of current organization in current availability zone - node: - port: 9717 - targetPort: 9717 - ambassador: 9717 # Port for ambassador service - client: - port: 9718 - targetPort: 9718 - ambassador: 9718 # Port for ambassador service - - steward: - name: university-steward-4 - type: VALIDATOR - genesis: true - publicIp: 3.221.78.194 # IP address of current organization in current availability zone - node: - port: 9719 - targetPort: 9719 - ambassador: 9719 # Port for ambassador service - client: - port: 9720 - targetPort: 9720 - ambassador: 9720 # Port for ambassador service - endorsers: - - endorser: - name: university-endorser - full_name: Some Decentralized Identity Mobile Services Partner - avatar: http://university.com/avatar.png - # public endpoint will be {{ endorser.name}}.{{ external_url_suffix}}:{{endorser.server.httpPort}} - # Eg. 
In this sample http://university-endorser.indy.blockchaincloudpoc.com:15033/ - # For minikube: http://>:15033 - server: - httpPort: 15033 - apiPort: 15034 - webhookPort: 15035 - - organization: - name: bank - type: peer - org_status: new # Status of the organization for the existing network, can be new / existing - cloud_provider: aws - external_url_suffix: indy.blockchaincloudpoc.com # Provide the external dns suffix. Only used when Indy webserver/Clients are deployed. - - aws: - access_key: "aws_access_key" # AWS Access key - secret_key: "aws_secret_key" # AWS Secret key - encryption_key: "encryption_key_id" # AWS encryption key. If present, it's used as the KMS key id for K8S storage class encryption. - zone: "availability_zone" # AWS availability zone - region: "region" # AWS region - - publicIps: ["3.221.78.194"] # List of all public IP addresses of each availability zone from all organizations in the same k8s cluster # List of all public IP addresses of each availability zone - - # Kubernetes cluster deployment variables. The config file path has to be provided in case - # the cluster has already been created. - k8s: - config_file: "/path/to/cluster_config" - context: "kubernetes-admin@kubernetes" - - # Hashicorp Vault server address and root-token. Vault should be unsealed. - # Do not check-in root_token - vault: - url: "vault_addr" - root_token: "vault_root_token" - - # Git Repo details which will be used by GitOps/Flux. - # Do not check-in git_access_token - gitops: - git_protocol: "https" # Option for git over https or ssh - git_url: "https://github.com//bevel.git" # Gitops https or ssh url for flux value files - branch: "develop" # Git branch where release is being made - release_dir: "platforms/hyperledger-indy/releases/dev" # Relative Path in the Git repo for flux sync per environment. - chart_source: "platforms/hyperledger-indy/charts" # Relative Path where the Helm charts are stored in Git repo - git_repo: "github.com//bevel.git" # Gitops git repository URL for git push - username: "git_username" # Git Service user who has rights to check-in in all branches - password: "git_access_token" # Git Server user password - email: "git@email.com" # Email to use in git config - private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) - - # Services maps to the pods that will be deployed on the k8s cluster - # This sample has trustee, 2 stewards and endoorser - services: - trustees: - - trustee: - name: bank-trustee - genesis: true - stewards: - - steward: - name: bank-steward-1 - type: VALIDATOR - genesis: true - publicIp: 3.221.78.194 # IP address of current organization in current availability zone - node: - port: 9711 - targetPort: 9711 - ambassador: 9711 # Port for ambassador service - client: - port: 9712 - targetPort: 9712 - ambassador: 9712 # Port for ambassador service - endorsers: - - endorser: - name: bank-endorser - full_name: Some Decentralized Identity Mobile Services Provider - avatar: http://bank.com/avatar.png - -``` Following items must be added/updated to the network.yaml used to add new organizations | Field | Description | @@ -300,117 +74,11 @@ Refer [this guide](./networkyaml-indy.md) for details on editing the configurati The `network.yaml` file should contain the specific `network.organization` details. 
-For reference, sample `network.yaml` file looks like below (but always check the latest network-indy-newnode-to-non-bevel-network.yaml at `platforms/hyperledger-indy/configuration/samples`): +For reference, use this [sample configuration file](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-indy/configuration/samples/network-indy-newnode-to-non-baf-network.yaml) -``` ---- -# This is a sample configuration file for hyperledger indy which can be reused for adding of new org with 1 validator node to an existing non-Bevel managed network. -# It has 1 organization: -# - new organization "bank" with 1 steward and 1 endorser - -network: - # Network level configuration specifies the attributes required for each organization - # to join an existing network. - type: indy - version: 1.11.0 # Supported versions 1.11.0 and 1.12.1 - - #Environment section for Kubernetes setup - env: - type: indy # tag for the environment. Important to run multiple flux on single cluster - proxy: ambassador # value has to be 'ambassador' as 'haproxy' has not been implemented for Indy - ambassadorPorts: - portRange: # For a range of ports - from: 9711 - to: 9712 - loadBalancerSourceRanges: # (Optional) Default value is '0.0.0.0/0', this value can be changed to any other IP adres or list (comma-separated without spaces) of IP adresses, this is valid only if proxy='ambassador' - retry_count: 40 # Retry count for the checks - external_dns: enabled # Should be enabled if using external-dns for automatic route configuration - - # Docker registry details where images are stored. This will be used to create k8s secrets - # Please ensure all required images are built and stored in this registry. - # Do not check-in docker_password. - docker: - url: "ghcr.io/hyperledger" - username: "docker_username" - password: "docker_password" - - # It's used as the Indy network name (has impact e.g. on paths where the Indy nodes look for crypto files on their local filesystem) - name: bevel - - # Information about pool transaction genesis and domain transactions genesis - # All the fields below in the genesis section are MANDATORY - genesis: - state: present # must be present when add_new_org is true - pool: /path/to/pool_transactions_genesis # path where pool_transactions_genesis from existing network has been stored locally - domain: /path/to/domain_transactions_genesis # path where domain_transactions_genesis from existing has been stored locally - - # Allows specification of one or many organizations that will be connecting to a network. - organizations: - - organization: - name: bank - type: peer - org_status: new # Status of the organization for the existing network, can be new / existing - cloud_provider: aws - external_url_suffix: indy.blockchaincloudpoc.com # Provide the external dns suffix. Only used when Indy webserver/Clients are deployed. - - aws: - access_key: "aws_access_key" # AWS Access key - secret_key: "aws_secret_key" # AWS Secret key - encryption_key: "encryption_key_id" # AWS encryption key. If present, it's used as the KMS key id for K8S storage class encryption. - zone: "availability_zone" # AWS availability zone - region: "region" # AWS region - - publicIps: ["3.221.78.194"] # List of all public IP addresses of each availability zone from all organizations in the same k8s cluster # List of all public IP addresses of each availability zone - - # Kubernetes cluster deployment variables. The config file path has to be provided in case - # the cluster has already been created. 
- k8s: - config_file: "/path/to/cluster_config" - context: "kubernetes-admin@kubernetes" - - # Hashicorp Vault server address and root-token. Vault should be unsealed. - # Do not check-in root_token - vault: - url: "vault_addr" - root_token: "vault_root_token" - - # Git Repo details which will be used by GitOps/Flux. - # Do not check-in git_access_token - gitops: - git_protocol: "https" # Option for git over https or ssh - git_url: "https://github.com//bevel.git" # Gitops https or ssh url for flux value files - branch: "develop" # Git branch where release is being made - release_dir: "platforms/hyperledger-indy/releases/dev" # Relative Path in the Git repo for flux sync per environment. - chart_source: "platforms/hyperledger-indy/charts" # Relative Path where the Helm charts are stored in Git repo - git_repo: "github.com//bevel.git" # Gitops git repository URL for git push - username: "git_username" # Git Service user who has rights to check-in in all branches - password: "git_access_token" # Git Server user password - email: "git@email.com" # Email to use in git config - private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) - - # Services maps to the pods that will be deployed on the k8s cluster - # This sample has trustee, 2 stewards and endoorser - services: - stewards: - - steward: - name: bank-steward-1 - type: VALIDATOR - genesis: true - publicIp: 3.221.78.194 # IP address of current organization in current availability zone - node: - port: 9711 - targetPort: 9711 - ambassador: 9711 # Port for ambassador service - client: - port: 9712 - targetPort: 9712 - ambassador: 9712 # Port for ambassador service - endorsers: - - endorser: - name: bank-endorser - full_name: Some Decentralized Identity Mobile Services Provider - avatar: http://bank.com/avatar.png +```yaml +--8<-- "platforms/hyperledger-indy/configuration/samples/network-indy-newnode-to-non-baf-network.yaml" ``` Following items must be added/updated to the network.yaml used to add new organizations diff --git a/docs/source/guides/networkyaml-besu.md b/docs/source/guides/networkyaml-besu.md index b829719e5a6..459a0361320 100644 --- a/docs/source/guides/networkyaml-besu.md +++ b/docs/source/guides/networkyaml-besu.md @@ -17,56 +17,52 @@ A json-schema definition is provided in `platforms/network-schema.json` to assis The configurations are grouped in the following sections for better understanding. -* type +* [type](#type) -* version +* [version](#version) -* env +* [env](#env) -* docker +* [docker](#docker) -* config +* [config](#config) -* organizations +* [organizations](#organizations) -Here is the snapshot from the sample configuration file - -![](./../_static/NetworkYamlBesu.png) +Although, the file itself has comments for each key-value, here is a more detailed description with respective snippets. +=== "Hyperledger-Besu" + Use this [sample configuration file](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-besu/configuration/samples/network-besu.yaml) as a base. + ```yaml + --8<-- "platforms/hyperledger-besu/configuration/samples/network-besu.yaml:7:15" + ``` The sections in the sample configuration file are + +type +: `type` defines the platform choice like corda/fabric/indy/quorum/besu, here in the example its **besu**. -`type` defines the platform choice like corda/fabric/indy/quorum/besu, here in the example its **besu**. + +version +: `version` defines the version of platform being used. 
The current Hyperledger Besu version support is for **21.10.6** and **22.10.2**. -`version` defines the version of platform being used. The current Hyperledger Besu version support is for **21.10.6** and **22.10.2**. + +permissioning +: `permissioning` section contains the flag to enable permissioning in the network -`permissioning` section contains the flag to enable permissioning in the network ```yaml - permissioning: - enabled: false +--8<-- "platforms/hyperledger-besu/configuration/samples/network-besu.yaml:17:18" ``` -`env` section contains the environment type and additional (other than 443) Ambassador port configuration. Vaule for proxy field under this section can be 'ambassador' as 'haproxy' has not been implemented for Besu. - -The snapshot of the `env` section with example value is below -```yaml - env: - type: "env-type" # tag for the environment. Important to run multiple flux on single cluster - proxy: ambassador # value has to be 'ambassador' as 'haproxy' has not been implemented for Hyperledger Besu - # These ports are enabled per cluster, so if you have multiple clusters you do not need so many ports - # This sample uses a single cluster, so we have to open 4 ports for each Node. These ports are again specified for each organization below - ambassadorPorts: # Any additional Ambassador ports can be given here, this is valid only if proxy='ambassador' - portRange: # For a range of ports - from: 15010 - to: 15043 - # ports: [15020, 15021] # For specific ports, needs to be an array or list - loadBalancerSourceRanges: # (Optional) Default value is '0.0.0.0/0', this value can be changed to any other IP adres or list (comma-separated without spaces) of IP adresses, this is valid only if proxy='ambassador' - retry_count: 50 # Retry count for the checks - external_dns: enabled # Should be enabled if using external-dns for automatic route configuration - labels: - service: {} - pvc: {} - deployment: {} + +env +: `env` section contains the environment type and additional (other than 443) Ambassador port configuration. Vaule for proxy field under this section can be 'ambassador' as 'haproxy' has not been implemented for Besu. + +The snippet of the `env` section with example value is below + +```yaml +--8<-- "platforms/hyperledger-besu/configuration/samples/network-besu.yaml:21:41" ``` + The fields under `env` section are | Field | Description | @@ -82,17 +78,13 @@ The fields under `env` section are | labels.pvc | (Optional) Labels to be added to kubernetes pvc | | labels.deployment | (Optional) Labels to be added to kubernetes deployment/statefulset/pod | -`docker` section contains the credentials of the repository where all the required images are built and stored. + +docker +: `docker` section contains the credentials of the repository where all the required images are built and stored. -The snapshot of the `docker` section with example values is below +The snippet of the `docker` section with example values is below ```yaml - # Docker registry details where images are stored. This will be used to create k8s secrets - # Please ensure all required images are built and stored in this registry. - # Do not check-in docker_password. 
- docker: - url: "docker_url" - username: "docker_username" - password: "docker_password" +--8<-- "platforms/hyperledger-besu/configuration/samples/network-besu.yaml:56:59" ``` The fields under `docker` section are @@ -103,34 +95,13 @@ The fields under `docker` section are | password | Password required for login to docker registry| -`config` section contains the common configurations for the Hyperledger Besu network. + +config +: `config` section contains the common configurations for the Hyperledger Besu network. -The snapshot of the `config` section with example values is below +The snippet of the `config` section with example values is below ```yaml - config: - consensus: "ibft" # Options are "ibft", "ethash", "clique" - chain_id: 2018 # Custom chain ID, Optional field - default value is 2018 - ## Certificate subject for the root CA of the network. - # This is for development usage only where we create self-signed certificates and the truststores are generated automatically. - # Production systems should generate proper certificates and configure truststores accordingly. - subject: "CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB" - transaction_manager: "tessera" # Transaction manager can be "tessera" - # This is the version of transaction_manager docker image that will be deployed - # Supported versions # - # tessera: 21.7.3(for besu 21.10.6) - tm_version: "21.7.3" - # TLS can be True or False for the transaction manager - tm_tls: True - # Tls trust value - tm_trust: "tofu" # Options are: "ca-or-tofu", "ca", "tofu" - ## File location for saving the genesis file should be provided. - genesis: "/home/user/bevel/build/besu_genesis" # Location where genesis file will be saved - ## At least one Transaction Manager nodes public addresses should be provided. - # - "https://node.test.besu.blockchaincloudpoc-develop.com" for tessera - # The above domain name is formed by the (http or https)://(peer.name).(org.external_url_suffix):(ambassador tm_nodeport) - tm_nodes: - - "https://carrier.test.besu.blockchaincloudpoc-develop.com" - +--8<-- "platforms/hyperledger-besu/configuration/samples/network-besu.yaml:62:83" ``` The fields under `config` are @@ -147,20 +118,15 @@ The fields under `config` are | tm_nodes | This is an array. Provide at least one tessera node details which will act as bootstrap for other tessera nodes | -The `organizations` section contains the specifications of each organization. + +organizations +: The `organizations` section contains the specifications of each organization. In the sample configuration example, we have four organization under the `organizations` section. -The snapshot of an organization field with sample values is below +The snippet of an organization field with sample values is below ```yaml - organizations: - # Specification for the 1st organization. Each organization maps to a VPC and a separate k8s cluster - - organization: - name: carrier - type: member - # Provide the url suffix that will be added in DNS recordset. Must be different for different clusters - external_url_suffix: test.besu.blockchaincloudpoc.com - cloud_provider: aws # Options: aws, azure, gcp, minikube +--8<-- "platforms/hyperledger-besu/configuration/samples/network-besu.yaml:86:97" ``` Each `organization` under the `organizations` section has the following fields. @@ -176,16 +142,9 @@ Each `organization` under the `organizations` section has the following fields. | gitops | Git Repo details which will be used by GitOps/Flux. 
| | services | Contains list of services which could be validator/peer based on the type of organization | -For the `aws` and `k8s` field the snapshot with sample values is below +For the `aws`, `k8s` and `vault` fields, a snippet is below ```yaml - aws: - access_key: "" # AWS Access key, only used when cloud_provider=aws - secret_key: "" # AWS Secret key, only used when cloud_provider=aws - region: "" # AWS Region where cluster and EIPs are created - # Kubernetes cluster deployment variables. - k8s: - context: "" - config_file: "" +--8<-- "platforms/hyperledger-besu/configuration/samples/network-besu.yaml:98:112" ``` The `aws` field under each organization contains: (This will be ignored if cloud_provider is not `aws`) @@ -203,20 +162,9 @@ The `k8s` field under each organization contains | context | Context/Name of the cluster where the organization entities should be deployed | | config_file | Path to the kubernetes cluster configuration file | -For gitops fields the snapshot from the sample configuration file with the example values is below +For gitops fields the snippet from the sample configuration file with the example values is below ```yaml - # Git Repo details which will be used by GitOps/Flux. - gitops: - git_protocol: "https" # Option for git over https or ssh - git_url: "https://github.com//bevel.git" # Gitops htpps or ssh url for flux value files - branch: "" # Git branch where release is being made - release_dir: "platforms/hyperledger-besu/releases/dev" # Relative Path in the Git repo for flux sync per environment. - chart_source: "platforms/hyperledger-besu/charts" # Relative Path where the Helm charts are stored in Git repo - git_repo: "github.com//bevel.git" # without https:// - username: "" # Git Service user who has rights to check-in in all branches - password: "" # Git Server user password/personal token (Optional for ssh; Required for https) - email: "" # Email to use in git config - private_key: "" # Path to private key (Optional for https; Required for ssh) +--8<-- "platforms/hyperledger-besu/configuration/samples/network-besu.yaml:115:125" ``` The gitops field under each organization contains @@ -236,29 +184,9 @@ The gitops field under each organization contains The services field for each organization under `organizations` section of Hyperledger Besu contains list of `services` which could be peers or validators. -Each organization with type as `member` will have a peers service. The snapshot of peers service with example values is below +Each organization with type as `member` will have a peers service. The snippet of peers service with example values is below ```yaml - peers: - - peer: - name: carrier - subject: "O=Carrier,OU=Carrier,L=51.50/-0.13/London,C=GB" # This is the node subject. L=lat/long is mandatory for supplychain sample app - geth_passphrase: "12345" # Passphrase to be used to generate geth account - lock: true # (for future use) Sets Besu node to lock or unlock mode. 
Can be true or false - p2p: - port: 30303 - ambassador: 15010 #Port exposed on ambassador service (use one port per org if using single cluster) - rpc: - port: 8545 - ambassador: 15011 #Port exposed on ambassador service (use one port per org if using single cluster) - ws: - port: 8546 - db: - port: 3306 # Only applicable for tessra where mysql db is used - tm_nodeport: - port: 8888 - ambassador: 15013 # Port exposed on ambassador service (Transaction manager node port) - tm_clientport: - port: 8080 +--8<-- "platforms/hyperledger-besu/configuration/samples/network-besu.yaml:187:208" ``` The fields under `peer` service are @@ -320,21 +248,9 @@ The additional fields under `peer` service are | smart_contract.entrypoint | Main entrypoint solidity file of the smart contract | | smart_contract.private_for | Comma seperated string of `tessera` Public keys for the `privateFor` | -Each organization with type as `validator` will have a validator service. The snapshot of validator service with example values is below +Each organization with type as `validator` will have a validator service. The snippet of validator service with example values is below ```yaml - validators: - - validator: - name: validator1 - bootnode: true # true if the validator node is used also a bootnode for the network - p2p: - port: 30303 - ambassador: 15010 #Port exposed on ambassador service (use one port per org if using single cluster) - rpc: - port: 8545 - ambassador: 15011 #Port exposed on ambassador service (use one port per org if using single cluster) - ws: - port: 8546 - +--8<-- "platforms/hyperledger-besu/configuration/samples/network-besu.yaml:128:143" ``` The fields under `validator` service are @@ -351,3 +267,4 @@ The fields under `validator` service are | metrics.port | Metrics port for Besu | *** feature is in future scope + diff --git a/docs/source/guides/networkyaml-corda.md b/docs/source/guides/networkyaml-corda.md index d82a36bf796..313c750cecb 100644 --- a/docs/source/guides/networkyaml-corda.md +++ b/docs/source/guides/networkyaml-corda.md @@ -88,7 +88,7 @@ The fields under `docker` section are | password | Password or Access token required for login | !!! note - Please follow [these instructions](../getting-started/configure-prerequisites.md#docker) to build and store the docker images before running the Ansible playbooks. + Please follow [these instructions](../getting-started/configure-prerequisites.md#docker-images) to build and store the docker images before running the Ansible playbooks. network_services diff --git a/docs/source/guides/networkyaml-fabric.md b/docs/source/guides/networkyaml-fabric.md index 46f7a346360..7587ce4d6a0 100644 --- a/docs/source/guides/networkyaml-fabric.md +++ b/docs/source/guides/networkyaml-fabric.md @@ -4,64 +4,66 @@ [//]: # (##############################################################################################) # Configuration file specification: Hyperledger-Fabric -A network.yaml file is the base configuration file designed in Hyperledger Bevel for setting up a Fabric DLT network. This file contains all the information related to the infrastructure and network specifications. Below shows its structure. -![](./../_static/TopLevelClass-Fabric.png) +A network.yaml file is the base configuration file designed in Hyperledger Bevel for setting up a Fabric DLT network. This file contains all the information related to the infrastructure and network specifications. 
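For quick orientation, a rough skeleton of the top-level keys covered in this guide is shown below. This is an illustrative sketch only: it assumes the usual `network:` root key and uses placeholder values, so treat the sample configuration file referenced later in this guide as the authoritative layout.

```yaml
# Illustrative skeleton of a Fabric network.yaml (placeholder values only)
network:
  type: fabric
  version: 2.2.2              # one of the supported Fabric versions
  frontend: enabled           # only relevant when installing the sample Supplychain app
  env:
    type: dev
    proxy: haproxy
  docker:
    url: "docker_url"
    username: "docker_username"
    password: "docker_password"
  consensus:
    name: raft
  orderers: []                # list of orderer definitions, described below
  channels: []                # list of channel definitions, described below
  organizations: []           # list of organization definitions, described below
```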
-Before setting up a Fabric DLT/Blockchain network, this file needs to be updated with the required specifications. -A sample configuration file is provided in the repo path: -`platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml` +??? note "Schema Definition" -A json-schema definition is provided in `platforms/network-schema.json` to assist with semantic validations and lints. You can use your favorite yaml lint plugin compatible with json-schema specification, like `redhat.vscode-yaml` for VSCode. You need to adjust the directive in template located in the first line based on your actual build directory: + A json-schema definition is provided in `platforms/network-schema.json` to assist with semantic validations and lints. You can use your favorite yaml lint plugin compatible with json-schema specification, like `redhat.vscode-yaml` for VSCode. You need to adjust the directive in template located in the first line based on your actual build directory: -`# yaml-language-server: $schema=../platforms/network-schema.json` + `# yaml-language-server: $schema=../platforms/network-schema.json` The configurations are grouped in the following sections for better understanding. -* type +* [type](#type) + +* [version](#version) -* version +* [frontend](#frontend) -* docker +* [env](#env) -* frontend -* env +* [docker](#docker) -* consensus +* [consensus](#consensus) -* orderers +* [orderers](#orderers) -* channels +* [channels](#channels) -* organizations +* [organizations](#organizations) -Here is the snapshot from the sample configuration file -![](./../_static/NetworkYamlFabric1.png) +Before setting up a Fabric DLT/Blockchain network, this file needs to be updated with the required specifications. -The sections in the sample configuration file are: +Use this [sample configuration file](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml) as a base. -`type` defines the platform choice like corda/fabric, here in the example its Fabric +```yaml +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:7:14" +``` -`version` defines the version of platform being used. The current Fabric version support is 1.4.8, 2.2.2 & 2.5.4 + +type +: `type` defines the platform choice like corda/fabric/quorum, here in the example it's **Fabric**. -`frontend` is a flag which defines if frontend is enabled for nodes or not. Its value can only be enabled/disabled. This is only applicable if the sample Supplychain App is being installed. + +version +: `version` defines the version of platform being used. The current Fabric version support is 1.4.8, 2.2.2 & 2.5.4 -`env` section contains the environment type. Value for proxy field under this section can be 'none' or 'haproxy' + +frontend +: `frontend` is a flag which defines if frontend is enabled for nodes or not. Its value can only be enabled/disabled. This is only applicable if the sample Supplychain App is being installed. + + +env +: `env` section contains the environment type. Value for proxy field under this section can be 'none' or 'haproxy' The snapshot of the `env` section with example value is below -```yaml - env: - type: "dev" # tag for the environment.
Important to run multiple flux on single cluster - proxy: haproxy # values can be 'haproxy' or 'none' - retry_count: 100 # Retry count for the checks - external_dns: enabled # Should be enabled if using external-dns for automatic route configuration - annotations: # Additional annotations that can be used for some pods (ca, ca-tools, orderer and peer nodes) - service: - - example1: example2 - deployment: {} - pvc: {} + +```yaml +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:19:28" ``` The fields under `env` section are @@ -71,20 +73,19 @@ The fields under `env` section are | proxy | Choice of the Cluster Ingress controller. Currently supports 'haproxy' for production/inter-cluster and 'none' for single cluster | | retry_count | Retry count for the checks. | |external_dns | If the cluster has the external DNS service, this has to be set `enabled` so that the hosted zone is automatically updated. | -|annotations| Use this to pass additional annotations to the `service`, `deployment` and `pvc` elements of Kubernetes| +|labels| Use this to pass additional labels to the `service`, `deployment` and `pvc` elements of Kubernetes| + -`docker` section contains the credentials of the repository where all the required images are built and stored. + +docker +: `docker` section contains the credentials of the container registry where all the required images are stored. The snapshot of the `docker` section with example values is below + ```yaml - # Docker registry details where images are stored. This will be used to create k8s secrets - # Please ensure all required images are built and stored in this registry. - # Do not check-in docker_password. - docker: - url: "docker_url" - username: "docker_username" - password: "docker_password" +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:34:40" ``` + The fields under `docker` section are | Field | Description | @@ -95,45 +96,32 @@ The fields under `docker` section are !!! tip - Please follow [these instructions](../getting-started/configure-prerequisites.md#docker) to build and store the docker images before running the Ansible playbooks. + Please follow [these instructions](../getting-started/configure-prerequisites.md#docker-images) to build and store the docker images before running the Ansible playbooks. -`consensus` section contains the consensus service that uses the orderers provided in the following `orderers` section. + +consensus +: `consensus` section contains the consensus service that uses the orderers provided in the following `orderers` section. ```yaml - consensus: - name: raft +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:44:45" ``` + The fields under the each `consensus` are | Field | Description | |-------------|----------------------------------------------------------| -| name | Name of the Consensus service. Can be `raft` or `kafka`. | +| name | Name of the Consensus service. Can be `raft` or `kafka`. | -`orderers` section contains a list of orderers with variables which will expose it for the network. + +orderers +: `orderers` section contains a list of orderers with variables which will expose it for the network. 
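In addition to the sample snippet referenced below, a single hand-written orderer entry typically has the following shape; the hostname and certificate path are illustrative values taken from the supplychain example.

```yaml
  orderers:
    - orderer:
        type: orderer              # 'orderer' is the only valid type for Fabric
        name: orderer1
        org_name: supplychain      # must match an organization defined under organizations
        uri: orderer1.org1ambassador.blockchaincloudpoc.com:443  # must include the port
        certificate: /home/bevel/build/orderer1.crt              # the directory must exist before the run
```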
The snapshot of the `orderers` section with example values is below + ```yaml - # Remote connection information for orderer (will be blank or removed for orderer hosting organization) - orderers: - - orderer: - type: orderer - name: orderer1 - org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer1.org1ambassador.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer1.crt # Ensure that the directory exists - - orderer: - type: orderer - name: orderer2 - org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer2.org1ambassador.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer2.crt # Ensure that the directory exists - - orderer: - type: orderer - name: orderer3 - org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer3.org1ambassador.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer3.crt # Ensure that the directory exists +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:46:61" ``` + The fields under the each `orderer` are | Field | Description | @@ -142,98 +130,18 @@ The fields under the each `orderer` are | type | For Fabric, `orderer` is the only valid type of orderers. | | org_name | Name of the organization to which this orderer belongs to | | uri | Orderer URL which is accessible by all Peers. This must include the port even when running on 443 | -| certificate | Path to orderer certificate. For inital network setup, ensure that the directory is present, the file need not be present. For adding a new organization, ensure that the file is the crt file of the orderer of the existing network. | -The `channels` sections contains the list of channels mentioning the participating peers of the organizations. + + +channels +: The `channels` sections contains the list of channels mentioning the participating peers of the organizations. 
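Because the full channel block in the sample is fairly long, here is a trimmed, hand-written channel entry with a single participant to illustrate the overall shape; names and addresses are illustrative and taken from the supplychain example.

```yaml
  channels:
    - channel:
        consortium: SupplyChainConsortium
        channel_name: AllChannel
        orderers:
          - supplychain
        participants:
          - organization:
              name: carrier
              type: creator        # the creator joins the channel, installs and instantiates chaincode
              org_status: new
              peers:
                - peer:
                    name: peer0
                    gossipAddress: peer0.carrier-net.org3ambassador.blockchaincloudpoc.com:443
                    peerAddress: peer0.carrier-net.org3ambassador.blockchaincloudpoc.com:443
                    ordererAddress: orderer1.org1ambassador.blockchaincloudpoc.com:443
        genesis:
          name: OrdererGenesis
```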
The snapshot of channels section with its fields and sample values is below ```yaml - # The channels defined for a network with participating peers in each channel - channels: - - channel: - consortium: SupplyChainConsortium - channel_name: AllChannel - osn_creator_org: - name: supplychain - chaincodes: - - "chaincode_name" - orderers: - - supplychain - participants: - - organization: - name: carrier - type: creator # creator organization will create the channel and instantiate chaincode, in addition to joining the channel and install chaincode - org_status: new - peers: - - peer: - name: peer0 - gossipAddress: peer0.carrier-net.org3ambassador.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the gossip peer - peerAddress: peer0.carrier-net.org3ambassador.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1ambassador.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the orderer - - organization: - name: store - type: joiner # joiner organization will only join the channel and install chaincode - org_status: new - peers: - - peer: - name: peer0 - gossipAddress: peer0.store-net.org4ambassador.blockchaincloudpoc.com:443 - peerAddress: peer0.store-net.org4ambassador.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1ambassador.blockchaincloudpoc.com:443 - - organization: - name: warehouse - type: joiner - org_status: new - peers: - - peer: - name: peer0 - gossipAddress: peer0.warehouse-net.org5ambassador.blockchaincloudpoc.com:443 - peerAddress: peer0.warehouse-net.org5ambassador.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1ambassador.blockchaincloudpoc.com:443 - - organization: - name: manufacturer - type: joiner - org_status: new - peers: - - peer: - name: peer0 - gossipAddress: peer0.manufacturer-net.org2ambassador.blockchaincloudpoc.com:443 - peerAddress: peer0.manufacturer-net.org2ambassador.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1ambassador.blockchaincloudpoc.com:443 - endorsers: - # Only one peer per org required for endorsement - - organization: - name: carrier - peers: - - peer: - name: peer0 - corepeerAddress: peer0.carrier-net.org3ambassador.blockchaincloudpoc.com:443 - certificate: "/path/ca.crt" # certificate path for peer - - organization: - name: warehouse - peers: - - peer: - name: peer0 - corepeerAddress: peer0.warehouse-net.org5ambassador.blockchaincloudpoc.com:443 - certificate: "/path/ca.crt" # certificate path for peer - - organization: - name: manufacturer - peers: - - peer: - name: peer0 - corepeerAddress: peer0.manufacturer-net.org2ambassador.blockchaincloudpoc.com:443 - certificate: "/path/ca.crt" # certificate path for peer - - organization: - name: store - peers: - - peer: - name: peer0 - corepeerAddress: peer0.store-net.org4ambassador.blockchaincloudpoc.com:443 - certificate: "/path/ca.crt" # certificate path for peer - genesis: - name: OrdererGenesis +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:63:165" ``` + The fields under the `channel` are | Field | Description | @@ -269,28 +177,20 @@ Each `organization` field under `endorsers` field of the channel contains the fo | peer.corepeerAddress | Endorsers addresses, including port | | peer.certificate | Certificate path for peer | -The `organizations` section contains the specifications of each organization. 
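To make the `endorsers` fields above concrete, a single hand-written entry under `endorsers` looks roughly like the sketch below; the address and certificate path are illustrative.

```yaml
        endorsers:
          # only one peer per organization is required for endorsement
          - organization:
              name: carrier
              peers:
                - peer:
                    name: peer0
                    corepeerAddress: peer0.carrier-net.org3ambassador.blockchaincloudpoc.com:443
                    certificate: "/path/ca.crt"   # certificate path for the peer
```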
+ + + +organizations +: The `organizations` section contains the specifications of each organization. In the sample configuration example, we have five organization under the `organizations` section The snapshot of an organization field with sample values is below + ```yaml - organizations: - # Specification for the 1st organization. Each organization maps to a VPC and a separate k8s cluster - - organization: - name: supplychain - country: UK - state: London - location: London - subject: "O=Orderer,L=51.50/-0.13/London,C=GB" - type: orderer - external_url_suffix: org1ambassador.blockchaincloudpoc.com - org_status: new - ca_data: - url: ca.supplychain-net:7054 - certificate: file/server.crt # This has not been implemented - cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:167:185" ``` + Each `organization` under the `organizations` section has the following fields. | Field | Description | @@ -300,11 +200,10 @@ Each `organization` under the `organizations` section has the following fields. | state | State of the organization | | location | Location of the organization | | subject | Subject format can be referred at [OpenSSL Subject](https://www.openssl.org/docs/man1.0.2/man1/openssl-req.html) | -| type | This field can be orderer/peer | | external_url_suffix | Public url suffix of the cluster. | | org_status | `new` (for inital setup) or `existing` (for add new org) | -| orderer_org | Ordering service provider. It should only be added to peer organizations | -| ca_data | Contains the certificate authority url (dont include port if running on 443) and certificate path; this has not been implemented yet | +| orderer_org | Ordering service provider. | +| ca_data | Contains the certificate path; this has not been implemented yet | | cloud_provider | Cloud provider of the Kubernetes cluster for this organization. This field can be aws, azure, gcp or minikube | | aws | When the organization cluster is on AWS | | k8s | Kubernetes cluster deployment variables.| @@ -313,16 +212,9 @@ Each `organization` under the `organizations` section has the following fields. | services | Contains list of services which could ca/peer/orderers/concensus based on the type of organization | For the aws and k8s field the snapshot with sample values is below + ```yaml - aws: - access_key: "" # AWS Access key, only used when cloud_provider=aws - secret_key: "" # AWS Secret key, only used when cloud_provider=aws - - # Kubernetes cluster deployment variables. - k8s: - region: "" - context: "" - config_file: "" +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:186:202" ``` The `aws` field under each organization contains: (This will be ignored if cloud_provider is not 'aws') @@ -340,20 +232,17 @@ The `k8s` field under each organization contains | context | Context/Name of the cluster where the organization entities should be deployed | | config_file | Path to the kubernetes cluster configuration file | +The `vault` field under each organization contains + +| Field | Description | +|-------------|----------------------------------------------------------| +| url | The URL for Hashicorp Vault server with port (Do not use 127.0.0.1 or localhost) | +| root_token | The root token for accessing the Vault server | + For gitops fields the snapshot from the sample configuration file with the example values is below + ```yaml - # Git Repo details which will be used by GitOps/Flux. 
- gitops: - git_protocol: "https" # Option for git over https or ssh - git_url: "https://github.com//bevel.git" # Gitops htpps or ssh url for flux value files - branch: "" # Git branch where release is being made - release_dir: "platforms/hyperledger-fabric/releases/dev" # Relative Path in the Git repo for flux sync per environment. - chart_source: "platforms/hyperledger-fabric/charts" # Relative Path where the Helm charts are stored in Git repo - git_repo: "github.com//bevel.git" # without https:// - username: "" # Git Service user who has rights to check-in in all branches - password: "" # Git Server user password/personal token (Optional for ssh; Required for https) - email: "" # Email to use in git config - private_key: "" # Path to private key (Optional for https; Required for ssh) +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:203:215" ``` The gitops field under each organization contains @@ -372,15 +261,11 @@ The gitops field under each organization contains | private_key | Path to the private key file which has write-access to the git repo (Optional for https; Required for ssh) | For Hyperledger Fabric, you can also generate different user certificates and pass the names and attributes in the specific section for `users`. This is only applicable if using Fabric CA. An example is below: + ```yaml - # Generating User Certificates with custom attributes using Fabric CA in BAF for Peer Organizations - users: - - user: - identity: user1 - attributes: - - key: "hf.Revoker" - value: "true" +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:338:344" ``` + The fields under `user` are | Field | Description | @@ -392,17 +277,11 @@ The fields under `user` are The services field for each organization under `organizations` section of Fabric contains list of `services` which could be ca/orderers/consensus/peers based on if the type of organization. Each organization will have a CA service under the service field. The snapshot of CA service with example values is below + ```yaml - # Services maps to the pods that will be deployed on the k8s cluster - # This sample is an orderer service and includes a zk-kafka consensus - services: - ca: - name: ca - subject: "/C=GB/ST=London/L=London/O=Orderer/CN=ca.supplychain-net" - type: ca - grpc: - port: 7054 +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:217:225" ``` + The fields under `ca` service are | Field | Description | @@ -413,45 +292,12 @@ The fields under `ca` service are | grpc.port | Grpc port number | -Each organization with type as peer will have a peers service. The snapshot of peers service with example values is below +Example of peer service. Below is a snapshot of the peer service with example values. + ```yaml - peers: - - peer: - name: peer0 - type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. - gossippeeraddress: peer0.manufacturer-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer - peerAddress: peer0.carrier-net.org3ambassador.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - certificate: /path/manufacturer/peer0.crt # Path to peer Certificate - cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) - configpath: /path/to/peer0-core.yaml # path to custom core.yaml tag. 
- grpc: - port: 7051 - events: - port: 7053 - couchdb: - port: 5984 - restserver: # This is for the rest-api server - targetPort: 20001 - port: 20001 - expressapi: # This is for the express api server - targetPort: 3000 - port: 3000 - chaincodes: - - name: "chaincode_name" #This has to be replaced with the name of the chaincode - version: "chaincode_version" #This has to be replaced with the version of the chaincode - maindirectory: "chaincode_main" #The main directory where chaincode is needed to be placed - repository: - username: "git_username" # Git Service user who has rights to check-in in all branches - password: "git_access_token" - url: "github.com/hyperledger/bevel.git" - branch: develop - path: "chaincode_src" #The path to the chaincode - arguments: 'chaincode_args' #Arguments to be passed along with the chaincode parameters - endorsements: "" #Endorsements (if any) provided along with the chaincode - metrics: - enabled: true # Enable/disable metrics collector for prometheus - port: 9443 # metrics port - internal to the cluster +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:354:387" ``` + The fields under `peer` service are | Field | Description | @@ -460,7 +306,6 @@ The fields under `peer` service are | type | Type can be `anchor` and `nonanchor` for Peer | | gossippeeraddress | Gossip address of another peer in the same Organization, including port. If there is only one peer, then use that peer address. Can be internal if the peer is hosted in the same Kubernetes cluster. | | peerAddress | External address of this peer, including port. Must be the HAProxy qualified address. If using single cluster, this can be internal address. | -| certificate | Path where the Peer's CA certificate will be stored. | | cli | Optional field. If `enabled` will deploy the CLI pod for this Peer. Default is `disabled`. | | configpath | This field is mandatory for using external chaincode. This is the path where a custom core.yaml will be used for the peer. | | grpc.port | Grpc port | @@ -495,14 +340,11 @@ The chaincodes section contains the list of chaincode for the peer, the fields u | crypto_mount_path | Required only when `tls: true`, the path where the crypto materials will be stored | The organization with orderer type will have concensus service. The snapshot of consensus service with example values is below + ```yaml - consensus: - name: raft - type: broker #This field is not consumed for raft consensus - replicas: 4 #This field is not consumed for raft consensus - grpc: - port: 9092 #This field is not consumed for raft consensus +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:227:228" ``` + The fields under `consensus` service are | Field | Description | @@ -512,36 +354,12 @@ The fields under `consensus` service are | replicas | Only for `kafka`. Replica count of the brokers | | grpc.port | Only for `kafka`. Grpc port of consensus service | -The organization with orderer type will have orderers service. The snapshot of orderers service with example values is below +Example of ordering service. The snapshot of orderers service with example values is below + ```yaml - orderers: - # This sample has multiple orderers as an example. - # You can use a single orderer for most production implementations. 
- - orderer: - name: orderer1 - type: orderer - consensus: raft - grpc: - port: 7050 - ordererAddress: orderer1.org1ambassador.blockchaincloudpoc.com:443 - - orderer: - name: orderer2 - type: orderer - consensus: raft - grpc: - port: 7050 - ordererAddress: orderer2.org1ambassador.blockchaincloudpoc.com:443 - - orderer: - name: orderer3 - type: orderer - consensus: raft - grpc: - port: 7050 - ordererAddress: orderer3.org1ambassador.blockchaincloudpoc.com:443 - metrics: - enabled: true # Enable/disable metrics collector for prometheus - port: 9443 # metrics port - internal to the cluster +--8<-- "platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml:229:253" ``` + The fields under `orderer` service are | Field | Description | diff --git a/docs/source/guides/networkyaml-indy.md b/docs/source/guides/networkyaml-indy.md index eb0dcd0b440..0d55aa15f52 100644 --- a/docs/source/guides/networkyaml-indy.md +++ b/docs/source/guides/networkyaml-indy.md @@ -4,60 +4,54 @@ [//]: # (##############################################################################################) # Configuration file specification: Indy -A network.yaml file is the base configuration file for setting up a Indy network. This file contains all the information related to the infrastructure and network specifications. Here is the structure of it. -![](./../_static/TopLevelClass-Indy.png) +A network.yaml file is the base configuration file for setting up a Indy network. This file contains all the information related to the infrastructure and network specifications. -Before setting up a Indy network, this file needs to be updated with the required specifications. -A sample configuration file is provide in the repo path: -`platforms/hyperledger-indy/configuration/samples/network-indyv3.yaml` +??? note "Schema Definition" -A json-schema definition is provided in `platforms/network-schema.json` to assist with semantic validations and lints. You can use your favorite yaml lint plugin compatible with json-schema specification, like `redhat.vscode-yaml` for VSCode. You need to adjust the directive in template located in the first line based on your actual build directory: + A json-schema definition is provided in `platforms/network-schema.json` to assist with semantic validations and lints. You can use your favorite yaml lint plugin compatible with json-schema specification, like `redhat.vscode-yaml` for VSCode. You need to adjust the directive in template located in the first line based on your actual build directory: -`# yaml-language-server: $schema=../platforms/network-schema.json` + `# yaml-language-server: $schema=../platforms/network-schema.json` The configurations are grouped in the following sections for better understanding. -* type +* [type](#type) -* version +* [version](#version) -* env +* [env](#env) -* docker +* [docker](#docker) -* name +* [name](#name) -* genesis +* [genesis](#genesis) -* organizations +* [organizations](#organizations) -Here is the snapshot from the sample configuration file -![](./../_static/NetworkYamlIndy.jpg) +Use this [sample configuration file](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-indy/configuration/samples/network-indyv3.yaml) as a base. +```yaml +--8<-- "platforms/hyperledger-indy/configuration/samples/network-indyv3.yaml:7:19" +``` -The sections in the sample configuration file are + +type +: `type` defines the platform choice like corda/fabric/indy, here in example its Indy. 
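Putting the top-level keys from the list above together, a rough skeleton of an Indy network.yaml looks like the sketch below. It is illustrative only (it assumes the usual `network:` root key and uses placeholder values); the sample configuration file linked above is the authoritative reference.

```yaml
# Illustrative skeleton of an Indy network.yaml (placeholder values only)
network:
  type: indy
  version: 1.11.0
  env:
    type: "env_type"
    proxy: ambassador            # 'ambassador' is the only proxy implemented for Indy
  docker:
    url: "docker_url"
    username: "docker_username"
    password: "docker_password"
  name: bevel                    # Indy network name (illustrative)
  genesis:
    state: absent
    pool: genesis/pool_transactions_genesis
    domain: domain/domain_transactions_genesis
  organizations: []              # list of organization definitions, described below
```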
-`type` defines the platform choice like corda/fabric/indy, here in example its Indy + +version +: `version` defines the version of platform being used, here in example the Indy version is 1.11.0 . -`version` defines the version of platform being used, here in example the Indy version is 1.9.2. + +env +: `env` section contains the environment type and additional configuration. Value for proxy field under this section has to be 'ambassador' as 'haproxy' has not been implemented for Indy.. -`env` section contains the environment type and additional configuration. Value for proxy field under this section has to be 'ambassador' as 'haproxy' has not been implemented for Indy. The snapshot of the `env` section with example values is below ```yaml - env: - type: "env_type" # tag for the environment. Important to run multiple flux on single cluster - proxy: ambassador # value has to be 'ambassador' as 'haproxy' has not been implemented for Indy - # Must be different from all steward ambassador ports specified in the rest of this network yaml - ambassadorPorts: # Any additional Ambassador ports can be given here, this is valid only if proxy='ambassador' - # portRange: # For a range of ports - # from: 15010 - # to: 15043 - ports: 15010,15023,15024,15033,15034,15043,15044 # Indy does not use a port range as it creates an NLB, and only necessary ports should be opened - loadBalancerSourceRanges: # (Optional) Default value is '0.0.0.0/0', this value can be changed to any other IP adres or list (comma-separated without spaces) of IP adresses, this is valid only if proxy='ambassador' - retry_count: 20 # Retry count for the checks - external_dns: disabled # Should be enabled if using external-dns for automatic route configuration +--8<-- "platforms/hyperledger-indy/configuration/samples/network-indyv3.yaml:22:34" ``` + The fields under `env` section are | Field | Description | @@ -69,19 +63,15 @@ The fields under `env` section are | retry_count | Retry count for the checks.| | external_dns | If the cluster has the external DNS service, this has to be set `enabled` so that the hosted zone is automatically updated. Must be `enabled` for Identity sample app. | - -`docker` section contains the credentials of the repository where all the required images are built and stored. + +docker +: `docker` section contains the credentials of the container registry where all the required images are stored. The snapshot of the `docker` section with example values is below ```yaml - # Docker registry details where images are stored. This will be used to create k8s secrets - # Please ensure all required images are built and stored in this registry. - # Do not check-in docker_password. - docker: - url: "docker_url" - username: "docker_username" - password: "docker_password" +--8<-- "platforms/hyperledger-indy/configuration/samples/network-indyv3.yaml:36:42" ``` + The fields under `docker` section are | Field | Description | @@ -92,19 +82,26 @@ The fields under `docker` section are !!! tip - Please follow [these instructions](../getting-started/configure-prerequisites.md#docker) to build and store the docker images before running the Ansible playbooks. + Please follow [these instructions](../getting-started/configure-prerequisites.md#docker-images) to build and store the docker images before running the Ansible playbooks. + + +name +: `name` is used as the Indy network name (has impact e.g. on paths where the Indy nodes look for crypto files on their local filesystem) -`name` is used as the Indy network name (has impact e.g. 
on paths where the Indy nodes look for crypto files on their local filesystem) +The snapshot of the `name` section with example values is below +```yaml +--8<-- "platforms/hyperledger-indy/configuration/samples/network-indyv3.yaml:44:45" +``` + + +genesis +: The `genesis` section contains Information about pool transaction genesis and domain transactions genesis. -The snapshot of the `genesis` section with example values is below ```yaml - # Information about pool transaction genesis and domain transactions genesis - genesis: - state: absent - pool: genesis/pool_transactions_genesis - domain: domain/domain_transactions_genesis +--8<-- "platforms/hyperledger-indy/configuration/samples/network-indyv3.yaml:47:51" ``` -The `genesis` section contains Information about pool transaction genesis and domain transactions genesis. `genesis` contains the following fields: + +`genesis` contains the following fields: | Field | Description | |-------------|----------------------------------------------------------| @@ -113,16 +110,14 @@ The `genesis` section contains Information about pool transaction genesis and do | domain | Path to domain transaction genesis. [Readme here](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-indy/configuration/roles/setup/domain_genesis/). | -The `organizations` section allows specification of one or many organizations that will be connecting to a network. If an organization is also hosting the root of the network (e.g. membership service, etc), then these services should be listed in this section as well. + +organizations +: The `organizations` section allows specification of one or many organizations that will be connecting to a network. If an organization is also hosting the root of the network (e.g. membership service, etc), then these services should be listed in this section as well. + -The snapshot of an organization field with sample values is below +The snapshot of an `organization` field with sample values is below ```yaml - - organization: - name: authority - type: peer - external_url_suffix: indy.blockchaincloudpoc.com # Provide the external dns suffix. Only used when Indy webserver/Clients are deployed. - cloud_provider: aws # Values can be 'aws-baremetal', 'aws' or 'minikube' - +--8<-- "platforms/hyperledger-indy/configuration/samples/network-indyv3.yaml:53:60" ``` Each organization under the `organizations` section has the following fields. @@ -134,29 +129,16 @@ Each organization under the `organizations` section has the following fields. |external_url_suffix | Provide the external dns suffix. Only used when Indy webserver/Clients are deployed. external_dns should be enabled for this to work. | | cloud_provider | Cloud provider of the Kubernetes cluster for this organization. This field can be aws_baremetal, aws or minikube. | | aws | When the organization cluster is on AWS | +| publicIps | List of all public IP addresses of each availability zone from all organizations in the same k8s cluster | | k8s | Kubernetes cluster deployment variables.| | vault | Contains Hashicorp Vault server address and root-token in the example | | gitops | Git Repo details which will be used by GitOps/Flux. 
| | services | Contains list of services which could be trustee/steward/endorser | -For the aws and k8s field the snapshot with sample values is below +For the `aws`,`publicIps` and `k8s` field the snapshot with sample values is below ```yaml - aws: - access_key: "aws_access_key" # AWS Access key - secret_key: "aws_secret_key" # AWS Secret key - encryption_key: "encryption_key_id" # AWS encryption key. If present, it's used as the KMS key id for K8S storage class encryption. - zone: "availability_zone" # AWS availability zone - region: "region" # AWS region - - publicIps: ["1.1.1.1","2.2.2.2"] # List of all public IP addresses of each availability zone - - # Kubernetes cluster deployment variables. The config file path has to be provided in case - # the cluster has already been created. - k8s: - config_file: "cluster_config" - context: "kubernetes-admin@kubernetes" - +--8<-- "platforms/hyperledger-indy/configuration/samples/network-indyv3.yaml:62:75" ``` The `aws` field under each organisation contains: (This will be ignored if cloud_provider is not 'aws') @@ -175,8 +157,7 @@ The `publicIps` field under each organisation contains: |-------------|----------------------------------------------------------| | publicIps | List of all public IP addresses of each availability zone from all organizations in the same k8s cluster | -!!! note - +??? note "publicIps" Network.yaml file consists of more organizations, where each organization can be under different availability zone. It means, that each organization has different IP. The field `publicIps` holds list of all IPs of all organizations in the same cluster. This should be in JSON Array format like ["1.1.1.1","2.2.2.2"] and must contain different IP for each availability zone on the K8s cluster i.e. If the K8s cluster is in two AZ, then two IP addresses should be provided here. The `k8s` field under each organisation contains @@ -187,14 +168,9 @@ The `k8s` field under each organisation contains | config_file | Path to the kubernetes cluster configuration file | -For the vault field the snapshot with sample values is below +For the `vault` field the snapshot with sample values is below ```yaml - # Hashicorp Vault server address and root-token. Vault should be unsealed. - # Do not check-in root_token - vault: - url: "vault_addr" - root_token: "vault_root_token" - +--8<-- "platforms/hyperledger-indy/configuration/samples/network-indyv3.yaml:77:81" ``` The `vault` field under each organisation contains: @@ -204,22 +180,9 @@ The `vault` field under each organisation contains: | url | Vault server | | root_token | Vault root token | -For gitops fields the snapshot from the sample configuration file with the example values is below +For `gitops` fields the snapshot from the sample configuration file with the example values is below ```yaml - # Git Repo details which will be used by GitOps/Flux. - # Do not check-in git_access_token - gitops: - git_protocol: "https" # Option for git over https or ssh - git_url: "gitops_ssh_url" # Gitops https or ssh url for flux value files like "https://github.com/hyperledger/bevel.git" - branch: "gitops_branch" # Git branch where release is being made - release_dir: "gitops_release_dir" # Relative Path in the Git repo for flux sync per environment. 
- chart_source: "gitops_charts" # Relative Path where the Helm charts are stored in Git repo - git_repo: "gitops_repo_url" # Gitops git repository URL for git push like "github.com/hyperledger/bevel.git" - username: "git_username" # Git Service user who has rights to check-in in all branches - password: "git_access_token" # Git Server user password/ user token (Optional for ssh; Required for https) - email: "git_email" # Email to use in git config - private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) - +--8<-- "platforms/hyperledger-indy/configuration/samples/network-indyv3.yaml:83:95" ``` The `gitops` field under each organization contains @@ -240,20 +203,12 @@ The `gitops` field under each organization contains The services field for each organization under `organizations` section of Indy contains list of `services` which could be trustee/steward/endorser -The snapshot of trustee service with example values is below +The snapshot of `trustee` service with example values is below ```yaml - services: - trustees: - - trustee: - name: provider-trustee - genesis: true - server: - port: 8000 - ambassador: 15010 +--8<-- "platforms/hyperledger-indy/configuration/samples/network-indyv3.yaml:97:106" ``` -The fields under `trustee` service are (find more about differences between trustee/steward/endorser [here](https://readthedocs.org/projects/indy-node/downloads/pdf/latest/)) - +The fields under `trustee` service are | Field | Description | |-------------|----------------------------------------------------------| | name | Name for the trustee service | @@ -261,24 +216,11 @@ The fields under `trustee` service are (find more about differences between trus | server.port | Applicable for Identity Sample App. This is the Indy webserver container port | | server.ambassador | Applicable for Identity Sample App. This is the Indy webserver ambassador port which will be exposed publicly using the external URL. | -The snapshot of steward service example values is below +The snapshot of `steward` service example values is below ```yaml - services: - stewards: - - steward: - name: provider-steward-1 - type: VALIDATOR - genesis: true - publicIp: 3.221.78.194 - node: - port: 9711 - targetPort: 9711 - ambassador: 9711 # Port for ambassador service - client: - port: 9712 - targetPort: 9712 - ambassador: 9712 # Port for ambassador service +--8<-- "platforms/hyperledger-indy/configuration/samples/network-indyv3.yaml:150:170" ``` + The fields under `steward` service are | Field | Description | @@ -295,21 +237,12 @@ The fields under `steward` service are | client.ambassador | HTTP client port number of ambassador | -The snapshot of endorser service with example values is below +The snapshot of `endorser` service with example values is below ```yaml - services: - endorsers: - - endorser: - name: provider-endorser - full_name: Some Decentralized Identity Mobile Services Provider - avatar: https://provider.com/avatar.png - # public endpoint will be {{ endorser.name}}.{{ external_url_suffix}}:{{endorser.server.httpPort}} - # E.g. 
In this sample https://provider-endorser.indy.blockchaincloudpoc.com:15020/ - # For minikube: http://>:15020 - server: - httpPort: 15020 - apiPort: 15030 +--8<-- "platforms/hyperledger-indy/configuration/samples/network-indyv3.yaml:152:152" +--8<-- "platforms/hyperledger-indy/configuration/samples/network-indyv3.yaml:184:195" ``` + The fields under `endorser` service are | Field | Description | @@ -319,3 +252,4 @@ The fields under `endorser` service are | avatar | Link to avatar. Not used now. | | server.httpPort | Applicable for Identity Sample App. This is the Endorser Agent's Web port which will be exposed publicly using the external URL. | | server.apiPort | Applicable for Identity Sample App. This is the Endorser Agent's API/Swagger port which will be exposed publicly using the external URL. | +| server.webhookPort | Applicable for Identity Sample App. This is the Endorser Agent's webhook port which will be exposed publicly using the external URL. | diff --git a/docs/source/guides/networkyaml-quorum.md b/docs/source/guides/networkyaml-quorum.md index d642d9c3c76..d5dc2f9f127 100644 --- a/docs/source/guides/networkyaml-quorum.md +++ b/docs/source/guides/networkyaml-quorum.md @@ -18,50 +18,49 @@ A json-schema definition is provided in `platforms/network-schema.json` to assis The configurations are grouped in the following sections for better understanding. -* type +* [type](#type) -* version +* [version](#version) -* env +* [env](#env) -* docker +* [docker](#docker) -* config +* [config](#config) -* organizations +* [organizations](#organizations) -Here is the snapshot from the sample configuration file - -![](./../_static/NetworkYamlQuorum.png) +Although the file itself has comments for each key-value, here is a more detailed description with respective snippets. +=== "Quorum" + Use this [sample configuration file](https://github.com/hyperledger/bevel/blob/main/platforms/quorum/configuration/samples/network-quorum.yaml) as a base. + ```yaml + --8<-- "platforms/quorum/configuration/samples/network-quorum.yaml:7:15" + ``` The sections in the sample configuration file are -`type` defines the platform choice like corda/fabric/indy/quorum, here in the example its **quorum**. + +type +: `type` defines the platform choice like corda/fabric/indy/quorum, here in the example it's **quorum**. -`version` defines the version of platform being used. The current Quorum version support is only for **21.4.2** + +version +: `version` defines the version of platform being used. The current Quorum version support is only for **21.4.2** !!! important Use Quorum Version 23.4.0 if you are deploying Supplychain smartcontracts from examples. -`env` section contains the environment type and additional (other than 443) Ambassador port configuration. Vaule for proxy field under this section can be 'ambassador' or 'haproxy' - -The snapshot of the `env` section with example value is below -```yaml - env: - type: "env-type" # tag for the environment. Important to run multiple flux on single cluster - proxy: ambassador # value has to be 'ambassador' as 'haproxy' has not been implemented for Quorum - # These ports are enabled per cluster, so if you have multiple clusters you do not need so many ports - # This sample uses a single cluster, so we have to open 4 ports for each Node.
These ports are again specified for each organization below - ambassadorPorts: # Any additional Ambassador ports can be given here, this is valid only if proxy='ambassador' - portRange: # For a range of ports - from: 15010 - to: 15043 - # ports: 15020,15021 # For specific ports - loadBalancerSourceRanges: 0.0.0.0/0 # Default value is '0.0.0.0/0', this value can be changed to any other IP adres or list (comma-separated without spaces) of IP adresses, this is valid only if proxy='ambassador' - retry_count: 50 # Retry count for the checks - external_dns: enabled # Should be enabled if using external-dns for automatic route configuration + +env +: `env` section contains the environment type and additional (other than 443) Ambassador port configuration. Vaule for proxy field under this section can be 'ambassador' or 'haproxy' + +The snippet of the `env` section with example value is below + +```yaml +--8<-- "platforms/quorum/configuration/samples/network-quorum.yaml:18:31" ``` + The fields under `env` section are | Field | Description | @@ -73,18 +72,15 @@ The fields under `env` section are | retry_count | Retry count for the checks. Use a high number if your cluster is slow. | |external_dns | If the cluster has the external DNS service, this has to be set `enabled` so that the hosted zone is automatically updated. | -`docker` section contains the credentials of the repository where all the required images are built and stored. + +docker +: `docker` section contains the credentials of the repository where all the required images are built and stored. -The snapshot of the `docker` section with example values is below +The snippet of the `docker` section with example values is below ```yaml - # Docker registry details where images are stored. This will be used to create k8s secrets - # Please ensure all required images are built and stored in this registry. - # Do not check-in docker_password. - docker: - url: "docker_url" - username: "docker_username" - password: "docker_password" +--8<-- "platforms/quorum/configuration/samples/network-quorum.yaml:33:39" ``` + The fields under `docker` section are | Field | Description | @@ -94,42 +90,15 @@ The fields under `docker` section are | password | Password required for login to docker registry| -`config` section contains the common configurations for the Quorum network. + +config +: `config` section contains the common configurations for the Quorum network. -The snapshot of the `config` section with example values is below +The snippet of the `config` section with example values is below ```yaml - config: - consensus: "raft" # Options are "raft" and "ibft" - ## Certificate subject for the root CA of the network. - # This is for development usage only where we create self-signed certificates and the truststores are generated automatically. - # Production systems should generate proper certificates and configure truststores accordingly. - subject: "CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB" - transaction_manager: "tessera" # Options are "tessera" or "none" - tm_version: "23.4.0" # This is the version of "tessera" - tm_tls: "strict" # Options are "strict" and "off" - tm_trust: "tofu" # Options are: "ca-or-tofu", "ca", "tofu" - ## Transaction Manager nodes public addresses should be provided. 
- # For "tessera", all participating nodes should be provided - # In the example (for tessera ) below, the domain name is formed by the https://(peer.name).(org.external_url_suffix) - tm_nodes: - - "https://carrier.test.quorum.blockchaincloudpoc.com" - - "https://manufacturer.test.quorum.blockchaincloudpoc.com" - - "https://store.test.quorum.blockchaincloudpoc.com" - - "https://warehouse.test.quorum.blockchaincloudpoc.com" - staticnodes: "/home/user/bevel/build/quorum_staticnodes" # Location where staticnodes will be saved - genesis: "/home/user/bevel/build/quorum_genesis" # Location where genesis file will be saved - # NOTE for the above paths, the directories should exist - ##### Following keys are only used when adding new Node(s) to existing network and should NOT be used to create new network. - bootnode: - #name of the bootnode that matches one from existing node - name: carrier - #ambassador url of the bootnode - url: carrierrpc.test.quorum.blockchaincloudpoc.com - #rpc port of the bootnode - rpcport: 80 - #id of the bootnode - nodeid: 1 +--8<-- "platforms/quorum/configuration/samples/network-quorum.yaml:42:63" ``` + The fields under `config` are | Field | Description | @@ -146,18 +115,15 @@ The fields under `config` are | bootnode | This is only applicable when adding a new node to existing network and contains the boot node rpc details | -The `organizations` section contains the specifications of each organization. + +conforganizations +: `organizations` section contains the specifications of each organization. In the sample configuration example, we have four organization under the `organizations` section. -The snapshot of an organization field with sample values is below +The snippet of an organization field with sample values is below ```yaml - organizations: - # Specification for the 1st organization. Each organization maps to a VPC and a separate k8s cluster - - organization: - name: carrier - external_url_suffix: test.quorum.blockchaincloudpoc.com # This is the url suffix that will be added in DNS recordset. Must be different for different clusters - cloud_provider: aws # Options: aws, azure, gcp, minikube +--8<-- "platforms/quorum/configuration/samples/network-quorum.yaml:67:73" ``` Each `organization` under the `organizations` section has the following fields. @@ -172,16 +138,9 @@ Each `organization` under the `organizations` section has the following fields. | gitops | Git Repo details which will be used by GitOps/Flux. | | services | Contains list of services which could ca/peer/orderers/concensus based on the type of organization | -For the `aws` and `k8s` field the snapshot with sample values is below +For the `aws` `vault` and `k8s` field the snippet with sample values is below ```yaml - aws: - access_key: "" # AWS Access key, only used when cloud_provider=aws - secret_key: "" # AWS Secret key, only used when cloud_provider=aws - - # Kubernetes cluster deployment variables. 
- k8s: - context: "" - config_file: "" +--8<-- "platforms/quorum/configuration/samples/network-quorum.yaml:74:87" ``` The `aws` field under each organization contains: (This will be ignored if cloud_provider is not `aws`) @@ -198,20 +157,18 @@ The `k8s` field under each organization contains | context | Context/Name of the cluster where the organization entities should be deployed | | config_file | Path to the kubernetes cluster configuration file | -For gitops fields the snapshot from the sample configuration file with the example values is below +The `vault` field under each organization contains + +| Field | Description | +|-------------|----------------------------------------------------------| +| url | The URL for Hashicorp Vault server with port (Do not use 127.0.0.1 or localhost) | +| root_token | The root token for accessing the Vault server | + + + +For gitops fields the snippet from the sample configuration file with the example values is below ```yaml - # Git Repo details which will be used by GitOps/Flux. - gitops: - git_protocol: "https" # Option for git over https or ssh - git_url: "https://github.com//bevel.git" # Gitops htpps or ssh url for flux value files - branch: "" # Git branch where release is being made - release_dir: "platforms/Quorum/releases/dev" # Relative Path in the Git repo for flux sync per environment. - chart_source: "platforms/Quorum/charts" # Relative Path where the Helm charts are stored in Git repo - git_repo: "github.com//bevel.git" # without https:// - username: "" # Git Service user who has rights to check-in in all branches - password: "" # Git Server user password/personal token (Optional for ssh; Required for https) - email: "" # Email to use in git config - private_key: "" # Path to private key (Optional for https; Required for ssh) +--8<-- "platforms/quorum/configuration/samples/network-quorum.yaml:90:100" ``` The gitops field under each organization contains @@ -231,28 +188,9 @@ The gitops field under each organization contains The services field for each organization under `organizations` section of Quorum contains list of `services` which could be only peers as of now. -Each organization with type as peer will have a peers service. The snapshot of peers service with example values is below +Each organization with type as peer will have a peers service. The snippet of peers service with example values is below ```yaml - peers: - - peer: - name: carrier - subject: "O=Carrier,OU=Carrier,L=51.50/-0.13/London,C=GB" # This is the node subject. 
L=lat/long is mandatory for supplychain sample app - type: validator # value can be validator or member, only applicable if consensus = 'ibft' - geth_passphrase: 12345 # Passphrase to be used to generate geth account - p2p: - port: 21000 - ambassador: 15010 #Port exposed on ambassador service (use one port per org if using single cluster) - rpc: - port: 8546 - ambassador: 15011 #Port exposed on ambassador service (use one port per org if using single cluster) - transaction_manager: - port: 443 - ambassador: 443 - raft: # Only used if consensus = 'raft' - port: 50401 - ambassador: 15013 - db: # Only used if transaction_manager = "tessera" - port: 3306 +--8<-- "platforms/quorum/configuration/samples/network-quorum.yaml:103:123" ``` The fields under `peer` service are diff --git a/docs/source/guides/networkyaml-substrate.md b/docs/source/guides/networkyaml-substrate.md index 46715056118..4250045722f 100644 --- a/docs/source/guides/networkyaml-substrate.md +++ b/docs/source/guides/networkyaml-substrate.md @@ -3,120 +3,107 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) -# Configuration file specification: Substrate -A network.yaml file is the base configuration file designed in Hyperledger Bevel for setting up a Substrate DLT network. This file contains all the configurations related to the network that has to be deployed. Below shows its structure. -![](./../_static/TopLevelClass-Substrate.png) +# `Configuration file specification: Substrate` +A `network.yaml` file is the base configuration file designed in Hyperledger Bevel for setting up a Substrate DLT network. This file contains all the configurations related to the network that has to be deployed. Below shows its structure. ![](./../_static/TopLevelClass-Substrate.png) -Before setting up a Substrate DLT/Blockchain network, this file needs to be updated with the required specifications. - -A sample configuration file is provided in the repo path: -`platforms/substrate/configuration/samples/network-sample.yaml` +??? note "Schema Definition" -A json-schema definition is provided in `platforms/network-schema.json` to assist with semantic validations and lints. You can use your favorite yaml lint plugin compatible with json-schema specification, like `redhat.vscode-yaml` for VSCode. You need to adjust the directive in template located in the first line based on your actual build directory: + A json-schema definition is provided in `platforms/network-schema.json` to assist with semantic validations and lints. You can use your favorite yaml lint plugin compatible with json-schema specification, like `redhat.vscode-yaml` for VSCode. You need to adjust the directive in template located in the first line based on your actual build directory: -`# yaml-language-server: $schema=../platforms/network-schema.json` + `# yaml-language-server: $schema=../platforms/network-schema.json` The configurations are grouped in the following sections for better understanding. -* type +* [type](#type) + +* [version](#version) -* version +* [env](#env) -* env +* [docker](#docker) -* docker +* [config](#config) -* config +* [organizations](#organizations) + +Before setting up a Substrate DLT/Blockchain network, this file needs to be updated with the required specifications. -* organizations +Use this [sample configuration file](https://github.com/hyperledger/bevel/blob/main/platforms/substrate/configuration/samples/substrate-network-config.yaml) as a base. 
+ + +```yaml +--8<-- "platforms/substrate/configuration/samples/substrate-network-config.yaml:7:15" +``` -Here is the snapshot from the sample configuration file +**The sections in the sample configuration file are**: -![](./../_static/NetworkYamlSubstrate.png) + -The sections in the sample configuration file are +## type +- `type` defines the platform choice like corda/fabric/indy/substrate, here in the example its **substrate**. -`type` defines the platform choice like corda/fabric/indy/substrate, here in the example its **substrate**. + -`version` defines the version of platform being used. The current substrate version support is for **latest** +## version +- `version` defines the version of platform being used. The current substrate version support is for **latest** ---- + -`env` section contains the environment type and additional (other than 443) Ambassador port configuration. Value for proxy field under this section can only be Ambassador as haproxy has not been implemented for substrate. +## env +- `env` section contains the environment type and additional (other than 443) Ambassador port configuration. Value for proxy field under this section can only be Ambassador as haproxy has not been implemented for substrate. The snapshot of the `env` section with example value is below -```yaml - env: - type: "substratedev" # tag for the environment. Important to run multiple flux on single cluster - proxy: ambassador # value has to be 'ambassador' as 'haproxy' has not been implemented for Substrate - # These ports are enabled per cluster, so if you have multiple clusters you do not need so many ports - # This sample uses a single cluster, so we have to open 4 ports for each Node. These ports are again specified for each organization below - ambassadorPorts: # Any additional Ambassador ports can be given here, this is valid only if proxy='ambassador' - portRange: # For a range of ports - from: 15010 - to: 15043 - # ports: 15020,15021 # For specific ports - retry_count: 20 # Retry count for the checks on Kubernetes cluster - external_dns: enabled # Should be enabled if using external-dns for automatic route configuration +```yaml +--8<-- "platforms/substrate/configuration/samples/substrate-network-config.yaml:18:30" ``` -The fields under `env` section are -| Field | Description | -|------------|---------------------------------------------| +**The fields under `env` section are**: + +| **Field** | **Description** | +|------------ |---------------------------------------------| | type | Environment type. Can be like dev/test/prod.| -| proxy | Choice of the Cluster Ingress controller. Currently supports 'ambassador' only as 'haproxy' has not been implemented for Substrate | +| proxy | Choice of the Cluster Ingress controller. Currently supports 'ambassador' only as 'haproxy' has not been implemented for Substrate | +| proxy_namespace | Namespace in which the pods of the Cluster Ingress controller were deployed | | ambassadorPorts | Any additional Ambassador ports can be given here. This is only valid if `proxy: ambassador`. These ports are enabled per cluster, so if you have multiple clusters you do not need so many ports to be opened on Ambassador. Our sample uses a single cluster, so we have to open 4 ports for each Node. These ports are again specified in the `organization` section. | | retry_count | Retry count for the checks. Use a high number if your cluster is slow. 
| |external_dns | If the cluster has the external DNS service, this has to be set `enabled` so that the hosted zone is automatically updated. | -`docker` section contains the credentials of the repository where all the required images are built and stored. + + + +## docker +- `docker` section contains the credentials of the repository where all the required images are built and stored. The snapshot of the `docker` section with example values is below + ```yaml - # Docker registry details where images are stored. This will be used to create k8s secrets - # Please ensure all required images are built and stored in this registry. - # Do not check-in docker_password. - docker: - url: "docker.io" - username: "docker_username" - password: "docker_password" - +--8<-- "platforms/substrate/configuration/samples/substrate-network-config.yaml:32:38" ``` -The fields under `docker` section are -| Field | Description | +**The fields under `docker` section are**: + +| **Field**| **Description** | |----------|----------------------------------------| | url | Docker registry url | | username | Username required for login to docker registry (remove this for public registry)| | password | Password required for login to docker registry (remove this for public registry)| + -`config` section contains the common configurations for the Substrate network. +## config +- `config` section contains the common configurations for the Substrate network. The snapshot of the `config` section with example values is below + ```yaml - config: - ## Certificate subject for the root CA of the network. - # This is for development usage only where we create self-signed certificates and the truststores are generated automatically. - # Production systems should generate proper certificates and configure truststores accordingly. - subject: "CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB" - # Provide the docker image which will be used for the Substrate Nodes in this network. (will be downloaded from docker.url) - node_image: "inteli-poc/dscp-node" - # Provide the command which is used to start the node - command: "./dscp-node" # Please ensure the command corresponds to the node_image above, for eg, target/debug/node-template - # provide a chain name for Substrate nodes - chain: "inteli" - # NOTE for the below paths, the directories should exist - bootnodes: "/BUILD_DIR/substrate_bootnodes" # Location where bootnodes information is read or saved if empty - genesis: "/BUILD_DIR/substrate_genesis" # Location where information is read or saved if empty - ipfsbootnodes: "/BUILD_DIR/ipfs_bootnodes" # Location where IPFS bootnodes information is read or saved if empty - +--8<-- "platforms/substrate/configuration/samples/substrate-network-config.yaml:40:55" ``` -The fields under `config` are -| Field | Description | +**The fields under `config` are**: + +| **Field** | **Description** | |-------------|----------------------------------------------------------| -| subject | This is the subject of the root CA which will be created for the Substrate network. The root CA is for development purposes only, production networks should already have the root certificates. | +| subject | This is the subject of the root CA which will be created for the Substrate network. The root CA is for development purposes only, production networks should already have the root certificates. | | node_image | This is image name in which will be pulled from the specified docker registry. 
| | command | This is the command which will run on the substrate node once it is alive.| | chain | This is the name of the chain which is used for the substrate nodes and genesis.| @@ -125,189 +112,96 @@ The fields under `config` are | ipfsbootnodes | Location where IPFS bootnode information is read or stored if empty, this is for IPFS nodes when you are deploying a DSCP network. | -The `organizations` section contains the specifications of each organization. + -In the sample configuration example, we have four organization under the `organizations` section. +## Organizations -The snapshot of an organization field with sample values is below -```yaml - organizations: - # Specification for the 1st organization. Each organization should map to a VPC and a separate k8s cluster for production deployments - - organization: - name: oem - type: superuser - persona: buyer - external_url_suffix: subs.inteli-poc.com # This is the url suffix that will be added in DNS recordset. Must be different for different clusters - cloud_provider: gcp # Options: aws, azure, gcp -``` -Each `organization` under the `organizations` section has the following fields. - -| Field | Description | -|------------------------------------------|-----------------------------------------------------| -| name | Name of the organization | -| type | Specifies the organization as the superuser/owner. | -| persona | This is used for DSCP app and can be buyer, supplier or thirdparty. | -| external_url_suffix | Public url suffix of the cluster. | -| cloud_provider | Cloud provider of the Kubernetes cluster for this organization. This field can be aws, azure, gcp or minikube | -| aws | When the organization cluster is on AWS | -| k8s | Kubernetes cluster deployment variables.| -| vault | Contains Hashicorp Vault server address and root-token in the example | -| gitops | Git Repo details which will be used by GitOps/Flux. | -| services | Contains list of services which could ca/peer/orderers/concensus based on the type of organization | +- The `organizations` section contains the specifications for the organization. + +- In the sample configuration example, we have and support only one organization under the `organizations` section. + +### Organization Field Snapshot with Sample Values: + +The `organization` under the `organizations` section has the following fields. + +| **Field** | **Description** | +|----------------------|-------------| +| name | Name of the organization | +| type | Specifies the organization as the superuser/owner. | +| persona | This is used for DSCP app and can be buyer, supplier or thirdparty. | +| external_url_suffix | Public url suffix of the cluster. | +| cloud_provider | Cloud provider of the Kubernetes cluster for this organization. This field can be `aws`, `azure`, `gcp` or `minikube` | +| aws | When the organization cluster is on `AWS` | +| k8s | Kubernetes cluster deployment variables.| +| vault | Contains Hashicorp Vault server address and root-token in the example | +| gitops | Git Repo details which will be used by GitOps/Flux. | +| services | Contains list of services which could ca/peer/orderers/concensus based on the type of organization | For the `aws`, `k8s` and `vault` field the snapshot with sample values is below + ```yaml - aws: - access_key: "aws_access_key" # AWS Access key, only used when cloud_provider=aws - secret_key: "aws_secret_key" # AWS Secret key, only used when cloud_provider=aws - # Kubernetes cluster deployment variables. 
The config file path and name has to be provided in case - # the cluster has already been created. - k8s: - context: "GKE_CONTEXT" - config_file: "/BUILD_DIR/config" - # Hashicorp Vault server address and root-token. Vault should be unsealed. - # Do not check-in root_token - vault: - url: "http://VAULT_URL:VAULT_PORT" - root_token: "VAULT_TOKEN" - secret_path: "secretsv2" +--8<-- "platforms/substrate/configuration/samples/substrate-network-config.yaml:66:80" ``` -The `aws` field under each organization contains: (This will be ignored if cloud_provider is not `aws`) +**The `aws` field under each organization contains**: -| Field | Description | -|-------------|----------------------------------------------------------| -| access_key | AWS Access key | -| secret_key | AWS Secret key | +> NOTE: This will be ignored if cloud_provider is not `aws` -The `k8s` field under each organization contains +| **Field** | **Description** | +|------------- |-----------------| +| access_key | AWS Access key | +| secret_key | AWS Secret key | -| Field | Description | -|-------------|----------------------------------------------------------| -| context | Context/Name of the cluster where the organization entities should be deployed | -| config_file | Path to the kubernetes cluster configuration file | +**The `k8s` field under each organization contains**: -The `vault` field under each organization contains +| **Field** | **Description** | +|------------- |---------------------------------------------------------- | +| context | Context/Name of the cluster where the organization entities should be deployed | +| config_file | Path to the kubernetes cluster configuration file | -| Field | Description | -|-------------|----------------------------------------------------------| -| url | url of the vault server including port | -| root_token | root token of the vault server required to access the contents of the vault | -| secret_path | the path in which secrets are stored, used to store and retrieve secrets from the vault | +**The `vault` field under each organization contains**: + +| **Field** | **Description** | +|------------- |----------------------------------------------------------| +| url | url of the vault server including port | +| root_token | root token of the vault server required to access the contents of the vault | +| secret_path | the path in which secrets are stored, used to store and retrieve secrets from the vault | For gitops fields the snapshot from the sample configuration file with the example values is below + ```yaml - # Git Repo details which will be used by GitOps/Flux. - # Do not check-in git_access_token - gitops: - git_protocol: "https" # Option for git over https or ssh - git_url: "https://github.com/inteli-poc/bevel.git" # Gitops https or ssh url for flux value files - branch: "substrate" # Git branch where release is being made - release_dir: "platforms/substrate/releases/dev" # Relative Path in the Git repo for flux sync per environment. 
- chart_source: "platforms/substrate/charts" # Relative Path where the Helm charts are stored in Git repo - git_repo: "github.com/inteli-poc/bevel.git" # Gitops git repository URL for git push - username: "bevel" # Git Service user who has rights to check-in in all branches - password: "GIT_TOKEN" # Git Server user password/token (Optional for ssh; Required for https) - email: "bevel@inteli-poc.com" # Email to use in git config - private_key: "/BUILD_DIR/gitops" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) +--8<-- "platforms/substrate/configuration/samples/substrate-network-config.yaml:84:96" ``` -The gitops field under each organization contains +**The gitops field under each organization contains**: + +| **Field** | **Description** | +|------------- |----------------------------------------------------------| +| git_protocol | Option for git over https or ssh. Can be `https` or `ssh` | +| git_url | SSH or HTTPs url of the repository where flux should be synced | +| branch | Branch of the repository where the Helm Charts and value files are stored | +| release_dir | Relative path where flux should sync files | +| chart_source | Relative path where the helm charts are stored | +| git_repo | Gitops git repo URL https URL for git push like "github.com/hyperledger/bevel.git" | +| username | Username which has access rights to read/write on repository | +| password | Password of the user which has access rights to read/write on repository (Optional for ssh; Required for https) | +| email | Email of the user to be used in git config | +| private_key | Path to the private key file which has write-access to the git repo (Optional for https; Required for ssh) | + +Within the organizations section, there is one listed organization. This organization contains a services section that provides details for all peers, where each participating node is named as a peer. Below is a snapshot of the peers service with example values: -| Field | Description | -|-------------|----------------------------------------------------------| -| git_protocol | Option for git over https or ssh. Can be `https` or `ssh` | -| git_url | SSH or HTTPs url of the repository where flux should be synced | -| branch | Branch of the repository where the Helm Charts and value files are stored | -| release_dir | Relative path where flux should sync files | -| chart_source | Relative path where the helm charts are stored | -| git_repo | Gitops git repo URL https URL for git push like "github.com/hyperledger/bevel.git" | -| username | Username which has access rights to read/write on repository | -| password | Password of the user which has access rights to read/write on repository (Optional for ssh; Required for https) | -| email | Email of the user to be used in git config | -| private_key | Path to the private key file which has write-access to the git repo (Optional for https; Required for ssh) | - -The services field for each organization under `organizations` section of Substrate contains list of `services`. - -Each organization will have a services section which includes details of all the peers, all participating nodes are names as peers. The snapshot of peers service with example values is below ```yaml - peers: - - peer: - name: oem-bn - subject: "O=OEM,OU=OEM,L=51.50/-0.13/London,C=GB" # This is the node subject. 
L=lat/long is mandatory for supplychain sample app - type: bootnode # value can be validator or bootnode ( or ipfs, for vitalAM) - p2p: - port: 30333 - ambassador: 15010 #Port exposed on ambassador service (use one port per org if using single cluster) - rpc: - port: 9933 - ws: - port: 9944 - - peer: - name: validator1 - subject: "O=Validator1,OU=Validator1,L=51.50/-0.13/London,C=GB" # This is the node subject. L=lat/long is mandatory for supplychain sample app - type: validator # value can be validator or bootnode ( or ipfs, for vitalAM) - p2p: - port: 30333 - ambassador: 15011 #Port exposed on ambassador service (use one port per org if using single cluster) - rpc: - port: 9933 - ws: - port: 9944 - - peer: - name: oem-ipfs-bn - subject: "O=OEMIPFSBoot,OU=OEMIPFSBoot,London,C=GB" # This is the node subject. - type: ipfs-bootnode # value can be validator or bootnode ( or ipfs, for vitalAM) - nodeHost: oem-bn # peer name of substrate node for IPFS API-WS connection - p2p: - port: 30333 - ambassador: 15013 #Port exposed on ambassador service (use one port per org if using single cluster) - rpc: - port: 9933 - ws: - port: 9944 - ipfs: - swarmPort: 4001 - ambassador: 15014 #Port exposed on ambassador service (use one port per org if using single cluster) - apiPort: 5001 - - peer: - name: oem - subject: "O=OEMIPFS,OU=OEMIPFS,London,C=GB" # This is the node subject. - type: member # value can be validator or bootnode ( or ipfs, for vitalAM) - nodeHost: oem # peer name of substrate node for IPFS API-WS connection - p2p: - port: 30333 - ambassador: 15015 #Port exposed on ambassador service (use one port per org if using single cluster) - rpc: - port: 9933 - ws: - port: 9944 - ipfs: - swarmPort: 4001 - ambassador: 15016 #Port exposed on ambassador service (use one port per org if using single cluster) - apiPort: 5001 - api: - port: 80 - postgresql: - port: 5432 - user: postgres - password: "postgres123" - id_service: - db_name: "id-service" - port: 3001 - inteli_api: - db_name: "inteli-api" - port: 3000 - ambassador: 443 +--8<-- "platforms/substrate/configuration/samples/substrate-network-config.yaml:98:173" ``` + The fields under `peer` are -| Field | Description | -|-------------|----------------------------------------------------------| -| name | Name of the peer | -| subject | This is the alternative identity of the peer node | -| type | Type can be `bootnode`, `validator`, `ipfs-bootnode` or `member` | -| p2p.port | P2P port | -| p2p.ambassador | The P2P Port when exposed on ambassador service| -| rpc.port | RPC port | -| ws.port | WebSocket port | +| **Field** | **Description** | +|------------- |----------------------------------------------------------| +| name | Name of the peer | +| subject | This is the alternative identity of the peer node | +| type | Type can be `bootnode`, `validator` or `member` | +| p2p.port | P2P port | +| p2p.ambassador | The P2P Port when exposed on ambassador service| +| rpc.port | RPC port | +| ws.port | WebSocket port | diff --git a/docs/source/guides/quorum-add-new-org.md b/docs/source/guides/quorum-add-new-org.md index 8779a485115..df168ca1020 100644 --- a/docs/source/guides/quorum-add-new-org.md +++ b/docs/source/guides/quorum-add-new-org.md @@ -29,132 +29,12 @@ The `network.yaml` file should contain the specific `network.organization` detai Make sure that the genesis block information is given in base64 encoding. 
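For reference, one way to produce that base64 string is sketched below (a minimal example, assuming the existing network's genesis file is available locally as `genesis.json`; adjust the path to match your build directory):

```bash
# Encode the existing network's genesis file as a single-line base64 string
# (-w 0 disables line wrapping; GNU coreutils base64)
base64 -w 0 genesis.json
```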
Also, if you are adding node to the same cluster as of another node, make sure that you add the ambassador ports of the existing node present in the cluster to the network.yaml -For reference, sample `network.yaml` file looks like below for RAFT consensus (but always check the latest network-quorum-newnode.yaml at `platforms/quourm/configuration/samples`): +For reference, sample `network-quorum-newnode.yaml` file [here](https://github.com/hyperledger/bevel/blob/main/platforms/quorum/configuration/samples/network-quorum-newnode.yaml) +```yaml +--8<-- "platforms/quorum/configuration/samples/network-quorum-newnode.yaml:1:133" ``` ---- -# This is a sample configuration file for Quorum network which has 4 nodes. -# All text values are case-sensitive -network: - # Network level configuration specifies the attributes required for each organization - # to join an existing network. - type: quorum - version: 23.4.0 #this is the version of Quorum - - #Environment section for Kubernetes setup - env: - type: "dev" # tag for the environment. Important to run multiple flux on single cluster - proxy: ambassador # value has to be 'ambassador' as 'haproxy' has not been implemented for Quorum - # These ports are enabled per cluster, so if you have multiple clusters you do not need so many ports - # This sample uses a single cluster, so we have to open 4 ports for each Node. These ports are again specified for each organization below - ambassadorPorts: # Any additional Ambassador ports can be given here, this is valid only if proxy='ambassador' - portRange: # For a range of ports - from: 15010 - to: 15043 - # ports: 15020,15021 # For specific ports - retry_count: 20 # Retry count for the checks on Kubernetes cluster - external_dns: enabled # Should be enabled if using external-dns for automatic route configuration - - # Docker registry details where images are stored. This will be used to create k8s secrets - # Please ensure all required images are built and stored in this registry. - # Do not check-in docker_password. - docker: - url: "ghcr.io/hyperledger" - username: "docker_username" - password: "docker_password" - - # Following are the configurations for the common Quorum network - config: - consensus: "raft" # Options are "raft" and "ibft" - ## Certificate subject for the root CA of the network. - # This is for development usage only where we create self-signed certificates and the truststores are generated automatically. - # Production systems should generate proper certificates and configure truststores accordingly. - subject: "CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB" - transaction_manager: "tessera" # Options are "tessera" - # This is the version of "tessera" - tm_version: "23.4.0" - tm_tls: "strict" # Options are "strict" and "off" - tm_trust: "tofu" # Options are: "ca-or-tofu", "ca", "tofu" - ## Transaction Manager nodes public addresses should be provided. - # For "tessera", all participating nodes should be provided - # In the example (for tessera ) below, the domain name is formed by the https://(peer.name).(org.external_url_suffix) - tm_nodes: - - "https://carrier.test.quorum.blockchaincloudpoc.com" - - "https://manufacturer.test.quorum.blockchaincloudpoc.com" - - "https://store.test.quorum.blockchaincloudpoc.com" - - "https://warehouse.test.quorum.blockchaincloudpoc.com" - ##### Following keys are used only to add new Node(s) to existing network. 
- staticnodes: # Existing network's static nodes file path needs to be given - genesis: # Existing network's genesis.json file path needs to be given - # make sure that the account is unlocked prior to adding a new node - bootnode: - #name of the node - name: carrier - #ambassador url of the node - url: carrierrpc.test.quorum.blockchaincloudpoc.com - #rpc port of the node - rpcport: 80 - #id of the node. - nodeid: 1 - - # Allows specification of one or many organizations that will be connecting to a network. - organizations: - # Specification for the 1st organization. Each organization should map to a VPC and a separate k8s cluster for production deployments - - organization: - name: neworg - external_url_suffix: test.quorum.blockchaincloudpoc.com # This is the url suffix that will be added in DNS recordset. Must be different for different clusters - cloud_provider: aws # Options: aws, azure, gcp - aws: - access_key: "aws_access_key" # AWS Access key, only used when cloud_provider=aws - secret_key: "aws_secret_key" # AWS Secret key, only used when cloud_provider=aws - # Kubernetes cluster deployment variables. The config file path and name has to be provided in case - # the cluster has already been created. - k8s: - context: "cluster_context" - config_file: "cluster_config" - # Hashicorp Vault server address and root-token. Vault should be unsealed. - # Do not check-in root_token - vault: - url: "vault_addr" - root_token: "vault_root_token" - # Git Repo details which will be used by GitOps/Flux. - # Do not check-in git_access_token - gitops: - git_protocol: "https" # Option for git over https or ssh - git_url: "https://github.com//bevel.git" # Gitops https or ssh url for flux value files - branch: "develop" # Git branch where release is being made - release_dir: "platforms/quorum/releases/dev" # Relative Path in the Git repo for flux sync per environment. - chart_source: "platforms/quorum/charts" # Relative Path where the Helm charts are stored in Git repo - git_repo: "github.com//bevel.git" # Gitops git repository URL for git push - username: "git_username" # Git Service user who has rights to check-in in all branches - password: "git_access_token" # Git Server user access token (Optional for ssh; Required for https) - email: "git_email" # Email to use in git config - private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) - # The participating nodes are named as peers - services: - peers: - - peer: - name: neworg - subject: "O=Neworg,OU=Neworg,L=51.50/-0.13/London,C=GB" # This is the node subject. 
L=lat/long is mandatory for supplychain sample app - type: validator # value can be validator or member, only applicable if consensus = 'ibft' - geth_passphrase: 12345 # Passphrase to be used to generate geth account - p2p: - port: 21000 - ambassador: 15010 #Port exposed on ambassador service (use one port per org if using single cluster) - rpc: - port: 8546 - ambassador: 15011 #Port exposed on ambassador service (use one port per org if using single cluster) - transaction_manager: - port: 443 - ambassador: 443 - raft: # Only used if consensus = 'raft' - port: 50401 - ambassador: 15013 - db: # Only used if transaction_manager = "tessera" - port: 3306 - -``` Below three new sections are added to the network.yaml | Field | Description | diff --git a/docs/source/index.md b/docs/source/index.md index 6e489321c74..06fbe61269d 100644 --- a/docs/source/index.md +++ b/docs/source/index.md @@ -1,27 +1,27 @@ # Introduction -Hyperledger Bevel is an advanced automation framework tailored for the seamless deployment of robust, production-ready Distributed Ledger Technology (DLT) networks on cloud-based infrastructures. Eliminating the need for intricate solution architecture, Bevel empowers teams to deliver with precision. +Hyperledger Bevel is a sophisticated automation framework designed for the deployment of production-ready Distributed Ledger Technology (DLT) networks across cloud infrastructures. By eliminating the need for complex solution architecture, Bevel empowers teams to deliver with precision and efficiency. ![](./_static/bevel-overview.png) ## Key Features - * Security Excellence: Bevel establishes a secure foundation for DLT development, implementing best practices in key management and providing essential security features by default. - * Scalability at Its Core: Bevel's network implementation is designed for seamless scalability, allowing users to effortlessly expand their environment and resources according to project requirements. - * Accelerated Deployment: Bevel expedites blockchain solution deployment, offering an opportunity for active participation and the acceleration of additional services. + - **Helm Charts**: Simplifies the deployment of complex DLT networks. + - **Security**: Implements best practices in key management. + - **Scalability**: Designed for seamless expansion. + - **Accelerated Deployment**: Expedites the creation of complex DLT environments. + - **Developer Flexibility**: Option to deploy without Hahsicorp Vault and Flux for streamlined development. -Hyperledger Bevel serves as a pivotal accelerator, enabling developers to efficiently create a DLT environment. With Bevel, users not only expedite the creation of a DLT network but also ensure its suitability for continuous utilization throughout the project lifecycle. +## Supported DLT Platforms -## Which platforms does Bevel Support? - -Bevel currently supports the following DLT/Blockchain Platforms: +Hyperledger Bevel currently supports the following DLT/Blockchain Platforms: * [R3 Corda](https://docs.corda.net/) * [Hyperledger Fabric](https://hyperledger-fabric.readthedocs.io) * [Hyperledger Indy](https://hyperledger-indy.readthedocs.io/en/latest/) * [Hyperledger Besu]( https://besu.hyperledger.org/en/stable/) * [Quorum]( https://www.goquorum.com/) - * [Substrate](https://www.parity.io/technologies/substrate/). + * [Substrate](https://www.parity.io/technologies/substrate/) Get started with the [pre-requisites](./getting-started/prerequisites.md) to rapidly deploy blockchain networks. 
diff --git a/docs/source/references/roadmap.md b/docs/source/references/roadmap.md index b9cd9e4a61d..59e558daf5a 100644 --- a/docs/source/references/roadmap.md +++ b/docs/source/references/roadmap.md @@ -18,12 +18,13 @@ timeline : Fabric : Quorum section 2024 Q3 - Deployment using Kubernetes Operator - : Besu - section 2024 Q4 Helm depencencies : Indy : Substrate + AWS secrets as vault + section 2024 Q4 + Deployment using Kubernetes Operator + : Besu ``` @@ -37,27 +38,23 @@ Legend of annotations: | :octicons-pin-16: | work to do | | :octicons-check-16: | work completed | | :material-run: | on-going work | -| :octicons-trophy-16: | stretch goal | +| :octicons-trophy-16: | stretch goal | | :octicons-stop-16: | on hold | ## Documentation -- :octicons-check-16: Spell and grammar linting -- :octicons-check-16: Replace ansible roles readme with high level information -- :octicons-check-16: Add helm chart readme for platform charts -- :octicons-check-16: Complete restructure and redesign of documentation +- :material-run: Spell linting workflow for PR checks - :material-run: Format/Update configuration file and ops section +- :material-run: Troubleshooting guide +- :octicons-pin-16: deployment workflow guide ## General/Shared - :material-run: Grafana and Promethus integration - :material-run: Consistent variable names for helm chart values -- :octicons-check-16: Support of Ambassador Edge Stack -- :octicons-check-16: Add git actions to automate creation of helm repo chart artifacts -- :octicons-check-16: Creation of vault auth role from the vault-k8s chart -- :octicons-check-16: Add default values to chart templates/values section +- :material-run: Add default values to chart templates/values section - :octicons-trophy-16: Improve logging/error messaging in playbooks and log storage - :octicons-trophy-16: Devcontainer for vscode containers/codespaces - :octicons-trophy-16: Git commit/yaml linting -- :octicons-trophy-16: Support for additional vault, hashicorp alternatives +- :octicons-trophy-16: Support for additional vault, hashicorp alternatives - :octicons-stop-16: Setup AWS cloudwatch exporter ## Platforms @@ -66,40 +63,45 @@ Legend of annotations: - :octicons-stop-16: HA Notary options - :octicons-stop-16: Enable PostGreSQL support for Corda Enterprise - :octicons-stop-16: Removal of node - - :octicons-stop-16: Cacti connector for Corda opensource - - :octicons-check-16: Corda enterprise Node/Notary v4.9 support + - :octicons-pin-16: Cacti connector for Corda opensource + - :octicons-pin-16: Deploy using just helm with no proxy, no vault option + - :octicons-pin-16: Corda enterprise and opensource Node/Notary v4.11 support + - :octicons-pin-16: Add cordapps operations and update docs - R3 Corda OS v5 - - :octicons-pin-16: Base network deployment + - :octicons-stop-16: Base network deployment - Hyperledger Fabric - - :octicons-check-16: External chaincode for Fabric 2.2.x - - :octicons-check-16: Support for Fabric 2.5.x - - :material-run: Operational features for Fabric 2.5.x + - :octicons-pin-16: Deploy using just helm with no proxy, no vault option + - :octicons-pin-16: chaincode and channel mgmt. 
decoupled from network deployment - :octicons-pin-16: chaincode operations via operator console - - :octicons-pin-16: chaincode operations automation using bevel-operator-fabric - - :octicons-pin-16: chaincode upgrade for external chaincode + - :octicons-stop-16: chaincode operations automation using bevel-operator-fabric - :octicons-stop-16: CI/CD piplelines for chaincode deployment - Hyperledger Besu - :octicons-stop-16: Enable node discovery - :octicons-stop-16: Enable bootnodes - - :octicons-check-16: Add promethus/Grafana chart for node monitoring data - - :octicons-check-16: Test onchain permission for Besu platform - - :octicons-pin-16: Node version upgrades - - :octicons-pin-16: Tessera version upgrades + - :octicons-pin-16: Test promethus/Grafana chart for node monitoring data + - :octicons-pin-16: Test tls cert creation using letsencrypt + - :octicons-pin-16: Test onchain permission for Besu platform + - :octicons-pin-16: Addition of new validator node and add guide for the same + - :octicons-pin-16: Besu node version upgrades + - :octicons-check-16: Tessera version upgrades - :octicons-stop-16: Support for Besu node on public network - Quorum - - :octicons-pin-16: Deployment using just helm charts + - :octicons-pin-16: Deploy using just helm with no proxy, no vault option + - :octicons-pin-16: Addition of new validator node and add guide for the same - Hyperledger Indy - - :octicons-stop-16: Removal of organizations from a running Indy Network - - ::octicons-pin-16: Node version upgrades + - :octicons-pin-16: Deploy using just helm with no proxy, no vault option + - :octicons-pin-16: Node version upgrades + - :octicons-stop-16: Removal of organizations from a running Indy Network - Substrate + - :octicons-pin-16: Deploy using just helm with no proxy, no vault option - :octicons-trophy-16: Test with generic substrate node - :octicons-trophy-16: Adding of org/new node ## Bevel Samples -- :octicons-pin-16: Upgrade Ambassador proxy to Edge stack -- :octicons-pin-16: Upgrade rest server/middleware applications -- :octicons-pin-16: Upgrade aca py application +- :material-run: Upgrade Ambassador proxy to Edge stack +- :material-run: Upgrade rest server/middleware applications +- :octicons-pin-16: Test Hyperledger Aries contribution and see if can replace aca-py ## Bevel Kubernetes Operators @@ -114,5 +116,4 @@ Legend of annotations: - :octicons-stop-16: Architecture diagram ## DevOps-Pipeline - -- :material-run: GitHub Actions automation script for each DLT platform +- :octicons-pin-16: Chart testing diff --git a/docs/source/tutorials/index.md b/docs/source/tutorials/index.md index b916ba76b0d..d517bee3967 100644 --- a/docs/source/tutorials/index.md +++ b/docs/source/tutorials/index.md @@ -1,12 +1,45 @@ # Tutorials -These are the developer and operator tutorials: - -| Tutorial | Description | -| :-------------------------- | :---------------------------------- | -| [Developer pre-requisites](dev-prereq.md)| How to set up Developer pre-requisites| -| [Deploy using Docker](docker-deploy.md) | How to use Bevel from a docker container which has all pre-requisites installed. | -| [Deploy using Machine](machine-deploy.md) | How to use Bevel from your own machine. | -| [Update DNS](dns-settings.md) | How to configure DNS for use with Bevel. 
| -| [Use Bevel with minikube](bevel-minikube-setup.md)| How to deploy any network on minikube| -| [Add a new StorageClass](adding-new-storageclass.md)| How to add a new StorageClass for a new Cloud Provider| +Here are a few developer and operator tutorials: + +
+ +- :fontawesome-solid-laptop-code:{ .lg .middle } __[Developer pre-requisites](dev-prereq.md)__ + + --- + + Learn how to set up Developer pre-requisites. + +- :fontawesome-solid-box:{ .lg .middle } __[Deploy using Docker](docker-deploy.md)__ + + --- + + Learn how to use Bevel from a docker container which has all pre-requisites installed. + +- :fontawesome-solid-laptop:{ .lg .middle } __[Deploy using Machine](machine-deploy.md)__ + + --- + + Learn how to use Bevel from your own machine. + +- :material-dns-outline:{ .lg .middle } __[Update DNS](dns-settings.md)__ + + --- + + Learn how to configure DNS for use with Bevel. + +- :material-school:{ .lg .middle } __[Use Bevel with minikube](bevel-minikube-setup.md)__ + + --- + + Learn how to deploy any network on minikube. + +- :fontawesome-regular-hard-drive:{ .lg .middle } __[Add a new StorageClass](adding-new-storageclass.md)__ + + --- + + Learn how to add a new StorageClass for a new Cloud Provider. + + + +
diff --git a/docs/source/tutorials/machine-deploy.md b/docs/source/tutorials/machine-deploy.md index d8de332fcb9..f912d8a4389 100644 --- a/docs/source/tutorials/machine-deploy.md +++ b/docs/source/tutorials/machine-deploy.md @@ -8,7 +8,7 @@ To create a Production DLT/Blockchain network, ensure you have the following: 1. One running Kubernetes Cluster and the Config file (default ~/.kube.config) per Organization. -1. One running Hashicorp Vault server per Organization. Unsealed and configured as per [guidance here](../getting-started/configure-prerequisites.md#vaultunseal). +1. One running Hashicorp Vault server per Organization. Unsealed and configured as per [guidance here](../getting-started/configure-prerequisites.md#unseal-hashicorp-vault). 1. Domain Name(s) configured as per [tutorial here](../tutorials/dns-settings.md). 1. Git user details per Organization as per [pre-requisites](../getting-started/configure-prerequisites.md#gitops-authentication). 1. Ansible controller configured as per [guidance here](../getting-started/prerequisites-machine.md). diff --git a/platforms/hyperledger-besu/charts/README.md b/platforms/hyperledger-besu/charts/README.md index 1c9a423fd4d..c0ddcc017cd 100644 --- a/platforms/hyperledger-besu/charts/README.md +++ b/platforms/hyperledger-besu/charts/README.md @@ -6,7 +6,7 @@ # Charts for Hyperledger Besu components ## About -This folder contains the helm charts which are used for the deployment of the Hyperledger Besu components. Each helm that you can use has the following keys and you need to set them. The `global.cluster.provider` is used as a key for the various cloud features enabled. Also you only need to specify one cloud provider, **not** both if deploying to cloud. As of writing this doc, AWS is fully supported. +This folder contains the helm charts which are used for the deployment of the Hyperledger Besu components. Each helm chart that you can use has the following keys and you need to set them. The `global.cluster.provider` is used as a key for the various cloud features to be enabled. Also you only need to specify one cloud provider, **not** both if deploying to cloud. As of writing this doc, AWS is fully supported. ```yaml global: @@ -110,24 +110,48 @@ helm install genesis ./besu-genesis --namespace carrier-bes --values ./values/pr helm install carrier ./besu-node --namespace carrier-bes --values ./values/proxy-and-vault/txnode-sec.yaml --set global.proxy.p2p=15016 --set node.besu.identity="O=Carrier,OU=Carrier,L=51.50/-0.13/London,C=GB" ``` -### API Calls -Once deployed, services are available as follows on the address as provided in your `global.proxy.externalUrlSuffix`. +### API call -```bash -# HTTP RPC API -curl -v -X POST -H "Content-Type: application/json" --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' http://supplychainrpc.test.yourdomain.com - -# which should return (confirming that the node running the JSON-RPC service is syncing): -{ - "jsonrpc" : "2.0", - "id" : 1, - "result" : "0x64" -} -``` +Once your services are deployed, they can be accessed using the domain name provided in your `global.proxy.externalUrlSuffix`. + +1. **Retrieve the Source Host for Your Node** + + Run the following command to get the mapping for your node: + + ```bash + kubectl get mapping --namespace supplychain-bes + ``` + + From the output, copy the source host for your node. + +2. **Make HTTP RPC API Calls** + + You can interact with your node using HTTP RPC API calls. 
Here's an example of how to do it: + + ```bash + curl -X POST -H "Content-Type: application/json" --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' http:// + ``` + + Replace `` with the source host you copied earlier. + +3. **Verify the Node Syncing Status** + + If the node running the JSON-RPC service is syncing correctly, the previous command should return the following: + + ```json + { + "jsonrpc" : "2.0", + "id" : 1, + "result" : "0x64" + } + ``` + + This confirms that your node is syncing as expected. ### Clean-up -To clean up, just uninstall the helm releases. +To clean up, simply uninstall the Helm releases. It's important to uninstall the genesis Helm chart at the end to prevent any cleanup failure. + ```bash helm uninstall --namespace supplychain-bes validator-1 helm uninstall --namespace supplychain-bes validator-2 @@ -138,5 +162,11 @@ helm uninstall --namespace supplychain-bes genesis helm uninstall --namespace carrier-bes carrier helm uninstall --namespace carrier-bes genesis +``` +### Add and remove qbft validators + +To deploy the proposed validator chart, we need to deploy the Besu node chart first. -``` \ No newline at end of file +```bash +helm install validator-5 ./besu-propose-validator --namespace supplychain-bes --values besu-propose-validator/values.yaml +``` diff --git a/platforms/hyperledger-besu/charts/besu-genesis/README.md b/platforms/hyperledger-besu/charts/besu-genesis/README.md index 4d599b9030d..6af902b63e9 100644 --- a/platforms/hyperledger-besu/charts/besu-genesis/README.md +++ b/platforms/hyperledger-besu/charts/besu-genesis/README.md @@ -14,7 +14,7 @@ helm repo add bevel https://hyperledger.github.io/bevel helm install genesis bevel/besu-genesis ``` -## Prerequisitess +## Prerequisites - Kubernetes 1.19+ - Helm 3.2.0+ @@ -54,7 +54,7 @@ These parameters are refered to as same in each parent or child chart | Name | Description | Default Value | |--------|---------|-------------| |`global.serviceAccountName` | The serviceaccount name that will be created for Vault Auth and k8S Secret management| `vault-auth` | -| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. Currently ony `aws` and `minikube` is tested | `aws` | +| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. Currently only `aws`, `azure` and `minikube` are tested | `aws` | | `global.cluster.cloudNativeServices` | only `false` is implemented, `true` to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) is for future | `false` | | `global.cluster.kubernetesUrl` | URL of the Kubernetes Cluster | `""` | | `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` and `kubernetes` is supported. 
| `hashicorp` | @@ -71,10 +71,10 @@ These parameters are refered to as same in each parent or child chart | -------------| ---------- | --------- | | `image.genesisUtils.repository` | Quorum/Besu hooks image repository | `ghcr.io/hyperledger/bevel-k8s-hooks` | | `image.genesisUtils.tag` | Quorum/Besu hooks image tag | `qgt-0.2.12` | -| `image.pullSecret` | Provide the docker secret name in the namespace | `""` | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | | `image.pullPolicy` | Pull policy to be used for the Docker images | `IfNotPresent` | -### TLS +### Settings | Name | Description | Default Value | |--------|---------|-------------| diff --git a/platforms/hyperledger-besu/charts/besu-genesis/requirements.yaml b/platforms/hyperledger-besu/charts/besu-genesis/requirements.yaml index b878161ca1a..b1195396c5f 100644 --- a/platforms/hyperledger-besu/charts/besu-genesis/requirements.yaml +++ b/platforms/hyperledger-besu/charts/besu-genesis/requirements.yaml @@ -1,11 +1,11 @@ dependencies: - name: bevel-vault-mgmt - repository: "https://hyperledger.github.io/bevel" + repository: "file://../../../shared/charts/bevel-vault-mgmt" tags: - bevel version: ~1.0.0 - name: bevel-scripts - repository: "https://hyperledger.github.io/bevel" + repository: "file://../../../shared/charts/bevel-scripts" tags: - bevel version: ~1.0.0 diff --git a/platforms/hyperledger-besu/charts/besu-genesis/templates/_helpers.tpl b/platforms/hyperledger-besu/charts/besu-genesis/templates/_helpers.tpl index 3b8a9a0febe..48be575a0f7 100644 --- a/platforms/hyperledger-besu/charts/besu-genesis/templates/_helpers.tpl +++ b/platforms/hyperledger-besu/charts/besu-genesis/templates/_helpers.tpl @@ -27,4 +27,3 @@ Create chart name and version as used by the chart label. {{- define "besu-genesis.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- end -}} - diff --git a/platforms/hyperledger-besu/charts/besu-genesis/templates/genesis-job-cleanup.yaml b/platforms/hyperledger-besu/charts/besu-genesis/templates/genesis-job-cleanup.yaml index d653afcd19e..c296b6aa511 100644 --- a/platforms/hyperledger-besu/charts/besu-genesis/templates/genesis-job-cleanup.yaml +++ b/platforms/hyperledger-besu/charts/besu-genesis/templates/genesis-job-cleanup.yaml @@ -41,11 +41,14 @@ spec: - | {{- if .Values.settings.removeGenesisOnDelete }} + if kubectl get configmap --namespace {{ .Release.Namespace }} besu-genesis &> /dev/null; then + echo "Deleting genesis configmap in k8s ..." + kubectl delete configmap --namespace {{ .Release.Namespace }} besu-genesis + fi - echo "Deleting genesis configmap in k8s ..." - kubectl delete configmap --namespace {{ .Release.Namespace }} besu-genesis - - echo "Deleting node-enodes configmap in k8s ..." - kubectl delete configmap --namespace {{ .Release.Namespace }} besu-peers + if kubectl get configmap --namespace {{ .Release.Namespace }} besu-peers &> /dev/null; then + echo "Deleting node-enodes configmap in k8s ..." + kubectl delete configmap --namespace {{ .Release.Namespace }} besu-peers + fi {{- end}} diff --git a/platforms/hyperledger-besu/charts/besu-genesis/values.yaml b/platforms/hyperledger-besu/charts/besu-genesis/values.yaml index 352c2355ce5..1aa90bfc651 100644 --- a/platforms/hyperledger-besu/charts/besu-genesis/values.yaml +++ b/platforms/hyperledger-besu/charts/besu-genesis/values.yaml @@ -11,10 +11,10 @@ global: #Provide the service account name which will be created. 
serviceAccountName: vault-auth cluster: - provider: aws # choose from: minikube | aws - cloudNativeServices: false # 'false' is implemented + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # only 'false' is implemented #Provide the kubernetes host url - #Eg. kubernetesUrl: https://10.3.8.5:6443 + #Eg. kubernetesUrl: https://10.3.8.5:8443 kubernetesUrl: vault: #Provide the type of vault diff --git a/platforms/hyperledger-besu/charts/besu-node/README.md b/platforms/hyperledger-besu/charts/besu-node/README.md index af27ae11581..fc7c1ad867c 100644 --- a/platforms/hyperledger-besu/charts/besu-node/README.md +++ b/platforms/hyperledger-besu/charts/besu-node/README.md @@ -14,7 +14,7 @@ helm repo add bevel https://hyperledger.github.io/bevel helm install validator-1 bevel/besu-node ``` -## Prerequisitess +## Prerequisites - Kubernetes 1.19+ - Helm 3.2.0+ @@ -85,10 +85,10 @@ This is where you can override the values for the [besu-tessera-node subchart](. ### Image | Name | Description | Default Value | | -------------| ---------- | --------- | -| `image.pullSecret` | Provide the docker secret name in the namespace | `""` | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | | `image.pullPolicy` | Pull policy to be used for the Docker images | `IfNotPresent` | | `image.besu.repository` | Besu image repository | `hyperledger/besu`| -| `image.besu.tag` | Besu image tag as per version of Besu | `22.10.2`| +| `image.besu.tag` | Besu image tag as per version of Besu | `23.10.2`| | `image.hooks.repository` | Quorum/Besu hooks image repository | `ghcr.io/hyperledger/bevel-k8s-hooks` | | `image.hooks.tag` | Quorum/Besu hooks image tag | `qgt-0.2.12` | diff --git a/platforms/hyperledger-besu/charts/besu-node/requirements.yaml b/platforms/hyperledger-besu/charts/besu-node/requirements.yaml index 9244f887e30..059282799c1 100644 --- a/platforms/hyperledger-besu/charts/besu-node/requirements.yaml +++ b/platforms/hyperledger-besu/charts/besu-node/requirements.yaml @@ -1,20 +1,20 @@ dependencies: - name: bevel-storageclass alias: storage - repository: "https://hyperledger.github.io/bevel" + repository: "file://../../../shared/charts/bevel-storageclass" tags: - storage version: ~1.0.0 - name: besu-tessera-node alias: tessera - repository: "https://hyperledger.github.io/bevel" + repository: "file://../besu-tessera-node" tags: - tessera version: ~1.0.0 condition: tessera.enabled - name: besu-tlscert-gen alias: tls - repository: "https://hyperledger.github.io/bevel" + repository: "file://../besu-tlscert-gen" tags: - bevel version: ~1.0.0 diff --git a/platforms/hyperledger-besu/charts/besu-node/templates/besu-config-configmap.yaml b/platforms/hyperledger-besu/charts/besu-node/templates/besu-config-configmap.yaml index 7b711609ce9..d0ef3e34c47 100644 --- a/platforms/hyperledger-besu/charts/besu-node/templates/besu-config-configmap.yaml +++ b/platforms/hyperledger-besu/charts/besu-node/templates/besu-config-configmap.yaml @@ -27,8 +27,7 @@ data: node-private-key-file={{.Values.node.besu.privateKeyPath | quote }} # Transaction Pool - tx-pool-retention-hours={{ .Values.node.besu.txPool.retentionHours }} - tx-pool-max-size={{ .Values.node.besu.txPool.maxSize }} + tx-pool-max-size={{ .Values.node.besu.txPool.maxCapacity }} {{ if .Values.node.besu.p2p.enabled -}} # P2P network diff --git a/platforms/hyperledger-besu/charts/besu-node/templates/node-statefulset.yaml 
b/platforms/hyperledger-besu/charts/besu-node/templates/node-statefulset.yaml index e6f8112cbbc..bb86f3f524e 100644 --- a/platforms/hyperledger-besu/charts/besu-node/templates/node-statefulset.yaml +++ b/platforms/hyperledger-besu/charts/besu-node/templates/node-statefulset.yaml @@ -96,6 +96,8 @@ spec: - name: {{ .Release.Name }}-besu image: {{ .Values.image.besu.repository }}:{{ .Values.image.besu.tag }} imagePullPolicy: {{ .Values.image.pullPolicy }} + securityContext: + runAsUser: 0 resources: requests: cpu: "{{ .Values.node.besu.resources.cpuRequest }}" @@ -172,10 +174,9 @@ spec: --identity={{ .Values.node.besu.identity | quote }} --miner-enabled=false \ --Xdns-enabled=true --Xdns-update-enabled=true --Xnat-kube-service-name={{ include "besu-node.fullname" . }} \ --min-gas-price=0 - livenessProbe: httpGet: - path: /liveness + path: / port: 8545 initialDelaySeconds: 180 periodSeconds: 60 diff --git a/platforms/hyperledger-besu/charts/besu-node/values.yaml b/platforms/hyperledger-besu/charts/besu-node/values.yaml index 837b34f6333..2fd6b191c68 100644 --- a/platforms/hyperledger-besu/charts/besu-node/values.yaml +++ b/platforms/hyperledger-besu/charts/besu-node/values.yaml @@ -49,7 +49,7 @@ image: pullPolicy: IfNotPresent besu: repository: hyperledger/besu - tag: 22.10.2 + tag: 23.10.2 hooks: repository: ghcr.io/hyperledger/bevel-k8s-hooks tag: qgt-0.2.12 @@ -104,8 +104,7 @@ node: port: 8547 corsOrigins: '["all"]' txPool: - retentionHours: 999 - maxSize: 1024 + maxCapacity: 12 http: allowlist: '["*"]' metrics: @@ -149,4 +148,4 @@ volumePermissionsFix: labels: service: [] pvc: [] - deployment: [] \ No newline at end of file + deployment: [] diff --git a/platforms/hyperledger-besu/charts/besu-propose-validator/Chart.yaml b/platforms/hyperledger-besu/charts/besu-propose-validator/Chart.yaml new file mode 100644 index 00000000000..6cb9b839259 --- /dev/null +++ b/platforms/hyperledger-besu/charts/besu-propose-validator/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +name: besu-propose-validator +description: "besu: Proposes to add or remove a validator with the specified address." +version: 1.0.1 +appVersion: latest +keywords: + - bevel + - ethereum + - besu + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/hyperledger-besu/charts/besu-propose-validator/README.md b/platforms/hyperledger-besu/charts/besu-propose-validator/README.md new file mode 100644 index 00000000000..fb52300539a --- /dev/null +++ b/platforms/hyperledger-besu/charts/besu-propose-validator/README.md @@ -0,0 +1,94 @@ +[//]: # (##############################################################################################) +[//]: # (Copyright Accenture. All Rights Reserved.) +[//]: # (SPDX-License-Identifier: Apache-2.0) +[//]: # (##############################################################################################) + +# besu-propose-validator + +This chart is a component of Hyperledger Bevel. The besu-propose-validator chart injects a new authorization candidate that the validator attempts to push through. If a majority of the validators vote the candidate in/out, the candidate is added/removed in the validator set. 
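+
+Under the hood, this vote is submitted through the JSON-RPC API of an existing validator. For context, a rough manual equivalent for a QBFT network is sketched below; the validator address and RPC URL are placeholders, and the method name depends on the consensus in use (for example `ibft_proposeValidatorVote` on IBFT 2.0).
+
+```bash
+# Propose to add (true) or remove (false) the node with the given address as a
+# validator, by calling an existing validator's JSON-RPC endpoint
+# (assumes the QBFT RPC API is enabled on that node).
+curl -X POST -H "Content-Type: application/json" \
+  --data '{"jsonrpc":"2.0","method":"qbft_proposeValidatorVote","params":["0xb30f304642de3fee4365ed5cd06ea2e69d3fd0ca", true],"id":1}' \
+  http://existing-validator-rpc.test.besu.blockchaincloudpoc.com
+```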
+ +## TL;DR + +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install propose-validator bevel/besu-propose-validator +``` + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +If Hashicorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ + +> **Important**: Also check the dependent charts. + +## Installing the Chart + +To install the chart with the release name `propose-validator`: + +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install propose-validator bevel/besu-propose-validator +``` + +The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `propose-validator` deployment: + +```bash +helm uninstall propose-validator +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Image + +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `image.repository` | Besu hooks image repository | `ghcr.io/hyperledger/bevel-k8s-hooks` | +| `image.tag` | Besu hooks image tag | `qgt-0.2.12` | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | +| `image.pullPolicy` | Pull policy to be used for the Docker images | `IfNotPresent` | + +### validators + +| Name | Description | Default Value | +| ----------------| ----------- | ------------- | +| `validators.auth` | Set to 'true' to vote the candidate in and 'false' to vote them out | `true` | +| `validators.consensusMethod` | JSON-RPC method used to propose the vote: `ibft_proposeValidatorVote`, `qbft_proposeValidatorVote` or `clique_propose` | `""` | +| `validators.existingValidators` | List of RPC URLs of the already authorized (existing) validators | `""` | +| `validators.proposeValidatorsAddr` | List of node addresses of the validators that need to be proposed | `""` | + + +## License + +This chart is licensed under the Apache v2.0 license. + +Copyright © 2023 Accenture + +### Attribution + +This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: + +``` +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +``` diff --git a/platforms/hyperledger-besu/charts/besu-propose-validator/templates/_helpers.tpl b/platforms/hyperledger-besu/charts/besu-propose-validator/templates/_helpers.tpl new file mode 100644 index 00000000000..3c89ba48f04 --- /dev/null +++ b/platforms/hyperledger-besu/charts/besu-propose-validator/templates/_helpers.tpl @@ -0,0 +1,31 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "besu-propose-validator.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name.
+ +*/}} +{{- define "besu-propose-validator.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "besu-propose-validator.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} diff --git a/platforms/hyperledger-besu/charts/besu-propose-validator/templates/besu-propose-validator.yaml b/platforms/hyperledger-besu/charts/besu-propose-validator/templates/besu-propose-validator.yaml new file mode 100644 index 00000000000..138c67ced1c --- /dev/null +++ b/platforms/hyperledger-besu/charts/besu-propose-validator/templates/besu-propose-validator.yaml @@ -0,0 +1,58 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "besu-propose-validator.name" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: besu-propose-validator-job + app.kubernetes.io/component: propose-validator-job + app.kubernetes.io/part-of: {{ include "besu-propose-validator.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +spec: + backoffLimit: 3 + template: + metadata: + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: besu-propose-validator-job + app.kubernetes.io/component: propose-validator-job + app.kubernetes.io/part-of: {{ include "besu-propose-validator.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/managed-by: helm + spec: + restartPolicy: "OnFailure" + containers: + - name: propose-validator + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + securityContext: + runAsUser: 0 + env: + - name: EXISTING_VALIDATOR_URLS + value: "{{- .Values.validators.existingValidators | join " " -}}" + - name: PROPOSE_VALIDATOR_ADDRS + value: "{{- .Values.validators.proposeValidatorsAddr | join " " -}}" + - name: CONSENSUS_METHOD + value: "{{ .Values.validators.consensusMethod | join " " -}}" + command: ["/bin/sh", "-c"] + args: - | + + for propose_val_addr in $PROPOSE_VALIDATOR_ADDRS; do + for existing_val_url in $EXISTING_VALIDATOR_URLS; do + # Send proposal to the existing validator; the method name is expanded from CONSENSUS_METHOD + proposal_response=$(curl -s -X POST -H "Content-Type: application/json" --data '{"jsonrpc":"2.0","method":"'"$CONSENSUS_METHOD"'","params":["'"$propose_val_addr"'",{{ .Values.validators.auth }}],"id":1}' "$existing_val_url") + + # Check if proposal was successful or not + result_count=$(echo "$proposal_response" | grep -c "result") + if [ "$result_count" = 1 ]; then + echo "Node proposed successfully."
+ else + echo "$proposal_response" | jq -r '.error' + fi + done + done + echo "COMPLETED" diff --git a/platforms/hyperledger-besu/charts/besu-propose-validator/values.yaml b/platforms/hyperledger-besu/charts/besu-propose-validator/values.yaml new file mode 100644 index 00000000000..6a95f1c0773 --- /dev/null +++ b/platforms/hyperledger-besu/charts/besu-propose-validator/values.yaml @@ -0,0 +1,23 @@ +image: + repository: ghcr.io/hyperledger/bevel-k8s-hooks + tag: qgt-0.2.12 + pullPolicy: IfNotPresent + pullSecret: "" + +validators: + auth: true # Set to 'true' to vote the candidate in and 'false' to vote them out + # List of URLs of the existing validators + consensusMethod: # Choose one method from the list + # - "ibft_proposeValidatorVote" + #- "qbft_proposeValidatorVote" + # - "clique_propose" + existingValidators: + # - "http://" + # - "http://" + # - "http://" + # - "http://" + # List of node addresses of the validators that need to be proposed + proposeValidatorsAddr: + # - "<0xnodeAddress-1>" + # - "<0xnodeAddress-2>" + # - "<0xnodeAddress-3>" diff --git a/platforms/hyperledger-besu/charts/besu-tessera-node/README.md b/platforms/hyperledger-besu/charts/besu-tessera-node/README.md index 9b02022f3f7..3374181fbea 100644 --- a/platforms/hyperledger-besu/charts/besu-tessera-node/README.md +++ b/platforms/hyperledger-besu/charts/besu-tessera-node/README.md @@ -14,7 +14,7 @@ helm repo add bevel https://hyperledger.github.io/bevel helm install my-tessera bevel/besu-tessera-node ``` -## Prerequisitess +## Prerequisites - Kubernetes 1.19+ - Helm 3.2.0+ @@ -88,7 +88,7 @@ These parameters are refered to as same in each parent or child chart | `image.mysql.tag` | MySQL image tag | `5.7` | | `image.hooks.repository` | Quorum/Besu hooks image repository | `ghcr.io/hyperledger/bevel-k8s-hooks` | | `image.hooks.tag` | Quorum/Besu hooks image tag | `qgt-0.2.12` | -| `image.pullSecret` | Provide the docker secret name in the namespace | `""` | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | | `image.pullPolicy` | Pull policy to be used for the Docker images | `IfNotPresent` | diff --git a/platforms/hyperledger-besu/charts/besu-tessera-node/requirements.yaml b/platforms/hyperledger-besu/charts/besu-tessera-node/requirements.yaml index 21dec6373ba..5f3ec035eee 100644 --- a/platforms/hyperledger-besu/charts/besu-tessera-node/requirements.yaml +++ b/platforms/hyperledger-besu/charts/besu-tessera-node/requirements.yaml @@ -1,7 +1,7 @@ dependencies: - name: bevel-storageclass alias: storage - repository: "https://hyperledger.github.io/bevel" + repository: "file://../../../shared/charts/bevel-storageclass" tags: - storage version: ~1.0.0 diff --git a/platforms/hyperledger-besu/charts/besu-tessera-node/values.yaml b/platforms/hyperledger-besu/charts/besu-tessera-node/values.yaml index 9176cc76101..c576bd7de5e 100644 --- a/platforms/hyperledger-besu/charts/besu-tessera-node/values.yaml +++ b/platforms/hyperledger-besu/charts/besu-tessera-node/values.yaml @@ -50,13 +50,13 @@ image: #Eg. tessera: quorumengineering/tessera:0.9.2 tessera: repository: quorumengineering/tessera - tag: 22.1.7 + tag: 23.4.0 #Provide the valid image name and version for busybox busybox: busybox #Provide the valid image name and version for MySQL. 
This is used as the DB for TM mysql: repository: mysql/mysql-server - tag: 5.7 + tag: 8.0.32 hooks: repository: ghcr.io/hyperledger/bevel-k8s-hooks tag: qgt-0.2.12 diff --git a/platforms/hyperledger-besu/charts/besu-tlscert-gen/README.md b/platforms/hyperledger-besu/charts/besu-tlscert-gen/README.md index 50791980175..eebfdf7f89b 100644 --- a/platforms/hyperledger-besu/charts/besu-tlscert-gen/README.md +++ b/platforms/hyperledger-besu/charts/besu-tlscert-gen/README.md @@ -14,7 +14,7 @@ helm repo add bevel https://hyperledger.github.io/bevel helm install my-release bevel/besu-tlscert-gen ``` -## Prerequisitess +## Prerequisites - Kubernetes 1.19+ - Helm 3.2.0+ @@ -57,9 +57,9 @@ These parameters are refered to as same in each parent or chold chart | `global.vault.address`| URL of the Vault server. | `""` | | `global.vault.authPath` | Authentication path for Vault | `supplychain` | | `global.vault.network` | Network type which will determine the vault policy | `besu` | -| `global.vault.secretEngine` | Provide the value for vault secret engine name | `secretsv2` | -| `global.vault.secretPrefix` | Provide the value for vault secret prefix which must start with `data/` | `data/supplychain` | -| `global.proxy.externalUrlSuffix` | Provide the External URL suffix which will be used as CN to generate certificate | `test.blockchaincloudpoc.com` | +| `global.vault.secretEngine` | Vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | Vault secret prefix which must start with `data/` | `data/supplychain` | +| `global.proxy.externalUrlSuffix` | External URL suffix which will be used as CN to generate certificate | `test.blockchaincloudpoc.com` | ### Image @@ -67,14 +67,14 @@ These parameters are refered to as same in each parent or chold chart |------------|-----------|---------| | `image.repository` | Docker repository which will be used for this job | `ghcr.io/hyperledger/bevel-alpine` | | `image.tag` | Docker image tag which will be used for this job | `latest` | -| `image.pullSecret` | Provide the docker secret name | `""` | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | | `image.pullPolicy` | The pull policy for the image | `IfNotPresent` | ### Settings | Name | Description | Default Value | | ------------| -------------- | --------------- | | `settings.tmTls` | Set value to true when transaction manager like tessera uses tls. This enables TLS for the transaction manager and Besu node. | `True` | -| `settings.certSubject` | Provide the X.509 subject for root CA | `"CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB"` | +| `settings.certSubject` | The X.509 subject for root CA | `"CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB"` | ### Common parameters diff --git a/platforms/hyperledger-besu/charts/besu-tlscert-gen/templates/job-cleanup.yaml b/platforms/hyperledger-besu/charts/besu-tlscert-gen/templates/job-cleanup.yaml index aec761be4c1..2048640068f 100644 --- a/platforms/hyperledger-besu/charts/besu-tlscert-gen/templates/job-cleanup.yaml +++ b/platforms/hyperledger-besu/charts/besu-tlscert-gen/templates/job-cleanup.yaml @@ -43,5 +43,7 @@ spec: - -c args: - | - echo "Deleting tls-certs secret in k8s ..." - kubectl delete secret --namespace {{ .Release.Namespace }} {{ include "besu-tlscert-gen.name" . }}-tls-certs + if kubectl get secret --namespace {{ .Release.Namespace }} {{ include "besu-tlscert-gen.name" . }}-tls-certs &>/dev/null; then + echo "Deleting tls-certs secret in k8s ..." 
+ kubectl delete secret --namespace {{ .Release.Namespace }} {{ include "besu-tlscert-gen.name" . }}-tls-certs + fi diff --git a/platforms/hyperledger-besu/charts/besu-tlscert-gen/templates/job.yaml b/platforms/hyperledger-besu/charts/besu-tlscert-gen/templates/job.yaml index 66891ba8139..0270ecc814b 100644 --- a/platforms/hyperledger-besu/charts/besu-tlscert-gen/templates/job.yaml +++ b/platforms/hyperledger-besu/charts/besu-tlscert-gen/templates/job.yaml @@ -95,8 +95,7 @@ spec: cert=$(echo ${VAULT_SECRET} | jq -r '.["ambassadorcrt"]') # If the cert is null, empty, or contains a parse error, then the certificates do not exist in Vault - if [ "$cert" == "null" ] || [[ "$cert" = "parse error"* ]] || [ "$cert" = "" ] - then + if [ "$cert" == "null" ] || [[ "$cert" = *"error"* ]] || [ "$cert" = "" ]; then # Create a file to indicate that the ambassador TLS certificates are absent echo "Certficates absent in vault. Ignore error warning" touch ${OUTPUT_PATH}/ambassadortls_absent.txt @@ -120,7 +119,7 @@ spec: mountPath: /scripts/bevel-vault.sh subPath: bevel-vault.sh containers: - - name: "generate-certs" + - name: "generate-certs" image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" imagePullPolicy: {{ $.Values.image.pullPolicy }} env: @@ -313,8 +312,7 @@ spec: # Check if any of the certificate and key fields are missing, empty or having any kind of error for field in "$CA_PEM" "$CA_KEY" "$AMBASSADORCRT" "$AMBASSADORKEY" "$KEYSTORE" "$PASSWORD" "$KNOWNSERVER" do - if [ "$field" = "null" ] || [[ "$field" = "parse error"* ]] || [ "$field" = "" ] - then + if [ "$field" = "null" ] || [[ "$field" = *"error"* ]] || [ "$field" = "" ]; then AMBASSADORTLS_CERT_WRITTEN=false break else @@ -334,8 +332,7 @@ spec: # Check if any of the certificate and key fields are missing, empty or having any kind of error for field in "$CA_PEM" "$CA_KEY" "$AMBASSADORCRT" "$AMBASSADORKEY" do - if [ "$field" = "null" ] || [[ "$field" = "parse error"* ]] || [ "$field" = "" ] - then + if [ "$field" = "null" ] || [[ "$field" = *"error"* ]] || [ "$field" = "" ]; then AMBASSADORTLS_CERT_WRITTEN=false break else @@ -348,7 +345,7 @@ spec: rm payload.json fi; # Create tls secret with the certificates - kubectl get configmap --namespace {{ .Release.Namespace }} {{ include "besu-tlscert-gen.name" . }}-tls-certs + kubectl get secret --namespace {{ .Release.Namespace }} {{ include "besu-tlscert-gen.name" . }}-tls-certs if [ $? -ne 0 ]; then kubectl create secret tls --namespace {{ .Release.Namespace }} {{ include "besu-tlscert-gen.name" . }}-tls-certs \ --cert=${AMBASSADORTLS_PATH}/certchain.pem \ diff --git a/platforms/hyperledger-besu/configuration/roles/create/crypto/key_generation/tasks/main.yaml b/platforms/hyperledger-besu/configuration/roles/create/crypto/key_generation/tasks/main.yaml deleted file mode 100644 index 934a4ec1af2..00000000000 --- a/platforms/hyperledger-besu/configuration/roles/create/crypto/key_generation/tasks/main.yaml +++ /dev/null @@ -1,50 +0,0 @@ -############################################################################################## -# Copyright Walmart Inc. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- - -# Ensure the directory exists for storing keys -- name: Ensure directory exists - file: - path: "{{ build_path }}/crypto/{{ user }}//{{ org.name }}" - state: directory - recurse: yes - -# Check if the key is present in Vault -- name: Check if the {{ user }} key of {{ org.name }} is present in Vault - shell: | - # Retrieve the public and private keys from Vault - vault kv get -field=key_pub {{ vault.secret_path | default('secretsv2') }}/{{ component_ns }}/crypto/{{ user }} > "{{ build_path }}/crypto/{{ user }}/{{ org.name }}/key_pub" - vault kv get -field=key {{ vault.secret_path | default('secretsv2') }}/{{ component_ns }}/crypto/{{ user }} > "{{ build_path }}/crypto/{{ user }}/{{ org.name }}/key" - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: vault_result - ignore_errors: yes - -# Set a flag to generate keys if they are not found in Vault -- set_fact: - generate_key: True - when: vault_result.failed is defined and vault_result.failed == True - -# Generate a public key -- name: Generate {{ user }}'s public key for {{ org.name }} - shell: | - # Generate a public key and move it to the specified location - {{ bin_install_dir }}/besu/besu-{{ network.version }}/besu public-key export-address --to={{ build_path }}/crypto/{{ user }}/{{ org.name }}/key_pub - mv {{ bin_install_dir }}/besu/key {{ build_path }}/crypto/{{ user }}/{{ org.name }}/key - register: output - when: generate_key is defined and generate_key == True - -# Store the public and private keys in Vault -- name: Store the {{ user }}'s public and private keys in Vault - shell: | - # Store the public and private keys in Vault - vault kv put {{ vault.secret_path | default('secretsv2') }}/{{ component_ns }}/crypto/{{ user }} key="$(cat {{ build_path }}/crypto/{{ user }}/{{ org.name }}/key)" key_pub="$(cat {{ build_path }}/crypto/{{ user }}/{{ org.name }}/key_pub)" - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: generate_key is defined and generate_key == True diff --git a/platforms/hyperledger-besu/configuration/roles/create/crypto/node/tasks/main.yaml b/platforms/hyperledger-besu/configuration/roles/create/crypto/node/tasks/main.yaml deleted file mode 100644 index dd55ba45357..00000000000 --- a/platforms/hyperledger-besu/configuration/roles/create/crypto/node/tasks/main.yaml +++ /dev/null @@ -1,63 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Set node_list to empty -- name: Set node_list to empty - set_fact: - node_list: [] - -# Delete the previously created release file -- name: Delete release file {{ organisation }}-node-key-mgmt - file: - path: "{{ values_dir }}/{{ organisation }}/{{ organisation }}-node-key-mgmt.yaml" - state: absent - -# Git Push : Pushes the above generated files to git directory -- name: Git Push - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" - vars: - GIT_DIR: "{{ playbook_dir }}/../../../" - gitops: "{{ org.gitops }}" - msg: "[ci skip] Delete previous node key mgmt files" - -# Delete the previously created HelmRelease -- name: Delete the previous {{ organisation }}-node-key-mgmt HelmRelease - k8s: - api_version: "helm.toolkit.fluxcd.io/v2beta1" - kind: HelmRelease - name: "{{ organisation }}-node-key-mgmt" - namespace: "{{ organisation }}-bes" - state: absent - kubeconfig: "{{ kubernetes.config_file }}" - context: "{{ kubernetes.context }}" - -# Fetch all node (peers and validators) present in all organizations of the network -- name: Fetching all nodes of the organisation - set_fact: - node_list={{ node_list | default([]) + [ {'name':peer.name} ] }} - loop: "{{ org.services.peers is defined | ternary(org.services.peers, org.services.validators) }}" - loop_control: - loop_var: peer - -# Creates node key mgmt value file for each organization -- name: Create node key mgmt value file for each organization - include_role: - name: create/helm_component - vars: - name: "{{ org.name | lower }}" - component_name: "{{ name }}-node-key-mgmt" - component_ns: "{{ name }}-bes" - type: "node_key_mgmt" - -# Git Push : Pushes the above generated files to git directory -- name: Git Push - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" - vars: - GIT_DIR: "{{ playbook_dir }}/../../../" - gitops: "{{ org.gitops }}" - msg: "[ci skip] Pushing node key mgmt files" diff --git a/platforms/hyperledger-besu/configuration/roles/create/crypto/tessera/tasks/check_vault.yaml b/platforms/hyperledger-besu/configuration/roles/create/crypto/tessera/tasks/check_vault.yaml deleted file mode 100644 index 04037922a19..00000000000 --- a/platforms/hyperledger-besu/configuration/roles/create/crypto/tessera/tasks/check_vault.yaml +++ /dev/null @@ -1,22 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - - -# Check for the crypto material to the vault -- name: Check the crypto material to Vault - shell: | - vault kv get -field=privateKey {{ vault.secret_path | default('secretsv2') }}/{{ component_ns }}/crypto/{{ item.name }}/tm - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - with_items: "{{ peers }}" - register: vault_result - ignore_errors: yes - -# Set a fact based on vault_result -- set_fact: - generate_crypto_tessera: True - when: vault_result.failed is defined and vault_result.failed == True diff --git a/platforms/hyperledger-besu/configuration/roles/create/crypto/tessera/tasks/main.yaml b/platforms/hyperledger-besu/configuration/roles/create/crypto/tessera/tasks/main.yaml deleted file mode 100644 index 1b4ea201415..00000000000 --- a/platforms/hyperledger-besu/configuration/roles/create/crypto/tessera/tasks/main.yaml +++ /dev/null @@ -1,66 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Check the crypto material in the vault -- name: Check for the crypto material in the vault - include_tasks: check_vault.yaml - vars: - vault: "{{ org.vault }}" - peers: "{{ org.services.peers }}" - -# Wait for namespace creation for members -- name: "Wait for namespace creation for members" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/k8_component" - vars: - component_type: "Namespace" - component_name: "{{ component_ns }}" - type: "retry" - when: - - generate_crypto_tessera is defined - - generate_crypto_tessera - -# Generate Tessera crypto helmrelease file -- name: "Create tessera crypto file" - include_role: - name: create/helm_component - vars: - component_type: "crypto" - type: "besu_crypto_tessera" - name: "{{ org.name | lower }}" - component_name: "{{ peer.name }}-tessera-job" - loop: "{{ org.services.peers }}" - loop_control: - loop_var: peer - when: - - generate_crypto_tessera is defined - - generate_crypto_tessera - -# Push the created deployment files to repository -- name: "Push the created deployment files to repository" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" - vars: - GIT_DIR: "{{ playbook_dir }}/../../../" - msg: "[ci skip] Pushing tessera job files for {{ component_ns }}" - when: - - generate_crypto_tessera is defined - - generate_crypto_tessera - -# Check if tessera crypto job is completed -- name: Check if tessera crypto job is completed - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" - vars: - component_name: "{{ peer.name }}-tessera-job" - component_type: Job - namespace: "{{ component_ns }}" - loop: "{{ org.services.peers }}" - loop_control: - loop_var: peer - when: - - generate_crypto_tessera is defined - - generate_crypto_tessera diff --git a/platforms/hyperledger-besu/configuration/roles/create/helm_component/templates/ambassador_besu.tpl b/platforms/hyperledger-besu/configuration/roles/create/helm_component/templates/ambassador_besu.tpl index 2260d16461f..48a6e08e71a 100644 --- a/platforms/hyperledger-besu/configuration/roles/create/helm_component/templates/ambassador_besu.tpl +++ 
b/platforms/hyperledger-besu/configuration/roles/create/helm_component/templates/ambassador_besu.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/hyperledger-besu/configuration/roles/create/helm_component/templates/crypto_tessera.tpl b/platforms/hyperledger-besu/configuration/roles/create/helm_component/templates/crypto_tessera.tpl index f9621a4b4f8..63273029fa0 100644 --- a/platforms/hyperledger-besu/configuration/roles/create/helm_component/templates/crypto_tessera.tpl +++ b/platforms/hyperledger-besu/configuration/roles/create/helm_component/templates/crypto_tessera.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/hyperledger-besu/configuration/roles/create/helm_component/templates/member.tpl b/platforms/hyperledger-besu/configuration/roles/create/helm_component/templates/member.tpl index e8302c80c6d..83142b52376 100644 --- a/platforms/hyperledger-besu/configuration/roles/create/helm_component/templates/member.tpl +++ b/platforms/hyperledger-besu/configuration/roles/create/helm_component/templates/member.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name | replace('_','-') }} @@ -60,10 +60,12 @@ spec: {% endif %} tessera: removeKeysOnDelete: true +{% if org.type == 'member' or org.type is not defined %} peerNodes: {% for tm_node in network.config.tm_nodes %} - url: {{ tm_node | quote }} {% endfor %} +{% endif %} resources: cpuLimit: 0.25 cpuRequest: 0.05 @@ -105,7 +107,7 @@ spec: tag: {{ network.version }} node: removeKeysOnDelete: false - isBootnode: {{ peer.bootnode | default(false) }} + isBootnode: false usesBootnodes: false besu: identity: {{ peer.subject | quote }} diff --git a/platforms/hyperledger-besu/configuration/roles/create/helm_component/templates/node_key_mgmt.tpl b/platforms/hyperledger-besu/configuration/roles/create/helm_component/templates/node_key_mgmt.tpl index 9abdd10bafd..32a2a7f6a5e 100644 --- a/platforms/hyperledger-besu/configuration/roles/create/helm_component/templates/node_key_mgmt.tpl +++ b/platforms/hyperledger-besu/configuration/roles/create/helm_component/templates/node_key_mgmt.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/hyperledger-besu/configuration/roles/create/helm_component/templates/validator.tpl b/platforms/hyperledger-besu/configuration/roles/create/helm_component/templates/validator.tpl index 69a4aba7064..d8c40fa8fdb 100644 --- a/platforms/hyperledger-besu/configuration/roles/create/helm_component/templates/validator.tpl +++ b/platforms/hyperledger-besu/configuration/roles/create/helm_component/templates/validator.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name | replace('_','-') }} diff --git a/platforms/hyperledger-besu/configuration/roles/create/member/tasks/main.yaml b/platforms/hyperledger-besu/configuration/roles/create/member/tasks/main.yaml index f6f30106268..1f1d609cb3e 100644 --- a/platforms/hyperledger-besu/configuration/roles/create/member/tasks/main.yaml +++ 
b/platforms/hyperledger-besu/configuration/roles/create/member/tasks/main.yaml @@ -10,7 +10,14 @@ name: setup/genesis/secondary vars: values_dir: "./build/{{ component_ns }}" + when: org.type == 'member' +# Get the Genesis and staticnodes +- name: Get genesis and staticnodes + include_role: + name: get/genesis + when: org.type == 'member' and org.services.peers is defined + # Creates the Besu node value files for each node of organization - name: Create value file for Besu node include_role: @@ -44,21 +51,3 @@ component_name: "{{ member.name | lower }}" namespace: "{{ component_ns }}" when: org.services.peers is defined - -# Get the Genesis and staticnodes -- name: Get genesis and staticnodes - include_role: - name: get/genesis - when: org.services.peers is defined - -# Add the enode of new organizations to each of the existing nodes using rpc call only when ambassador is used -- name: Adding the enode of new peer to all existing peer. - include_role: - name: setup/new_member - loop: "{{ org.services.peers }}" - loop_control: - loop_var: peer - when: - - org.services.peers is defined - - network.config.besu_nodes is defined - - network.env.proxy == 'ambassador' diff --git a/platforms/hyperledger-besu/configuration/roles/create/validator/tasks/main.yaml b/platforms/hyperledger-besu/configuration/roles/create/validator/tasks/main.yaml index 45491fa4f6f..aa469e51c69 100644 --- a/platforms/hyperledger-besu/configuration/roles/create/validator/tasks/main.yaml +++ b/platforms/hyperledger-besu/configuration/roles/create/validator/tasks/main.yaml @@ -16,7 +16,7 @@ loop_var: peer when: org.services.validators is defined -# Git Push : Pushes the above generated files to git directory +# Git Push : Pushes the above generated files to git - name: Git Push include_role: name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" diff --git a/platforms/hyperledger-besu/configuration/roles/delete/vault_secrets/tasks/main.yaml b/platforms/hyperledger-besu/configuration/roles/delete/vault_secrets/tasks/main.yaml index c2111410480..b7968d8d224 100644 --- a/platforms/hyperledger-besu/configuration/roles/delete/vault_secrets/tasks/main.yaml +++ b/platforms/hyperledger-besu/configuration/roles/delete/vault_secrets/tasks/main.yaml @@ -19,7 +19,7 @@ state: absent kubeconfig: "{{ kubernetes.config_file }}" context: "{{ kubernetes.context }}" - ignore_errors: yes + ignore_errors: true # Deletes crypto materials - name: Delete Crypto material @@ -28,10 +28,10 @@ vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/tessera-{{ peer.name }}-keys vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/tlscerts vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/genesis - loop: "{{ services.peers is defined | ternary( services.peers, services.validators) }}" environment: VAULT_ADDR: "{{ item.vault.url }}" VAULT_TOKEN: "{{ item.vault.root_token }}" + loop: "{{ services.peers is defined | ternary( services.peers, services.validators) }}" loop_control: loop_var: peer - ignore_errors: yes + ignore_errors: true diff --git a/platforms/hyperledger-fabric/charts/README.md b/platforms/hyperledger-fabric/charts/README.md new file mode 100644 index 00000000000..2138f817aa7 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/README.md @@ -0,0 +1,257 @@ +[//]: # (##############################################################################################) +[//]: # 
(Copyright Accenture. All Rights Reserved.) +[//]: # (SPDX-License-Identifier: Apache-2.0) +[//]: # (##############################################################################################) + +# Charts for Hyperledger Fabric components + +## About +This folder contains the Helm charts which are used for the deployment of the Hyperledger Fabric components. Each Helm chart that you can use has the following keys, which you need to set. The `global.cluster.provider` is used as a key for the various cloud features enabled. Also, you only need to specify one cloud provider, **not** both, if deploying to cloud. As of writing this doc, AWS and Azure are fully supported. + +```yaml +global: + serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws | azure + cloudNativeServices: false # future: set to true to use Cloud Native Services + kubernetesUrl: "https://yourkubernetes.com" # Provide the k8s URL, ignore if not using Hashicorp Vault + vault: + type: hashicorp # choose from hashicorp | kubernetes + network: fabric # must be fabric for these charts + # Following are necessary only when hashicorp vault is used. + address: http://vault.url:8200 + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + role: vault-role + proxy: + provider: haproxy # choose from haproxy | none + externalUrlSuffix: test.yourdomain.com +``` + +## Usage + +### Pre-requisites + +- Kubernetes Cluster (either Managed cloud option like EKS or local like minikube) +- Accessible and unsealed HashiCorp Vault (if using Vault) +- Configured Haproxy (if using Haproxy as proxy) +- Update the dependencies + ``` + helm dependency update fabric-ca-server + helm dependency update fabric-orderernode + helm dependency update fabric-peernode + ``` + +### _Without Proxy or Vault_ + +#### Setup Orderers and Peers in an organization +```bash +# Install the CA Server +helm upgrade --install supplychain-ca ./fabric-ca-server --namespace supplychain-net --create-namespace --values ./values/noproxy-and-novault/ca-orderer.yaml + +# Install the Orderers after CA server is running +helm upgrade --install orderer1 ./fabric-orderernode --namespace supplychain-net --values ./values/noproxy-and-novault/orderer.yaml +helm upgrade --install orderer2 ./fabric-orderernode --namespace supplychain-net --values ./values/noproxy-and-novault/orderer.yaml --set certs.settings.createConfigMaps=false +helm upgrade --install orderer3 ./fabric-orderernode --namespace supplychain-net --values ./values/noproxy-and-novault/orderer.yaml --set certs.settings.createConfigMaps=false +``` + +**Note** The orderers will remain waiting in the `Pending` state for Fabric 2.2.x until we install the `fabric-genesis` chart.
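Before installing the peers, a quick check of the namespace can confirm the state described in the note above; this is a sketch assuming the `supplychain-net` namespace used throughout these examples.

```bash
# The CA server pod should be Running; on Fabric 2.2.x the orderer pods will
# stay Pending until the fabric-genesis chart (installed in a later step) is in place.
kubectl --namespace supplychain-net get pods
```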
+ +```bash +# OPTIONAL: To use a custom peer configuration, copy core.yaml file into ./fabric-peernode/files +cp /home/bevel/build/peer0-core.yaml ./fabric-peernode/conf/default_core.yaml +# Install the peers +helm upgrade --install peer0 ./fabric-peernode --namespace supplychain-net --values ./values/noproxy-and-novault/peer.yaml +helm upgrade --install peer1 ./fabric-peernode --namespace supplychain-net --values ./values/noproxy-and-novault/peer.yaml --set peer.gossipPeerAddress=peer0.supplychain-net:7051 --set peer.cliEnabled=true +``` + +#### Setup Peers in another organization + +```bash +# Install the CA Server +helm upgrade --install carrier-ca ./fabric-ca-server --namespace carrier-net --create-namespace --values ./values/noproxy-and-novault/ca-peer.yaml + +# Get the Orderer tls certificate and place in fabric-peernode/files +cd ./fabric-peernode/files +kubectl --namespace supplychain-net get configmap orderer-tls-cacert -o jsonpath='{.data.cacert}' > orderer.crt + +# Install the Peers +cd ../.. +helm upgrade --install peer0 ./fabric-peernode --namespace carrier-net --values ./values/noproxy-and-novault/carrier.yaml +``` + +#### Create Genesis file and other channel artifacts +```bash +# Obtain certificates and the configuration file of each peer organization, place in fabric-genesis/files +cd ./fabric-genesis/files +kubectl --namespace carrier-net get secret admin-msp -o json > carrier.json +kubectl --namespace carrier-net get configmap peer0-msp-config -o json > carrier-config-file.json + +# OPTIONAL: If additional orderer from a different organization is needed in genesis +kubectl --namespace carrier-net get secret orderer5-tls -o json > orderer5-orderer-tls.json + +# Generate the genesis block +cd ../.. +helm install genesis ./fabric-genesis --namespace supplychain-net --values ./values/noproxy-and-novault/genesis.yaml +``` + +#### Create channel for Hyperledger Fabric 2.5.x +```bash +# Create channel +helm install allchannel ./fabric-osnadmin-channel-create --namespace supplychain-net --set global.vault.type=kubernetes + +# Join peer to channel and make it an anchorpeer +helm install peer0-allchannel ./fabric-channel-join --namespace supplychain-net --set global.vault.type=kubernetes +helm install peer1-allchannel ./fabric-channel-join --namespace supplychain-net --set global.vault.type=kubernetes --set peer.name=peer1 --set peer.address=peer1.supplychain-net:7051 + +# Join peer from another organization to channel and make it an anchorpeer +helm install peer0-allchannel ./fabric-channel-join --namespace carrier-net --values ./values/noproxy-and-novault/join-channel.yaml +``` +**Note** Anchorpeer job is only executed if `peer.type` is set to `anchor` + +#### Create channel for Hyperledger Fabric 2.2.x + +```bash +# Obtain the file channel.tx and place it in fabric-channel-create/files +cd ./fabric-channel-create/files +kubectl --namespace supplychain-net get configmap allchannel-channeltx -o jsonpath='{.data.allchannel-channeltx_base64}' > channeltx.json + +# Install create channel +cd ../.. +helm install allchannel ./fabric-channel-create --namespace carrier-net --set global.vault.type=kubernetes + +# Join peer to channel and make it an anchorpeer. Repeat for each peer organization. 
+# Get the file anchors.tx and place it in fabric-channel-join/files +cd ./fabric-channel-join/files +kubectl --namespace supplychain-net get configmap allchannel-supplychain-anchortx -o jsonpath='{.data.allchannel-supplychain-anchortx_base64}' > anchortx.json + +# Install join channel and anchorpeer +cd ../.. +helm install peer0-allchannel ./fabric-channel-join --namespace supplychain-net --set global.vault.type=kubernetes --set global.version=2.2.2 +helm install peer1-allchannel ./fabric-channel-join --namespace supplychain-net --set global.vault.type=kubernetes --set global.version=2.2.2 --set peer.name=peer1 --set peer.address=peer1.supplychain-net:7051 --set peer.type=general + +# Join peer from another organization to channel and make it an anchorpeer +cd ./fabric-channel-join/files +kubectl --namespace supplychain-net get configmap allchannel-carrier-anchortx -o jsonpath='{.data.allchannel-carrier-anchortx_base64}' > anchortx.json +cd ../.. +helm install peer0-allchannel ./fabric-channel-join --namespace carrier-net --values ./values/noproxy-and-novault/join-channel.yaml +``` +**Note** Anchorpeer job is only executed if `peer.type` is set to `anchor` + +### _With Haproxy Proxy and Vault_ + +#### Setup Orderers and Peers in an organization + +Replace the `"http://vault.url:8200"`, `"https://yourkubernetes.com"` and `"test.yourdomain.com"` in all the files in `./values/proxy-and-vault/` folder and this file. + +```bash +kubectl create namespace supplychain-net + +kubectl -n supplychain-net create secret generic roottoken --from-literal=token= + +helm upgrade --install supplychain-ca ./fabric-ca-server --namespace supplychain-net --values ./values/proxy-and-vault/ca-orderer.yaml + +# Install the Orderers after CA server is running +helm upgrade --install orderer1 ./fabric-orderernode --namespace supplychain-net --values ./values/proxy-and-vault/orderer.yaml +helm upgrade --install orderer2 ./fabric-orderernode --namespace supplychain-net --values ./values/proxy-and-vault/orderer.yaml --set certs.settings.createConfigMaps=false +helm upgrade --install orderer3 ./fabric-orderernode --namespace supplychain-net --values ./values/proxy-and-vault/orderer.yaml --set certs.settings.createConfigMaps=false +``` + +**Note** The orderers will remain waiting in the `Pending` state for Fabric 2.2.x, until we install the `fabric-genesis` chart. + +```bash +# OPTIONAL: To use a custom peer configuration, copy core.yaml file into ./fabric-peernode/files +cp /home/bevel/build/peer0-core.yaml ./fabric-peernode/conf/default_core.yaml +# Install the peers +helm upgrade --install peer0 ./fabric-peernode --namespace supplychain-net --values ./values/proxy-and-vault/peer.yaml +helm upgrade --install peer1 ./fabric-peernode --namespace supplychain-net --values ./values/proxy-and-vault/peer.yaml --set peer.gossipPeerAddress=peer0.supplychain-net.hlf.blockchaincloudpoc-develop.com:443 --set peer.cliEnabled=true +``` + +#### Setup Peers in another organization + +```bash +kubectl create namespace carrier-net +kubectl -n carrier-net create secret generic roottoken --from-literal=token= +# Install the CA Server +helm upgrade --install carrier-ca ./fabric-ca-server --namespace carrier-net --values ./values/proxy-and-vault/ca-peer.yaml + +# Get the Orderer tls certificate and place in fabric-peernode/files +cd ./fabric-peernode/files +kubectl --namespace supplychain-net get configmap orderer-tls-cacert -o jsonpath='{.data.cacert}' > orderer.crt + +# Install the Peers +cd ../.. 
+helm upgrade --install peer0 ./fabric-peernode --namespace carrier-net --values ./values/proxy-and-vault/carrier.yaml +``` + +#### Create Genesis file and other channel artifacts +```bash +# Obtain certificates and the configuration file of each peer organization, place in fabric-genesis/files +cd ./fabric-genesis/files +kubectl --namespace carrier-net get secret admin-msp -o json > carrier.json +kubectl --namespace carrier-net get configmap peer0-msp-config -o json > carrier-config-file.json + +# OPTIONAL: If additional orderer from a different organization is needed in genesis +kubectl --namespace carrier-net get secret orderer5-tls -o json > orderer5-orderer-tls.json + +# Generate the genesis block +cd ../.. +helm install genesis ./fabric-genesis --namespace supplychain-net --values ./values/proxy-and-vault/genesis.yaml +``` + +#### Create channel for Hyperledger Fabric 2.5.x +```bash +# Create channel +helm install allchannel ./fabric-osnadmin-channel-create --namespace supplychain-net --values ./values/proxy-and-vault/osn-create-channel.yaml + +# Join peer to channel and make it an anchorpeer +helm install peer0-allchannel ./fabric-channel-join --namespace supplychain-net --values ./values/proxy-and-vault/join-channel.yaml +helm install peer1-allchannel ./fabric-channel-join --namespace supplychain-net --values ./values/proxy-and-vault/join-channel.yaml --set peer.name=peer1 --set peer.address=peer1.supplychain-net.test.yourdomain.com:443 + +# Join peer from another organization to channel and make it an anchorpeer +helm install peer0-allchannel ./fabric-channel-join --namespace carrier-net --values ./values/proxy-and-vault/create-channel.yaml --set global.version=2.5.4 +``` +**Note** Anchorpeer job is only executed if `peer.type` is set to `anchor` + +#### Create channel for Hyperledger Fabric 2.2.x +```bash +# Obtain the file channel.tx and place it in fabric-channel-create/files +cd ./fabric-channel-create/files +kubectl --namespace supplychain-net get configmap allchannel-channeltx -o jsonpath='{.data.allchannel-channeltx_base64}' > channeltx.json + +# Install create channel +cd ../.. +helm install allchannel ./fabric-channel-create --namespace carrier-net --values ./values/proxy-and-vault/create-channel.yaml + +# Join peer to channel and make it an anchorpeer. Repeat for each peer organization. +# Get the file anchors.tx and place it in fabric-channel-join/files +cd ./fabric-channel-join/files +kubectl --namespace supplychain-net get configmap allchannel-supplychain-anchortx -o jsonpath='{.data.allchannel-supplychain-anchortx_base64}' > anchortx.json + +# Install join channel and anchorpeer +cd ../.. +helm install peer0-allchannel ./fabric-channel-join --namespace supplychain-net --values ./values/proxy-and-vault/join-channel.yaml +helm install peer1-allchannel ./fabric-channel-join --namespace supplychain-net --values ./values/proxy-and-vault/join-channel.yaml --set peer.name=peer1 --set peer.address=peer1.supplychain-net.test.yourdomain.com:443 --set peer.type=general + +# Join peer from another organization to channel and make it an anchorpeer +cd ./fabric-channel-join/files +kubectl --namespace supplychain-net get configmap allchannel-carrier-anchortx -o jsonpath='{.data.allchannel-carrier-anchortx_base64}' > anchortx.json +cd ../.. 
+helm install peer0-allchannel ./fabric-channel-join --namespace carrier-net --values ./values/proxy-and-vault/create-channel.yaml +``` +**Note** Anchorpeer job is only executed if `peer.type` is set to `anchor` + +### Clean-up + +To clean up, just uninstall the helm releases +```bash +helm uninstall --namespace supplychain-net peer1-allchannel peer0-allchannel +helm uninstall --namespace supplychain-net peer0 peer1 +helm uninstall --namespace supplychain-net orderer1 orderer2 orderer3 +helm uninstall --namespace supplychain-net genesis allchannel +helm uninstall --namespace supplychain-net supplychain-ca + +helm uninstall --namespace carrier-net peer0 peer0-allchannel allchannel +helm uninstall --namespace carrier-net carrier-ca +``` diff --git a/platforms/hyperledger-fabric/charts/fabric-anchorpeer/Chart.yaml b/platforms/hyperledger-fabric/charts/fabric-anchorpeer/Chart.yaml deleted file mode 100644 index b59929ffe7f..00000000000 --- a/platforms/hyperledger-fabric/charts/fabric-anchorpeer/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "Hyperledger Fabric: Updates the anchorpeer details." -name: fabric-anchorpeer -version: 1.0.0 diff --git a/platforms/hyperledger-fabric/charts/fabric-anchorpeer/README.md b/platforms/hyperledger-fabric/charts/fabric-anchorpeer/README.md deleted file mode 100644 index 4cc51aa258d..00000000000 --- a/platforms/hyperledger-fabric/charts/fabric-anchorpeer/README.md +++ /dev/null @@ -1,199 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) -[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# Anchor Peer Hyperledger Fabric Deployment - -- [Anchor Peer Hyperledger Fabric Deployment Helm Chart](#anchor-peer-hyperledger-fabric-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Verification](#verification) -- [Updating the Deployment](#updating-the-deployment) -- [Deletion](#deletion) -- [Contributing](#contributing) -- [License](#license) - - - -## Anchor Peer Hyperledger Fabric Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-fabric/charts/fabric-anchorpeer) updates the anchor peers for the Hyperledger Fabric channel. - - - -## Prerequisites ---- -Before deploying the Helm chart, make sure to have the following prerequisites: - -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm installed. - - - -## Chart Structure ---- -The structure of the Helm chart is as follows: - -``` -fabric-anchorpeer/ - |- templates/ - |- _helpers.yaml - |- anchorpeer.yaml - |- configmap.yaml - |- Chart.yaml - |- README.md - |- values.yaml -``` - -- `templates/`: Contains the Kubernetes manifest templates that define the resources to be deployed. -- `helpers.tpl`: Contains custom label definitions used in other templates. 
-- `anchorpeer.yaml`: Uses two initContainers to fetch the orderer TLS certificates and the MSP certificates from Vault. The main container then uses the fetched certificates to update the anchor peer for the channel. -- `configmap.yaml`: Stores configuration data for an anchor peer. The file contains two ConfigMaps, one for the configuration data and one for the artifacts. The configuration ConfigMap contains the key-value pairs that are used to configure the peer, and the artifacts ConfigMap contains the base64-encoded transaction that anchors the peer to the channel. -- `Chart.yaml`: Contains the metadata for the Helm chart, such as the name, version, and description. -- `README.md`: Provides information and instructions about the Helm chart. -- `values.yaml`: Contains the default configuration values for the Helm chart. - - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-fabric/charts/fabric-anchorpeer/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: - -## Parameters ---- - -### Metadata - -| Name | Description | Default Value | -| ----------------------| ----------------------------------------------------------------------------------| --------------------------------------------------| -| namespace | Provide the namespace for organization's peer | org1-net | -| images.fabrictools | Provide the valid image name and version | ghcr.io/hyperledger/bevel-fabric-tools:2.2.2 | -| images.alpineutils | Provide the valid image name and version to read certificates from vault server | ghcr.io/hyperledger/bevel-alpine:latest | -| labels | Provide the custom labels | "" | - -### Peer - -| Name | Description | Default Value | -| --------------| --------------------------------------------------------------------------------------------------------| ------------------------------| -| name | Provide the name of the peer as per deployment yaml | peer0 | -| address | Provide the address of the peer which will update the channel about the anchor peer of the organization | peer0.org1-net:7051 | -| localmspid | Provide the localmspid for organization | org1MSP | -| loglevel | Provide the loglevel for organization's peer | debug | -| tlsstatus | Provide the value for tlsstatus to be true or false for organization's peer | true | - -### Vault - -| Name | Description | Default Value | -| ---------------------| ----------------------------------------------------------------------------| -----------------------------| -| role | Provide the vaultrole for an organization | vault-role | -| address | Provide the vault server address | "" | -| authpath | Provide the kubernetes auth backed configured in vault for an organization | devorg1-net-auth | -| adminsecretprefix | Provide the value for vault secretprefix | secretsv2/data/crypto/peerOrganizations/org1-net/users/admin | -| orderersecretprefix | Provide the value for vault secretprefix | secretsv2/data/data/crypto/peerOrganizations/org1-nets/orderer | -| serviceaccountname | Provide the serviceaccount name for vault | vault-auth | -| type | Provide the type of vault | hashicorp | -| imagesecretname | Provide the imagesecretname for vault | "" | -| tls | Enable or disable TLS for vault communication | "" | - -### Channel - -| Name | Description | Default Value | -| ----------| -------------------------------------|---------------| -| name | Provide the name of 
the channel | mychannel | - -### orderer - -| Name | Description | Default Value | -| -----------| -----------------------------------|----------------------------| -| address | Provide the address for orderer | orderer1.org1proxy.blockchaincloudpoc.com:443 | - -### anchorstx - -| Name | Description | Default Value | -| ---------------| ---------------------------------------------------------| ------------- | -| anchorstx | Provide the base64 encoded file contents for anchorstx | "" | - - - -## Deployment ---- - -To deploy the fabric-anchorpeer Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-anchorpeer/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install the chart: - ``` - $ helm repo add bevel https://hyperledger.github.io/bevel/ - $ helm install ./fabric-anchorpeer - ``` -Replace `` with the desired name for the release. - -This will deploy the fabric-anchorpeer job to the Kubernetes cluster based on the provided configurations. - - - -## Verification ---- - -To verify the deployment, we can use the following command: -``` -$ kubectl get jobs -n -``` -Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. - - - -## Updating the Deployment ---- - -If we need to update the deployment with new configurations or changes, modify the same [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-anchorpeer/values.yaml) file with the desired changes and run the following Helm command: -``` -$ helm upgrade ./fabric-anchorpeer -``` -Replace `` with the name of the release. This command will apply the changes to the deployment, ensuring the fabric-anchorpeer node is up to date. - - -## Deletion ---- - -To delete the deployment and associated resources, run the following Helm command: -``` -$ helm uninstall -``` -Replace `` with the name of the release. This command will remove all the resources created by the Helm chart. - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Anchor Peer Hyperledger Fabric Deployment Helm Chart](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-anchorpeer), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - -## License - -This chart is licensed under the Apache v2.0 license. - -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-``` diff --git a/platforms/hyperledger-fabric/charts/fabric-anchorpeer/templates/_helpers.tpl b/platforms/hyperledger-fabric/charts/fabric-anchorpeer/templates/_helpers.tpl deleted file mode 100644 index d43c09d8cef..00000000000 --- a/platforms/hyperledger-fabric/charts/fabric-anchorpeer/templates/_helpers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} diff --git a/platforms/hyperledger-fabric/charts/fabric-anchorpeer/templates/anchorpeer.yaml b/platforms/hyperledger-fabric/charts/fabric-anchorpeer/templates/anchorpeer.yaml deleted file mode 100644 index 2671a754a77..00000000000 --- a/platforms/hyperledger-fabric/charts/fabric-anchorpeer/templates/anchorpeer.yaml +++ /dev/null @@ -1,181 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: batch/v1 -kind: Job -metadata: - name: anchorpeer-{{ $.Values.peer.name }}-{{ $.Values.channel.name }} - namespace: {{ $.Values.metadata.namespace }} - labels: - app: {{ .Release.Name }} - app.kubernetes.io/name: anchorpeer-{{ $.Values.peer.name }}-{{ $.Values.channel.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} -spec: - backoffLimit: 6 - template: - metadata: - labels: - app: {{ .Release.Name }} - app.kubernetes.io/name: anchorpeer-{{ $.Values.peer.name }}-{{ $.Values.channel.name }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - restartPolicy: "OnFailure" - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - {{- if .Values.vault.imagesecretname }} - imagePullSecrets: - - name: {{ $.Values.vault.imagesecretname }} - {{- end }} - volumes: - {{ if .Values.vault.tls }} - - name: vaultca - secret: - secretName: {{ $.Values.vault.tls }} - items: - - key: ca.crt.pem - path: ca-certificates.crt # curl expects certs to be in /etc/ssl/certs/ca-certificates.crt - {{ end }} - - name: certificates - emptyDir: - medium: Memory - - name: anchorpeer-artifacts - configMap: - name: anchorpeer-{{ $.Values.channel.name }}-{{ $.Values.peer.name }}-artifacts - - name: scripts-volume - configMap: - name: bevel-vault-script - initContainers: - - name: certificates-init - image: {{ $.Values.metadata.images.alpineutils }} - imagePullPolicy: IfNotPresent - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: VAULT_PEER_SECRET_PREFIX - value: "{{ $.Values.vault.adminsecretprefix }}" - - name: VAULT_ORDERER_SECRET_PREFIX - value: "{{ $.Values.vault.orderersecretprefix }}" - - name: MOUNT_PATH - value: /secret - - name: VAULT_TYPE - value: "{{ $.Values.vault.type }}" - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - . /scripts/bevel-vault.sh - - # Calling a function to retrieve the vault token. - vaultBevelFunc "init" - - echo "Getting Orderer TLS certificates from Vault." 
- vaultBevelFunc "readJson" "${VAULT_ORDERER_SECRET_PREFIX}/tls" - - TLS_CA_CERT=$(echo ${VAULT_SECRET} | jq -r '.["ca.crt"]') - OUTPUT_PATH="${MOUNT_PATH}/orderer/tls" - mkdir -p ${OUTPUT_PATH} - echo "${TLS_CA_CERT}" >> ${OUTPUT_PATH}/ca.crt - - echo "Getting MSP certificates from Vault." - vaultBevelFunc "readJson" "${VAULT_PEER_SECRET_PREFIX}/msp" - - ADMINCERT=$(echo ${VAULT_SECRET} | jq -r '.["admincerts"]') - CACERTS=$(echo ${VAULT_SECRET} | jq -r '.["cacerts"]') - KEYSTORE=$(echo ${VAULT_SECRET} | jq -r '.["keystore"]') - SIGNCERTS=$(echo ${VAULT_SECRET} | jq -r '.["signcerts"]') - TLSCACERTS=$(echo ${VAULT_SECRET} | jq -r '.["tlscacerts"]') - - OUTPUT_PATH="${MOUNT_PATH}/admin/msp" - mkdir -p ${OUTPUT_PATH}/admincerts - mkdir -p ${OUTPUT_PATH}/cacerts - mkdir -p ${OUTPUT_PATH}/keystore - mkdir -p ${OUTPUT_PATH}/signcerts - mkdir -p ${OUTPUT_PATH}/tlscacerts - - echo "${ADMINCERT}" >> ${OUTPUT_PATH}/admincerts/admin.crt - echo "${CACERTS}" >> ${OUTPUT_PATH}/cacerts/ca.crt - echo "${KEYSTORE}" >> ${OUTPUT_PATH}/keystore/server.key - echo "${SIGNCERTS}" >> ${OUTPUT_PATH}/signcerts/server.crt - echo "${TLSCACERTS}" >> ${OUTPUT_PATH}/tlscacerts/tlsca.crt - volumeMounts: - {{ if .Values.vault.tls }} - - name: vaultca - mountPath: "/etc/ssl/certs/" - readOnly: true - {{ end }} - - name: certificates - mountPath: /secret - - name: scripts-volume - mountPath: /scripts/bevel-vault.sh - subPath: bevel-vault.sh - containers: - - name: anchorpeer - image: {{ $.Values.metadata.images.fabrictools }} - imagePullPolicy: IfNotPresent - stdin: true - tty: true - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - - version2_5=`echo $NETWORK_VERSION | grep -c 2.5` - - if [ $version2_5 = 1 ] - then - echo "Fetching the most recent configuration block for the channel" - peer channel fetch config config_block.pb -o ${ORDERER_URL} -c ${CHANNEL_NAME} --tls --cafile ${ORDERER_CA} - - echo "Decoding config block to JSON and isolating config to ${CORE_PEER_LOCALMSPID}config.json" - configtxlator proto_decode --input config_block.pb --type common.Block --output config_block.json - jq .data.data[0].payload.data.config config_block.json >"${CORE_PEER_LOCALMSPID}config.json" - - PORT="${CORE_PEER_ADDRESS##*:}" - HOST="${CORE_PEER_ADDRESS%%:*}" - jq '.channel_group.groups.Application.groups.'${CORE_PEER_LOCALMSPID}'.values += {"AnchorPeers":{"mod_policy": "Admins","value":{"anchor_peers": [{"host": "'$HOST'","port": '$PORT'}]},"version": "0"}}' ${CORE_PEER_LOCALMSPID}config.json > ${CORE_PEER_LOCALMSPID}modified_config.json - - configtxlator proto_encode --input "${CORE_PEER_LOCALMSPID}config.json" --type common.Config --output original_config.pb - configtxlator proto_encode --input "${CORE_PEER_LOCALMSPID}modified_config.json" --type common.Config --output modified_config.pb - configtxlator compute_update --channel_id "${CHANNEL_NAME}" --original original_config.pb --updated modified_config.pb --output config_update.pb - configtxlator proto_decode --input config_update.pb --type common.ConfigUpdate --output config_update.json - echo '{"payload":{"header":{"channel_header":{"channel_id":"'$CHANNEL_NAME'", "type":2}},"data":{"config_update":'$(cat config_update.json)'}}}' | jq . 
>config_update_in_envelope.json - configtxlator proto_encode --input config_update_in_envelope.json --type common.Envelope --output "${CORE_PEER_LOCALMSPID}anchors.tx" - - peer channel update -o ${ORDERER_URL} -c ${CHANNEL_NAME} -f ${CORE_PEER_LOCALMSPID}anchors.tx --tls --cafile ${ORDERER_CA} - else - echo "Updating anchor peer for the channel ${CHANNEL_NAME}" - tls_status=${CORE_PEER_TLS_ENABLED} - if [ "$tls_status" = "true" ] - then - peer channel fetch 0 ${CHANNEL_NAME}.block -o ${ORDERER_URL} -c ${CHANNEL_NAME} --tls --cafile ${ORDERER_CA} - else - peer channel fetch 0 ${CHANNEL_NAME}.block -o ${ORDERER_URL} -c ${CHANNEL_NAME} - fi - cat ./channel-artifacts/anchors.tx.base64 | base64 -d > ${CORE_PEER_LOCALMSPID}anchors.tx - if [ "$tls_status" = "true" ] - then - peer channel update -o ${ORDERER_URL} -c ${CHANNEL_NAME} -f ${CORE_PEER_LOCALMSPID}anchors.tx --tls --cafile ${ORDERER_CA} - else - peer channel update -o ${ORDERER_URL} -c ${CHANNEL_NAME} -f ${CORE_PEER_LOCALMSPID}anchors.tx - fi - fi - workingDir: /opt/gopath/src/github.com/hyperledger/fabric/peer - envFrom: - - configMapRef: - name: anchorpeer-{{ $.Values.channel.name }}-{{ $.Values.peer.name }}-config - volumeMounts: - - name: certificates - mountPath: /opt/gopath/src/github.com/hyperledger/fabric/crypto - readOnly: true - - name: anchorpeer-artifacts - mountPath: /opt/gopath/src/github.com/hyperledger/fabric/peer/channel-artifacts - readOnly: true diff --git a/platforms/hyperledger-fabric/charts/fabric-anchorpeer/templates/configmap.yaml b/platforms/hyperledger-fabric/charts/fabric-anchorpeer/templates/configmap.yaml deleted file mode 100644 index 724c969286c..00000000000 --- a/platforms/hyperledger-fabric/charts/fabric-anchorpeer/templates/configmap.yaml +++ /dev/null @@ -1,44 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -kind: ConfigMap -metadata: - name: anchorpeer-{{ $.Values.channel.name }}-{{ $.Values.peer.name }}-config - namespace: {{ $.Values.metadata.namespace }} - labels: - app.kubernetes.io/name: anchorpeer-{{ $.Values.channel.name }}-{{ $.Values.peer.name }}-config - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . 
| nindent 2 }} -data: - CHANNEL_NAME: {{ $.Values.channel.name }} - FABRIC_LOGGING_SPEC: {{ $.Values.peer.loglevel }} - CORE_PEER_ID: {{ $.Values.peer.name }}.{{ $.Values.metadata.namespace }} - CORE_PEER_ADDRESS: {{ $.Values.peer.address }} - CORE_PEER_LOCALMSPID: {{ $.Values.peer.localmspid }} - CORE_PEER_TLS_ENABLED: "{{ $.Values.peer.tlsstatus }}" - CORE_PEER_TLS_ROOTCERT_FILE: /opt/gopath/src/github.com/hyperledger/fabric/crypto/admin/msp/tlscacerts/tlsca.crt - ORDERER_CA: /opt/gopath/src/github.com/hyperledger/fabric/crypto/orderer/tls/ca.crt - ORDERER_URL: {{ $.Values.orderer.address }} - CORE_PEER_MSPCONFIGPATH: /opt/gopath/src/github.com/hyperledger/fabric/crypto/admin/msp - NETWORK_VERSION: {{ $.Values.metadata.network.version }} - ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: anchorpeer-{{ $.Values.channel.name }}-{{ $.Values.peer.name }}-artifacts - namespace: {{ $.Values.metadata.namespace }} - labels: - app.kubernetes.io/name: anchorpeer-{{ $.Values.channel.name }}-{{ $.Values.peer.name }}-artifacts - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} -data: - anchors.tx.base64: {{ .Values.anchorstx | quote }} diff --git a/platforms/hyperledger-fabric/charts/fabric-anchorpeer/values.yaml b/platforms/hyperledger-fabric/charts/fabric-anchorpeer/values.yaml deleted file mode 100644 index 055b0c94dea..00000000000 --- a/platforms/hyperledger-fabric/charts/fabric-anchorpeer/values.yaml +++ /dev/null @@ -1,83 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -metadata: - #Provide the namespace for organization's peer - #Eg. namespace: org1-net - namespace: org1-net - images: - #Provide the valid image name and version for fabric tools - #Eg. fabric-tools: hyperledger/fabrictools:1.4.0 - fabrictools: ghcr.io/hyperledger/bevel-fabric-tools:2.2.2 - #Provide the valid image name and version to read certificates from vault server - #Eg.alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - #Provide the custom labels - #NOTE: Provide labels other than name, release name , release service, chart version , chart name , app. - #Eg. labels: - # role: anchorpeer - labels: - -peer: - #Provide the name of the peer as per deployment yaml. - #Eg. name: peer0 - name: peer0 - #Provide the address of the peer which will update the channel about the anchor peer of the organization - #Eg. address: peer0.org1-net:7051 - address: peer0.org1-net:7051 - #Provide the localmspid for organization - #Eg. localmspid: Org1MSP - localmspid: org1MSP - #Provide the loglevel for organization's peer - #Eg. loglevel: info - loglevel: debug - #Provide the value for tlsstatus to be true or false for organization's peer - #Eg. tlsstatus: true - tlsstatus: true - -vault: - #Provide the vaultrole for an organization - #Eg. vaultrole: org1-vault-role - role: vault-role - #Provide the vault server address - #Eg. vaultaddress: http://Vault-884963190.eu-west-1.elb.amazonaws.com - address: - #Provide the kubernetes auth backed configured in vault for an organization - #Eg. 
authpath: fra-demo-hlkube-cluster-org1 - authpath: devorg1-net-auth - #Provide the value for vault secretprefix - #Eg. adminsecretprefix: secretsv2/data/... - adminsecretprefix: secretsv2/data/crypto/peerOrganizations/org1-net/users/admin - #Provide the value for vault secretprefix - #Eg. orderersecretprefix: secretsv2/data/... - orderersecretprefix: secretsv2/data/data/crypto/peerOrganizations/org1-nets/orderer - #Provide the serviceaccount name for vault - #Eg. serviceaccountname: vault-auth - serviceaccountname: vault-auth - #Provide the type of vault - #Eg. type: hashicorp - type: hashicorp - #Provide the imagesecretname for vault - #Eg. imagesecretname: regcred - imagesecretname: "" - #Kuberenetes secret for vault ca.cert - #Enable or disable TLS for vault communication if value present or not - #Eg. tls: vaultca - tls: - - -channel: - #Provide the name of the channel - #Eg. name: mychannel - name: mychannel - -orderer: - #Provide the address for orderer - #Eg. address: orderer1.org1proxy.blockchaincloudpoc.com:443 - address: orderer1.org1proxy.blockchaincloudpoc.com:443 - -#Provide the base64 encoded file contents for anchorstx -anchorstx: diff --git a/platforms/r3-corda/charts/corda-h2/.helmignore b/platforms/hyperledger-fabric/charts/fabric-ca-server/.helmignore similarity index 92% rename from platforms/r3-corda/charts/corda-h2/.helmignore rename to platforms/hyperledger-fabric/charts/fabric-ca-server/.helmignore index f0c13194444..014fa775608 100644 --- a/platforms/r3-corda/charts/corda-h2/.helmignore +++ b/platforms/hyperledger-fabric/charts/fabric-ca-server/.helmignore @@ -15,7 +15,9 @@ *.bak *.tmp *~ +generated_config/ # Various IDEs .project .idea/ *.tmproj +.vscode/ diff --git a/platforms/hyperledger-fabric/charts/fabric-ca-server/Chart.yaml b/platforms/hyperledger-fabric/charts/fabric-ca-server/Chart.yaml index 05530cbf407..9c53ad9c83a 100644 --- a/platforms/hyperledger-fabric/charts/fabric-ca-server/Chart.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-ca-server/Chart.yaml @@ -5,7 +5,23 @@ ############################################################################################## apiVersion: v1 -appVersion: "2.0" -description: "Hyperledger Fabric: Deploys a CA server." 
name: fabric-ca-server -version: 1.0.0 +description: "Hyperledger Fabric: Deploys Fabric CA server" +version: 1.1.0 +appVersion: latest +keywords: + - bevel + - hlf + - fabric + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org + diff --git a/platforms/hyperledger-fabric/charts/fabric-ca-server/README.md b/platforms/hyperledger-fabric/charts/fabric-ca-server/README.md index dfd679be803..5e148f73dc6 100644 --- a/platforms/hyperledger-fabric/charts/fabric-ca-server/README.md +++ b/platforms/hyperledger-fabric/charts/fabric-ca-server/README.md @@ -3,204 +3,114 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) - -# CA Server Hyperledger Fabric Deploymen +# fabric-ca-server -- [CA Server Hyperledger Fabric Deployment Helm Chart](#ca-server-hyperledger-fabric-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Verification](#verification) -- [Updating the Deployment](#updating-the-deployment) -- [Deletion](#deletion) -- [Contributing](#contributing) -- [License](#license) +This chart is a component of Hyperledger Bevel. The fabric-ca-server chart deploys a CA server for Hyperledger Fabric blockchain network. If enabled, the keys are then stored on the configured vault and stored as Kubernetes secrets. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details. +## TL;DR - -## CA Server Hyperledger Fabric Deployment Helm Chart ---- -A [Helm chart](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-fabric/charts/fabric-ca-server) to deploy a CA server. - - - -## Prerequisites ---- -Before deploying the Helm chart, make sure to have the following prerequisites: - -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- HAproxy is required as ingress controller. -- Helm installed. - - - -## Chart Structure ---- -The structure of the Helm chart is as follows: - +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install ca bevel/fabric-ca-server ``` -fabric-ca-server/ - |- conf/ - |- fabric-ca-server-config-default.yaml - |- templates/ - |- _helpers.yaml - |- configmap.yaml - |- deployment.yaml - |- service.yaml - |- volume.yaml - |- Chart.yaml - |- README.md - |- values.yaml -``` - -- `fabric-ca-server-config-default.yaml`: Configuration file for the fabric-ca-server command. -- `templates/`: Contains the Kubernetes manifest templates that define the resources to be deployed. -- `helpers.tpl`: Contains custom label definitions used in other templates. -- `configmap.yaml`: Store the configuration for the Fabric CA server. The configuration file is stored in the fabric-ca-server-config.yaml file, and it is mounted into the Fabric CA server container. The ConfigMap is optional, and it is only used if the server.configpath value is set. Otherwise, the default configuration for the Fabric CA server will be used. -- `deployment.yaml`: Deploys CA server Pod, allowing it to handle certificate-related operations within the Hyperledger Fabric blockchain network. 
To ensure the security and proper configuration of the CA server, the included init-container retrieves essential secrets from a Vault server. -- `service.yaml`: Expose a Fabric CA server to the outside world either using HaProxy as a reverse proxy engine. -- `volume.yaml`: Defines a persistent volume that can be used to store the Fabric CA server's database. -- `Chart.yaml`: Contains the metadata for the Helm chart, such as the name, version, and description. -- `README.md`: Provides information and instructions about the Helm chart. -- `values.yaml`: Contains the default configuration values for the Helm chart. - - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-fabric/charts/fabric-ca-server/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: - -## Parameters ---- - -### Metadata - -| Name | Description | Default Value | -| ----------------------| -----------------------------------------------------------------| --------------------------------------------------| -| namespace | Namespace for CA server | org1-net | -| images.ca | image name and version for fabric ca | ghcr.io/hyperledger/bevel-fabric-ca:1.4.8 | -| images.alpineutils | image name and version to read certificates from vault server | ghcr.io/hyperledger/bevel-alpine:latest | -| labels | Provide the custom labels | "" | - - -### Server -| Name | Description | Default Value | -| ----------------------| -----------------------------------------------------------------| -------------------------------------------| -| name | Name for CA server deployment | ca | -| tlsstatus | Specify if TLS is enabled or disabled for the deployment | true | -| admin | Admin name for CA server | admin | -| configpath | Path for Fabric CA Server Config | conf/fabric-ca-server-config-default.yaml | - -### Storage - -| Name | Description | Default Value | -| ----------------------| --------------------------------------| ------------- | -| storageclassname | Storage class name for CA server | aws-storageclass | -| storagesize | Size of storage for CA server | 512Mi | - -### Vault +## Prerequisites -| Name | Description | Default Value | -| ----------------------| --------------------------------------------------------------------| --------------------------------- | -| address | Vault server address | "" | -| role | Vault role for deployment | vault-role | -| authpath | Kubernetes auth backend configured in Vault for CA server | fra-demo-hlkube-cluster-cluster | -| secretcert | Path of secret certificate configured in Vault for CA server | secretsv2/data/crypto/peerOrganizations/org1-net/ca?ca.org1-net-cert.pem | -| secretkey | Path of secret key configured in Vault for CA server | secretsv2/data/crypto/peerOrganizations/org1-net/ca?org1-net-CA.key | -| secretadminpass | Secret path for admin password configured in Vault for CA server | secretsv2/data/credentials/org1-net/ca/org1?user | -| serviceaccountname | Service account name for Vault | vault-auth | -| type | Provide the type of vault | hashicorp | -| imagesecretname | Image secret name for Vault | "" | -| tls | Enable or disable TLS for Vault communication | "" | -| tlssecret | Kubernetes secret for Vault CA certificate | vaultca | +- Kubernetes 1.19+ +- Helm 3.2.0+ -### Service +If Hashicorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ -| Name | Description | Default Value | -| 
--------------------------| ---------------------------------------------------| ---------------| -| servicetype | Service type for the pod | ClusterIP | -| ports.tcp.nodeport | TCP node port to be exposed for CA server | 30007 | -| ports.tcp.clusteripport | TCP cluster IP port to be exposed for CA server | 7054 | +> **Important**: Also check the dependent charts. -### Annotations +## Installing the Chart -| Name | Description | Default Value | -| ------------| ---------------------------------------| ------------- | -| service | Extra annotations for the service | "" | -| pvc | Extra annotations for the PVC | "" | +To install the chart with the release name `ca`: -### Proxy +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install ca bevel/fabric-ca-server +``` -| Name | Description | Default Value | -| ----------------------| -------------------------------------------------------------------------|--------------------------------| -| provider | Proxy/ingress provider. Possible values: "haproxy" or "none" | haproxy | -| type | Type of the deployment. Possible values: "orderer", "peer", or "test" | test | -| external_url_suffix | External URL suffix for the organization | org1proxy.blockchaincloudpoc.com | +The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. +> **Tip**: List all releases using `helm list` - -## Deployment ---- +## Uninstalling the Chart -To deploy the ca Helm chart, follow these steps: +To uninstall/delete the `ca` deployment: -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-ca-server/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install the chart: - ``` - $ helm repo add bevel https://hyperledger.github.io/bevel/ - $ helm install <release-name> ./fabric-ca-server - ``` -Replace `<release-name>` with the desired name for the release. +```bash +helm uninstall ca +``` -This will deploy the ca server node to the Kubernetes cluster based on the provided configurations. +The command removes all the Kubernetes components associated with the chart and deletes the release. +## Parameters -<a name = "verification"> -## Verification ---- - -To verify the deployment, we can use the following command: -``` -$ kubectl get deployments -n <namespace> -``` -Replace `<namespace>` with the actual namespace where the deployment was created. The command will display information about the deployment, including the number of replicas and their current status. +### Global parameters +These parameters are referred to in the same way in each parent or child chart +| Name | Description | Default Value | +|--------|---------|-------------| +|`global.serviceAccountName` | The serviceaccount name that will be created for Vault Auth and K8s Secret management| `vault-auth` | +| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. Currently only `aws`, `azure` and `minikube` are tested | `aws` | +| `global.cluster.cloudNativeServices` | only `false` is implemented, `true` to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) is for future | `false` | +| `global.cluster.kubernetesUrl` | URL of the Kubernetes Cluster | `""` | +| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` and `kubernetes` are supported.
| `hashicorp` | +| `global.vault.role` | Role used for authentication with Vault | `vault-role` | +| `global.vault.network` | Network type that is being deployed | `fabric` | +| `global.vault.address`| URL of the Vault server. | `""` | +| `global.vault.authPath` | Authentication path for Vault | `supplychain` | +| `global.vault.secretEngine` | Vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | Vault secret prefix which must start with `data/` | `data/supplychain` | +| `global.vault.tls` | Name of the Kubernetes secret which has certs to connect to TLS enabled Vault | `false` | +| `global.proxy.provider` | The proxy or Ingress provider. Can be `none` or `haproxy` | `haproxy` | +| `global.proxy.externalUrlSuffix` | The External URL suffix at which the Fabric GRPC services will be available | `test.blockchaincloudpoc.com` | +### Storage - -## Updating the Deployment ---- +| Name | Description | Default Value | +|--------|---------|-------------| +| `storage.size` | Size of the PVC needed for Fabric CA | `512Mi` | +| `storage.reclaimPolicy` | Reclaim policy for the PVC. Choose from: `Delete` or `Retain` | `Delete` | +| `storage.volumeBindingMode` | Volume binding mode for the PVC. Choose from: `Immediate` or `WaitForFirstConsumer` | `Immediate` | +| `storage.allowedTopologies.enabled` | Check [bevel-storageclass](../../../shared/charts/bevel-storageclass/README.md) for details | `false` | -If we need to update the deployment with new configurations or changes, modify the same [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-ca-server/values.yaml) file with the desired changes and run the following Helm command: -``` -$ helm upgrade <release-name> ./fabric-ca-server -``` -Replace `<release-name>` with the name of the release. This command will apply the changes to the deployment, ensuring the ca server node is up to date. +### Image - -## Deletion ---- +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `image.alpineUtils` | Alpine utils image repository and tag | `ghcr.io/hyperledger/bevel-alpine:latest` | +| `image.ca` | Fabric CA image repository and tag | `ghcr.io/hyperledger/bevel-fabric-ca:latest` | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | -To delete the deployment and associated resources, run the following Helm command: -``` -$ helm uninstall <release-name> -``` -Replace `<release-name>` with the name of the release. This command will remove all the resources created by the Helm chart. +### Server - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [CA Server Hyperledger Fabric Deployment Helm Chart](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-ca-server), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel).
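+For example (an illustrative override rather than a required step), the global and server parameters documented in this README can be set at install time with `--set`; the values shown below are placeholders for your own environment:
+
+```bash
+helm install ca bevel/fabric-ca-server \
+  --set global.vault.address=http://vault.example.com:8200 \
+  --set global.proxy.externalUrlSuffix=example.blockchaincloudpoc.com \
+  --set server.adminUsername=admin \
+  --set server.adminPassword=changeme
+```
+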
+| Name | Description | Default Value | +|--------|---------|-------------| +| `server.removeCertsOnDelete` | Flag to delete the certificate secrets when uninstalling the release | `true` | +| `server.tlsStatus` | TLS status of the server | `true` | +| `server.adminUsername` | CA Admin Username | `admin` | +| `server.adminPassword` | CA Admin Password | `adminpw` | +| `server.subject` | CA server root subject | `"/C=GB/ST=London/L=London/O=Orderer"` | +| `server.configPath` | Local path to the CA server configuration file which will be mounted to the CA Server | `""` | +| `server.nodePort` | NodePort for the CA Server | `""` | +| `server.clusterIpPort` | TCP Port for the CA Server | `7054` | + +### Labels + +| Name | Description | Default Value | +| ----------------| ----------- | ------------- | +| `labels.service` | Array of Labels for service object | `[]` | +| `labels.pvc` | Array of Labels for PVC object | `[]` | +| `labels.deployment` | Array of Labels for deployment or statefulset object | `[]` | - ## License This chart is licensed under the Apache v2.0 license. -Copyright © 2023 Accenture +Copyright © 2024 Accenture ### Attribution diff --git a/platforms/hyperledger-fabric/charts/fabric-ca-server/conf/fabric-ca-server-config-default.yaml b/platforms/hyperledger-fabric/charts/fabric-ca-server/conf/fabric-ca-server-config-default.yaml index 0624df3168e..289ec948095 100644 --- a/platforms/hyperledger-fabric/charts/fabric-ca-server/conf/fabric-ca-server-config-default.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-ca-server/conf/fabric-ca-server-config-default.yaml @@ -136,8 +136,8 @@ registry: # Contains identity information which is used when LDAP is disabled # Do not edit this value identities: - - name: {{ $.Values.server.admin }} - pass: {{ $.Values.server.admin }}pw + - name: {{ .Values.server.adminUsername }} + pass: {{ .Values.server.adminPassword }} type: client affiliation: "" attrs: diff --git a/platforms/hyperledger-fabric/charts/fabric-ca-server/requirements.yaml b/platforms/hyperledger-fabric/charts/fabric-ca-server/requirements.yaml new file mode 100644 index 00000000000..a5b2e417d4d --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-ca-server/requirements.yaml @@ -0,0 +1,17 @@ +dependencies: + - name: bevel-vault-mgmt + repository: "file://../../../shared/charts/bevel-vault-mgmt" + tags: + - bevel + version: ~1.0.0 + - name: bevel-scripts + repository: "file://../../../shared/charts/bevel-scripts" + tags: + - bevel + version: ~1.0.0 + - name: bevel-storageclass + alias: storage + repository: "file://../../../shared/charts/bevel-storageclass" + tags: + - storage + version: ~1.0.0 diff --git a/platforms/hyperledger-fabric/charts/fabric-ca-server/templates/_helpers.tpl b/platforms/hyperledger-fabric/charts/fabric-ca-server/templates/_helpers.tpl index 7bf5f530a8e..26091cea227 100644 --- a/platforms/hyperledger-fabric/charts/fabric-ca-server/templates/_helpers.tpl +++ b/platforms/hyperledger-fabric/charts/fabric-ca-server/templates/_helpers.tpl @@ -1,5 +1,57 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} \ No newline at end of file +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "fabric-ca-server.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "fabric-ca-server.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "fabric-ca-server.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "labels.deployment" -}} +{{- range $value := .Values.labels.deployment }} +{{ toYaml $value }} +{{- end }} +{{- end }} + +{{- define "labels.service" -}} +{{- range $value := .Values.labels.service }} +{{ toYaml $value }} +{{- end }} +{{- end }} + +{{- define "labels.pvc" -}} +{{- range $value := .Values.labels.pvc }} +{{ toYaml $value }} +{{- end }} +{{- end }} + +{{/* +Create server url depending on proxy +*/}} +{{- define "fabric-ca-server.serverURL" -}} +{{- if eq .Values.global.proxy.provider "none" -}} + {{- printf "ca.%s" .Release.Namespace }} +{{- else -}} + {{- printf "ca.%s.%s" .Release.Namespace .Values.global.proxy.externalUrlSuffix }} +{{- end -}} +{{- end -}} diff --git a/platforms/hyperledger-fabric/charts/fabric-ca-server/templates/ca-job-cleanup.yaml b/platforms/hyperledger-fabric/charts/fabric-ca-server/templates/ca-job-cleanup.yaml new file mode 100644 index 00000000000..a4d1654cc1b --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-ca-server/templates/ca-job-cleanup.yaml @@ -0,0 +1,50 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "fabric-ca-server.name" . }}-cleanup + labels: + app.kubernetes.io/name: fabric-ca-server-job-cleanup + app.kubernetes.io/component: ca-server-job-cleanup + app.kubernetes.io/part-of: {{ include "fabric-ca-server.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/managed-by: helm + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/hook-weight: "0" + helm.sh/hook: "pre-delete" + helm.sh/hook-delete-policy: "hook-succeeded" +spec: + backoffLimit: 3 + completions: 1 + template: + metadata: + labels: + app.kubernetes.io/name: fabric-ca-server-job-cleanup + app.kubernetes.io/component: ca-server-job-cleanup + app.kubernetes.io/part-of: {{ include "fabric-ca-server.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/managed-by: helm + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + restartPolicy: "Never" + containers: + - name: delete-secrets + image: {{ .Values.image.alpineUtils }} + securityContext: + runAsUser: 0 + imagePullPolicy: IfNotPresent + command: ["sh", "-c"] + args: + - |- +{{- if .Values.server.removeCertsOnDelete }} + + function deleteSecret { + key=$1 + kubectl get secret ${key} --namespace {{ .Release.Namespace }} -o json > /dev/null 2>&1 + if [ $? -eq 0 ]; then + kubectl delete secret ${key} --namespace {{ .Release.Namespace }} + fi + } + deleteSecret {{ include "fabric-ca-server.name" . 
}}-certs +{{- end}} diff --git a/platforms/hyperledger-fabric/charts/fabric-ca-server/templates/configmap.yaml b/platforms/hyperledger-fabric/charts/fabric-ca-server/templates/configmap.yaml index 99241a79d24..86f6b7bcfb5 100644 --- a/platforms/hyperledger-fabric/charts/fabric-ca-server/templates/configmap.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-ca-server/templates/configmap.yaml @@ -4,19 +4,45 @@ # SPDX-License-Identifier: Apache-2.0 ############################################################################################## -{{- if (not (empty .Values.server.configpath)) }} +{{- if (not (empty .Values.server.configPath)) }} apiVersion: v1 kind: ConfigMap metadata: - name: {{ $.Values.server.name }}-config - namespace: {{ $.Values.metadata.namespace }} + name: {{ .Release.Name }}-config + namespace: {{ .Release.Namespace }} labels: - app.kubernetes.io/name: {{ $.Values.server.name }}-config - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} + app.kubernetes.io/name: {{ .Release.Name }}-config + app.kubernetes.io/component: fabric-ca-config + app.kubernetes.io/part-of: {{ include "fabric-ca-server.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm data: fabric-ca-server-config.yaml: | - {{ (tpl (.Files.Get ( printf "%s" $.Values.server.configpath )) . ) | nindent 6 }} + {{ (tpl (.Files.Get ( printf "%s" $.Values.server.configPath )) . ) | nindent 6 }} {{- end }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: openssl-config-file + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: openssl-config-file + app.kubernetes.io/component: openssl-config + app.kubernetes.io/part-of: {{ include "fabric-ca-server.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +data: + openssl.conf: |- + [req] + req_extensions = v3_req + distinguished_name = dn + + [dn] + + [v3_req] + basicConstraints = critical, CA:TRUE + keyUsage = critical,digitalSignature, keyEncipherment, keyCertSign, cRLSign + subjectKeyIdentifier = hash diff --git a/platforms/hyperledger-fabric/charts/fabric-ca-server/templates/deployment.yaml b/platforms/hyperledger-fabric/charts/fabric-ca-server/templates/deployment.yaml deleted file mode 100644 index db56dc48de3..00000000000 --- a/platforms/hyperledger-fabric/charts/fabric-ca-server/templates/deployment.yaml +++ /dev/null @@ -1,183 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ $.Values.server.name }} - namespace: {{ $.Values.metadata.namespace }} - labels: - app.kubernetes.io/name: {{ $.Values.server.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . 
| nindent 2 }} - annotations: - {{- if $.Values.annotations }} - {{- range $key, $value := $.Values.deployment.annotations }} - {{- range $k, $v := $value }} - {{ $k }}: {{ $v | quote }} - {{- end }} - {{- end }} - {{- end }} -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: {{ $.Values.server.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - template: - metadata: - labels: - name: {{ $.Values.server.name }} - app.kubernetes.io/name: {{ $.Values.server.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - {{- if .Values.vault.imagesecretname }} - imagePullSecrets: - - name: {{ $.Values.vault.imagesecretname }} - {{- end }} - volumes: - - name: ca-server-db - persistentVolumeClaim: - claimName: ca-server-db-pvc - - name: certificates - emptyDir: - medium: Memory - {{- if (not (empty .Values.server.configpath)) }} - - name: {{ $.Values.server.name }}-config-volume - configMap: - name: {{ $.Values.server.name }}-config - items: - - key: fabric-ca-server-config.yaml - path: fabric-ca-server-config.yaml - {{- end }} - {{ if .Values.vault.tls }} - - name: vaultca - secret: - secretName: "{{ .Values.vault.tls }}" - items: - - key: ca.crt.pem - path: ca-certificates.crt - {{- end }} - - name: scripts-volume - configMap: - name: bevel-vault-script - initContainers: - - name: ca-certs-init - image: {{ $.Values.metadata.images.alpineutils }} - imagePullPolicy: IfNotPresent - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: MOUNT_PATH - value: /secret - - name: VAULT_TYPE - value: "{{ $.Values.vault.type }}" - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - source /scripts/bevel-vault.sh - - # Calling a function to retrieve the vault token. - vaultBevelFunc "init" - - SECRET_CERT={{ $.Values.vault.secretcert }} - vault_secret_key=$(echo ${SECRET_CERT} |awk -F "?" '{print $1}') - vault_data_key=$(echo ${SECRET_CERT} |awk -F "?" '{print $2}') - - # Calling a function to retrieve secrets from Vault only if they exist. - vaultBevelFunc "readJson" "${vault_secret_key}" - VALUE_OF_SECRET=$(echo ${VAULT_SECRET} | jq -r ".[\"${vault_data_key}\"]") - echo "${VALUE_OF_SECRET}" >> ${MOUNT_PATH}/server.crt - - SECRET_KEY={{ $.Values.vault.secretkey }} - vault_secret_key=$(echo ${SECRET_KEY} |awk -F "?" '{print $1}') - vault_data_key=$(echo ${SECRET_KEY} |awk -F "?" '{print $2}') - - # Calling a function to retrieve secrets from Vault only if they exist. - vaultBevelFunc "readJson" "${vault_secret_key}" - VALUE_OF_SECRET=$(echo ${VAULT_SECRET} | jq -r ".[\"${vault_data_key}\"]") - echo "${VALUE_OF_SECRET}" >> ${MOUNT_PATH}/server.key - - SECRET_ADMIN_PASS={{ $.Values.vault.secretadminpass }} - vault_secret_key=$(echo ${SECRET_ADMIN_PASS} |awk -F "?" '{print $1}') - vault_data_key=$(echo ${SECRET_ADMIN_PASS} |awk -F "?" '{print $2}') - - # Calling a function to retrieve secrets from Vault only if they exist. 
- vaultBevelFunc "readJson" "${vault_secret_key}" - VALUE_OF_SECRET=$(echo ${VAULT_SECRET} | jq -r ".[\"${vault_data_key}\"]") - echo "${VALUE_OF_SECRET}" >> ${MOUNT_PATH}/user_cred - volumeMounts: - - name: certificates - mountPath: /secret - {{ if .Values.vault.tls }} - - name: vaultca - mountPath: "/etc/ssl/certs/" - readOnly: true - {{ end }} - - name: scripts-volume - mountPath: /scripts/bevel-vault.sh - subPath: bevel-vault.sh - containers: - - name: ca - image: {{ $.Values.metadata.images.ca }} - imagePullPolicy: IfNotPresent - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - if [[ -d /custom-config/ ]] && [[ -f /custom-config/fabric-ca-server-config.yaml ]]; then - cp /custom-config/fabric-ca-server-config.yaml $FABRIC_CA_HOME/fabric-ca-server-config.yaml - fabric-ca-server start --config $FABRIC_CA_HOME/fabric-ca-server-config.yaml -d - else - sleep 1 && fabric-ca-server start -b {{ $.Values.server.admin }}:`cat /etc/hyperledger/fabric-ca-server-config/user_cred` -d - fi - ports: - - containerPort: 7054 - - containerPort: 9443 - env: - - name: FABRIC_CA_HOME - value: /etc/hyperledger/fabric-ca-server - - name: FABRIC_CA_SERVER_CA_NAME - value: "{{ $.Values.server.name }}.{{ $.Values.metadata.namespace }}" - - name: FABRIC_CA_SERVER_CA_CERTFILE - value: /etc/hyperledger/fabric-ca-server-config/server.crt - - name: FABRIC_CA_SERVER_CA_KEYFILE - value: /etc/hyperledger/fabric-ca-server-config/server.key - - name: FABRIC_CA_SERVER_TLS_ENABLED - value: "{{ $.Values.server.tlsstatus }}" - - name: FABRIC_CA_SERVER_DEBUG - value: "true" - - name: FABRIC_CA_SERVER_TLS_CERTFILE - value: /etc/hyperledger/fabric-ca-server-config/server.crt - - name: FABRIC_CA_SERVER_TLS_KEYFILE - value: /etc/hyperledger/fabric-ca-server-config/server.key - - name: FABRIC_CA_SERVER_DB_DATASOURCE - value: /var/hyperledger/fabric-ca-server/db/fabric-ca-server.db - - name: FABRIC_CA_SERVER_OPERATIONS_LISTENADDRESS - value: 0.0.0.0:9443 - volumeMounts: - - name: certificates - mountPath: /etc/hyperledger/fabric-ca-server-config - readOnly: true - - name: ca-server-db - mountPath: /var/hyperledger/fabric-ca-server/db/ - {{- if (not (empty .Values.server.configpath)) }} - - name: {{ $.Values.server.name }}-config-volume - mountPath: /custom-config/ - {{- end }} diff --git a/platforms/hyperledger-fabric/charts/fabric-ca-server/templates/service.yaml b/platforms/hyperledger-fabric/charts/fabric-ca-server/templates/service.yaml index 66dfe4edab4..d249cdfcef2 100644 --- a/platforms/hyperledger-fabric/charts/fabric-ca-server/templates/service.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-ca-server/templates/service.yaml @@ -7,80 +7,74 @@ apiVersion: v1 kind: Service metadata: - name: {{ $.Values.server.name }} - namespace: {{ $.Values.metadata.namespace }} - annotations: - {{- if $.Values.annotations }} - {{- range $key, $value := $.Values.annotations.service }} - {{- range $k, $v := $value }} - {{ $k }}: {{ $v | quote }} - {{- end }} - {{- end }} - {{- end }} + name: ca + namespace: {{ .Release.Namespace }} labels: - run: {{ $.Values.server.name }} - app.kubernetes.io/name: {{ $.Values.server.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + app.kubernetes.io/name: {{ .Release.Name }} + app.kubernetes.io/component: fabric + app.kubernetes.io/part-of: {{ include "fabric-ca-server.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} + {{- include "labels.service" . | nindent 4 }} spec: - type: {{ $.Values.service.servicetype }} + type: ClusterIP selector: - name: {{ $.Values.server.name }} + app.kubernetes.io/part-of: {{ include "fabric-ca-server.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} ports: - name: tcp protocol: TCP targetPort: 7054 - port: {{ $.Values.service.ports.tcp.clusteripport }} - {{- if $.Values.service.ports.tcp.nodeport }} - nodePort: {{ $.Values.service.ports.tcp.nodeport }} + port: {{ .Values.server.clusterIpPort }} + {{- if .Values.server.nodePort }} + nodePort: {{ .Values.server.nodePort }} {{- end }} - name: operations protocol: TCP targetPort: 9443 port: 9443 -{{- if eq $.Values.proxy.provider "haproxy" }} +{{- if eq .Values.global.proxy.provider "haproxy" }} --- apiVersion: networking.k8s.io/v1 kind: Ingress metadata: - name: {{ $.Values.server.name }} - namespace: {{ $.Values.metadata.namespace }} + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} annotations: - kubernetes.io/ingress.class: "haproxy" ingress.kubernetes.io/ssl-passthrough: "true" spec: + ingressClassName: "haproxy" rules: - - host: ca.{{ $.Values.metadata.namespace }}.{{ $.Values.proxy.external_url_suffix }} + - host: ca.{{ .Release.Namespace }}.{{ .Values.global.proxy.externalUrlSuffix }} http: paths: - path: / pathType: Prefix backend: service: - name: {{ $.Values.server.name }} + name: ca port: - number: {{ $.Values.service.ports.tcp.clusteripport }} + number: {{ .Values.server.clusterIpPort }} --- apiVersion: networking.k8s.io/v1 kind: Ingress metadata: - name: {{ $.Values.server.name }}-ops - namespace: {{ $.Values.metadata.namespace }} - annotations: - kubernetes.io/ingress.class: "haproxy" + name: {{ .Release.Name }}-ops + namespace: {{ .Release.Namespace }} spec: + ingressClassName: "haproxy" rules: - - host: ca-ops.{{ $.Values.metadata.namespace }}.{{ $.Values.proxy.external_url_suffix }} + - host: ca-ops.{{ .Release.Namespace }}.{{ .Values.global.proxy.externalUrlSuffix }} http: paths: - path: / pathType: Prefix backend: service: - name: {{ $.Values.server.name }} + name: ca port: number: 9443 {{- end }} diff --git a/platforms/hyperledger-fabric/charts/fabric-ca-server/templates/statefulset.yaml b/platforms/hyperledger-fabric/charts/fabric-ca-server/templates/statefulset.yaml new file mode 100644 index 00000000000..6a507e85596 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-ca-server/templates/statefulset.yaml @@ -0,0 +1,292 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "fabric-ca-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: {{ .Release.Name }} + app.kubernetes.io/component: fabric-ca-statefulset + app.kubernetes.io/part-of: {{ include "fabric-ca-server.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + {{- include "labels.deployment" . | nindent 4 }} +spec: + serviceName: {{ .Release.Name }} + replicas: 1 + podManagementPolicy: OrderedReady + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ .Release.Name }} + app.kubernetes.io/name: {{ .Release.Name }} + app.kubernetes.io/component: fabric-ca-statefulset + app.kubernetes.io/part-of: {{ include "fabric-ca-server.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + template: + metadata: + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: {{ .Release.Name }} + app.kubernetes.io/component: fabric-ca-statefulset + app.kubernetes.io/part-of: {{ include "fabric-ca-server.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + {{- include "labels.deployment" . | nindent 8 }} + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + {{- if .Values.image.pullSecret }} + imagePullSecrets: + - name: {{ .Values.image.pullSecret }} + {{- end }} + initContainers: + - name: ca-certs-init + image: {{ .Values.image.alpineUtils }} + imagePullPolicy: IfNotPresent + volumeMounts: + - name: certificates + mountPath: /secret + {{ if .Values.global.vault.tls }} + - name: vaultca + mountPath: "/etc/ssl/certs/" + readOnly: true + {{ end }} + - name: package-manager + mountPath: /scripts/package-manager.sh + subPath: package-manager.sh + - name: openssl-config + mountPath: /openssl/openssl.conf + subPath: openssl.conf + {{- if eq .Values.global.vault.type "hashicorp" }} + - name: scripts-volume + mountPath: /scripts/bevel-vault.sh + subPath: bevel-vault.sh + {{- end }} + env: + - name: CA_URL + value: {{ include "fabric-ca-server.serverURL" . }} + - name: CA_SUBJECT + value: "{{ .Values.server.subject }}/CN={{ include "fabric-ca-server.serverURL" . }}" + - name: COMPONENT_NAME + value: {{ .Release.Namespace }} + {{- if eq .Values.global.vault.type "hashicorp" }} + - name: VAULT_ADDR + value: "{{ .Values.global.vault.address }}" + - name: VAULT_APP_ROLE + value: "{{ .Values.global.vault.role }}" + - name: KUBERNETES_AUTH_PATH + value: "{{ .Values.global.vault.authPath }}" + - name: VAULT_SECRET_ENGINE + value: "{{ .Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ .Values.global.vault.secretPrefix }}" + - name: VAULT_TYPE + value: "{{ .Values.global.vault.type }}" + {{- end }} + command: ["sh", "-c"] + args: + - |- + + . /scripts/package-manager.sh + # Define the packages to install + packages_to_install="jq curl openssl kubectl" + install_packages "$packages_to_install" + + formatCertificate () { + NAME="${1##*/}" + while IFS= read -r line + do + echo "$line\n" + done < ${1} > ${2}/${NAME}.txt + } +{{- if eq .Values.global.vault.type "hashicorp" }} + . /scripts/bevel-vault.sh + echo "Getting vault Token..." + # Calling a function to retrieve the vault token. 
+ vaultBevelFunc "init" + function safeWriteSecret { + path=$1 + key=$2 + # Check if certs already exist in Vault + vaultBevelFunc "readJson" "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/ca" + if [ "$SECRETS_AVAILABLE" == "yes" ] + then + # Create the Kubernetes Secret with data from Vault + echo "Secret found in Vault, only creating k8s secrets" + ca_cert=$(echo ${VAULT_SECRET} | jq -r ".[\"rootca_pem\"]") + echo "${ca_cert}" > ${path}/server.crt + + ca_key=$(echo ${VAULT_SECRET} | jq -r ".[\"rootca_key\"]") + echo "${ca_key}" > ${path}/server.key + else + echo "Secret to be created on Vault and k8s" + # Store the value in Vault + FORMAT_CERTIFICATE_PATH="${path}/formatcertificate" + mkdir -p ${FORMAT_CERTIFICATE_PATH} + formatCertificate "${path}/server.key" "${FORMAT_CERTIFICATE_PATH}" + formatCertificate "${path}/server.crt" "${FORMAT_CERTIFICATE_PATH}" + + PEM_CERTIFICATE=$(cat ${FORMAT_CERTIFICATE_PATH}/server.crt.txt) + KEY_CERTIFICATE=$(cat ${FORMAT_CERTIFICATE_PATH}/server.key.txt) + + # create a JSON file for the data related to node crypto + echo " + { + \"data\": + { + \"rootca_pem\": \"${PEM_CERTIFICATE}\", + \"rootca_key\": \"${KEY_CERTIFICATE}\" + } + }" > payload.json + + # Calling a function to write secrets to the vault. + vaultBevelFunc 'write' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/ca" 'payload.json' + rm payload.json + + fi + # Create the Kubernetes Secret using kubectl after secrets ae stored in Vault + kubectl create secret tls ${key} --namespace ${COMPONENT_NAME} \ + --cert=${path}/server.crt \ + --key=${path}/server.key + } + +{{- else }} + + function safeWriteSecret { + path=$1 + key=$2 + # Create the Kubernetes Secret using kubectl + kubectl create secret tls ${key} --namespace ${COMPONENT_NAME} \ + --cert=${path}/server.crt \ + --key=${path}/server.key + } +{{- end }} + kubectl get secret --namespace {{ .Release.Namespace }} {{ include "fabric-ca-server.name" . }}-certs -o json > /dev/null 2>&1 + if [ $? -ne 0 ]; then + echo "Generating CA certs ..." + # this commands generate the CA certificate + cd /secret + openssl ecparam -name prime256v1 -genkey -noout -out server.key + openssl req -x509 -config "/openssl/openssl.conf" -new -nodes -key server.key \ + -days 1024 -out server.crt -extensions v3_req -subj "${CA_SUBJECT}" -addext "subjectAltName = DNS:${CA_URL}" + safeWriteSecret /secret {{ include "fabric-ca-server.name" . }}-certs + else + echo "CA certs already present." + KUBENETES_SECRET=$(kubectl get secret {{ include "fabric-ca-server.name" . 
}}-certs --namespace ${COMPONENT_NAME} -o json) + if [ "$KUBENETES_SECRET" = "" ]; then + echo "Certficates absent in Kubernetes secrets" + exit 1 + else + CA_KEY=$(echo "$KUBENETES_SECRET" | jq -r ".data.\"tls.key\"" | base64 -d) + CA_CERT=$(echo "$KUBENETES_SECRET" | jq -r ".data.\"tls.crt\"" | base64 -d) + echo "${CA_KEY}" > /secret/server.key + echo "${CA_CERT}" > /secret/server.crt + fi + fi + containers: + - name: ca + image: {{ .Values.image.ca }} + imagePullPolicy: IfNotPresent + command: ["sh", "-c"] + args: + - |- + #!/usr/bin/env sh + if [[ -d /custom-config/ ]] && [[ -f /custom-config/fabric-ca-server-config.yaml ]]; then + cp /custom-config/fabric-ca-server-config.yaml $FABRIC_CA_HOME/fabric-ca-server-config.yaml + fabric-ca-server start --config $FABRIC_CA_HOME/fabric-ca-server-config.yaml -d + else + sleep 1 && fabric-ca-server start -b {{ .Values.server.adminUsername }}:{{ .Values.server.adminPassword }} -d + fi + ports: + - containerPort: 7054 + - containerPort: 9443 + env: + - name: FABRIC_CA_HOME + value: /etc/hyperledger/fabric-ca-server + - name: FABRIC_CA_SERVER_CA_NAME + value: "{{ .Release.Name }}.{{ .Release.Namespace }}" + - name: FABRIC_CA_SERVER_CA_CERTFILE + value: /etc/hyperledger/fabric-ca-server-config/server.crt + - name: FABRIC_CA_SERVER_CA_KEYFILE + value: /etc/hyperledger/fabric-ca-server-config/server.key + - name: FABRIC_CA_SERVER_TLS_ENABLED + value: "{{ .Values.server.tlsStatus }}" + - name: FABRIC_CA_SERVER_DEBUG + value: "true" + - name: FABRIC_CA_SERVER_TLS_CERTFILE + value: /etc/hyperledger/fabric-ca-server-config/server.crt + - name: FABRIC_CA_SERVER_TLS_KEYFILE + value: /etc/hyperledger/fabric-ca-server-config/server.key + - name: FABRIC_CA_SERVER_DB_DATASOURCE + value: /var/hyperledger/fabric-ca-server/db/fabric-ca-server.db + - name: FABRIC_CA_SERVER_OPERATIONS_LISTENADDRESS + value: 0.0.0.0:9443 + volumeMounts: + - name: certificates + mountPath: /etc/hyperledger/fabric-ca-server-config + readOnly: true + - name: ca-server-db-pvc + mountPath: /var/hyperledger/fabric-ca-server/db/ + {{- if (not (empty .Values.server.configPath)) }} + - name: {{ .Release.Name }}-config-volume + mountPath: /custom-config/ + {{- end }} + volumes: + - name: certificates + emptyDir: + medium: Memory + {{- if (not (empty .Values.server.configPath)) }} + - name: {{ .Release.Name }}-config-volume + configMap: + name: {{ .Release.Name }}-config + items: + - key: fabric-ca-server-config.yaml + path: fabric-ca-server-config.yaml + {{- end }} + {{ if .Values.global.vault.tls }} + - name: vaultca + secret: + secretName: "{{ .Values.global.vault.tls }}" + items: + - key: ca-crt.pem + path: ca-certificates.crt + {{- end }} + {{- if eq .Values.global.vault.type "hashicorp" }} + - name: scripts-volume + configMap: + name: bevel-vault-script + defaultMode: 0777 + {{- end }} + - name: package-manager + configMap: + name: package-manager + defaultMode: 0777 + - name: openssl-config + configMap: + name: openssl-config-file + defaultMode: 0775 + items: + - key: openssl.conf + path: openssl.conf + volumeClaimTemplates: + - metadata: + name: ca-server-db-pvc + labels: + {{- include "labels.pvc" . 
| nindent 8 }} + spec: + accessModes: ["ReadWriteOnce"] + storageClassName: storage-{{ .Release.Name }} + resources: + requests: + storage: "{{ .Values.storage.size }}" diff --git a/platforms/hyperledger-fabric/charts/fabric-ca-server/templates/volume.yaml b/platforms/hyperledger-fabric/charts/fabric-ca-server/templates/volume.yaml deleted file mode 100644 index d7aaae803e1..00000000000 --- a/platforms/hyperledger-fabric/charts/fabric-ca-server/templates/volume.yaml +++ /dev/null @@ -1,33 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: ca-server-db-pvc - namespace: {{ $.Values.metadata.namespace }} - labels: - app.kubernetes.io/name: ca-server-db-pvc - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} - annotations: - {{- if $.Values.annotations }} - {{- range $key, $value := $.Values.annotations.pvc }} - {{- range $k, $v := $value }} - {{ $k }}: {{ $v | quote }} - {{- end }} - {{- end }} - {{- end }} -spec: - storageClassName: {{ $.Values.storage.storageclassname }} - accessModes: - - ReadWriteOnce - resources: - requests: - storage: {{ $.Values.storage.storagesize }} diff --git a/platforms/hyperledger-fabric/charts/fabric-ca-server/values.yaml b/platforms/hyperledger-fabric/charts/fabric-ca-server/values.yaml index 3d6edb20be4..ade6983421f 100644 --- a/platforms/hyperledger-fabric/charts/fabric-ca-server/values.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-ca-server/values.yaml @@ -3,111 +3,100 @@ # # SPDX-License-Identifier: Apache-2.0 ############################################################################################## +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +--- +# The following are for overriding global values +global: + #Provide the service account name which will be created. + serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # only 'false' is implemented + #Provide the kubernetes host url + #Eg. kubernetesUrl: https://10.3.8.5:8443 + kubernetesUrl: + vault: + #Provide the type of vault + #Eg. type: hashicorp + type: hashicorp + #Provide the vaultrole for an organization + #Eg. vaultrole: org1-vault-role + role: vault-role + #Provide the network type + network: fabric + #Provide the vault server address + #Eg. vaultaddress: http://Vault-884963190.eu-west-1.elb.amazonaws.com + address: + #Provide the kubernetes auth backed configured in vault for an organization + #Eg. authpath: supplychain + authPath: supplychain + #Provide the secret engine. + secretEngine: secretsv2 + #Provide the vault path where the secrets will be stored + secretPrefix: "data/supplychain" + #Enable or disable TLS for Vault communication + #Eg. tls: true + tls: false -metadata: - #Provide the namespace for CA server - #Eg. namespace: org1-net - namespace: org1-net - images: - #Provide the valid image name and version for fabric ca - #Eg. 
ca: hyperledger/fabric-ca:1.4.8 - ca: ghcr.io/hyperledger/bevel-fabric-ca:1.4.8 - #Provide the valid image name and version to read certificates from vault server - #Eg.alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - #Provide the custom labels - #NOTE: Provide labels other than name, release name , release service, chart version , chart name , run. - #Eg. labels: - # role: ca - labels: - -deployment: - annotations: - -server: - #Provide name for ca server deployment - #Eg. name: ca - name: ca - #Provide the value for tlsstatus to be true or false for deployment - #Eg. tlsstatus: true - tlsstatus: true - #Provide the admin name for CA server - #Eg. admin: admin - admin: admin - # Provide the path for Fabric CA Server Config - # Eg. configpath: conf/ca-config-default.yaml - configpath: conf/ca-config-default.yaml + proxy: + #This will be the proxy/ingress provider. Can have values "haproxy" or "none" + #Eg. provider: "haproxy" + provider: haproxy + #This field specifies the external url for the organization + #Eg. externalUrlSuffix: test.blockchaincloudpoc.com + externalUrlSuffix: test.blockchaincloudpoc.com storage: - #Provide the storageclassname for CA - #Eg. storageclassname: aws-storageclass - storageclassname: aws-storageclass - #Provide the storagesize for CA - #Eg. storagesize: 512Mi - storagesize: 512Mi + #Provide the size for CA + #Eg. size: 512Mi + size: 512Mi + # NOTE: when you set this to Retain, the volume WILL persist after the chart is delete and you need to manually delete it + reclaimPolicy: "Delete" # choose from: Delete | Retain + volumeBindingMode: Immediate # choose from: Immediate | WaitForFirstConsumer + allowedTopologies: + enabled: false -vault: - #Provide the vault server address - #Eg. vaultaddress: http://Vault-884963190.eu-west-1.elb.amazonaws.com - address: - #Provide the vaultrole for deployment - #Eg. vaultrole: vault-role - role: vault-role - #Provide the kubernetes auth backend configured in vault for CA server - #Eg. authpath: fra-demo-hlkube-cluster-cluster - authpath: devorg1-net-auth - #Provide the secretcert path configured in vault for CA server - #Eg. secretcert: secretsv2/data/crypto/Organizations/.../...-cert.pem - secretcert: secretsv2/data/crypto/peerOrganizations/org1-net/ca?ca.org1-net-cert.pem - #Provide the secretkey path configured in vault for CA server - #Eg. secretkey: secretsv2/data/crypto/Organizations/.../...-CA.key - secretkey: secretsv2/data/crypto/peerOrganizations/org1-net/ca?org1-net-CA.key - # Provide the secret path for admin password configured in vault for CA server - # Eg. secretadminpass: secretsv2/data/credentials/.../.../ca/org1?user - secretadminpass: secretsv2/data/credentials/org1-net/ca/org1?user - #Provide the serviceaccountname for vault - #Eg. serviceaccountname: vault-auth - serviceaccountname: vault-auth - #Provide the type of vault - #Eg. type: hashicorp - type: hashicorp - #Provide the imagesecretname for vault - #Eg. imagesecretname: regcred - imagesecretname: "" - #Enable or disable TLS for vault communication - #Eg. tls: true - tls: - #kuberenetes secret for vault ca.cert - #Eg. tlssecret: vaultca - tlssecret: vaultca +image: + #Provide the valid image name and version to read certificates from vault server + #Eg.alpineutils: ghcr.io/hyperledger/bevel-alpine:latest + alpineUtils: ghcr.io/hyperledger/bevel-alpine:latest + #Provide the valid image name and version for fabric ca + #Eg. 
ca: ghcr.io/hyperledger/bevel-fabric-ca:latest + ca: ghcr.io/hyperledger/bevel-fabric-ca:latest + #Provide the secret to use if private repository + #Eg. pullSecret: regcred + pullSecret: +server: + # Flag to ensure the certificates secrets are removed on helm uninstall + removeCertsOnDelete: true + #Provide the value for tls setting for CA server + #Eg. tlsStatus: true + tlsStatus: true + #Provide the admin username for CA server + #Eg. adminUsername: admin + adminUsername: admin + #Provide the admin password for CA server + adminPassword: adminpw + #Provide the subject of the services ca organization's + #Eg. subject: "/C=GB/ST=London/L=London/O=Carrier/CN=carrier-net" + subject: "/C=GB/ST=London/L=London/O=Orderer" + # Provide the path for Fabric CA Server Config + # Eg. configPath: conf/ca-config-default.yaml + configPath: + #Provide tcp node port to be exposed for ca server + #Eg. nodePort: 30007 + nodePort: + #Provide tcp cluster IP port to be exposed for ca server + #Eg. clusterIpPort: 7054 + clusterIpPort: 7054 -service: - #Provide service type for the pod - #Eg. servicetype: NodePort - servicetype: ClusterIP - ports: - tcp: - #Provide tcp node port to be exposed for ca server - #Eg. nodeport: 30007 - nodeport: - #Provide tcp cluster IP port to be exposed for ca server - #Eg. clusteripport: 7054 - clusteripport: 7054 - -annotations: - # Extra annotations for the service +# Provide additional labels in array format +labels: + #Eg. service: + # - label1: value1 + # - label2: value2 service: [] - # Extra annotations for the PVC pvc: [] - -proxy: - #This will be the proxy/ingress provider. Can have values "haproxy" or "none" - #Eg. provider: "haproxy" - provider: haproxy - #Type can be "orderer" or "peer"; "test" is defaulted - #Eg. type: orderer - type: test - #This field specifies the external url for the organization - #Eg. external_url_suffix: org1proxy.blockchaincloudpoc.com - external_url_suffix: org1proxy.blockchaincloudpoc.com + deployment: [] diff --git a/platforms/hyperledger-fabric/charts/fabric-cacerts-gen/Chart.yaml b/platforms/hyperledger-fabric/charts/fabric-cacerts-gen/Chart.yaml deleted file mode 100644 index 6eac32205fe..00000000000 --- a/platforms/hyperledger-fabric/charts/fabric-cacerts-gen/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "Hyperledger Fabric: Generates CA Server certs." -name: fabric-cacerts-gen -version: 1.0.0 diff --git a/platforms/hyperledger-fabric/charts/fabric-cacerts-gen/README.md b/platforms/hyperledger-fabric/charts/fabric-cacerts-gen/README.md deleted file mode 100644 index 26e99913dc2..00000000000 --- a/platforms/hyperledger-fabric/charts/fabric-cacerts-gen/README.md +++ /dev/null @@ -1,174 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) 
-[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# Generate Cacerts Hyperledger Fabric Deployment - -- [Generate Cacerts Hyperledger Fabric Deployment Helm Chart](#generate-cacerts-hyperledger-fabric-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Verification](#verification) -- [Updating the Deployment](#updating-the-deployment) -- [Deletion](#deletion) -- [Contributing](#contributing) -- [License](#license) - - - -## Generate Cacerts Hyperledger Fabric Deployment Helm Chart ---- -A [Helm chart](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-fabric/charts/fabric-cacerts-gen) to generate CA Server certs. - - -## Prerequisites ---- -Before deploying the Helm chart, make sure to have the following prerequisites: - -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm installed. - - - -## Chart Structure ---- -The structure of the Helm chart is as follows: - -``` -fabric-cacerts-gen/ - |- templates/ - |- _helpers.yaml - |- job.yaml - |- Chart.yaml - |- README.md - |- values.yaml -``` - -- `templates/`: Contains the Kubernetes manifest templates that define the resources to be deployed. -- `helpers.tpl`: Contains custom label definitions used in other templates. -- `job.yaml`: The init-check-certificates checks if the certificates are present in the Vault server. If the certificates are not present, the cacerts generates and uploads CA certificates and admin credentials to Vault. -- `Chart.yaml`: Contains the metadata for the Helm chart, such as the name, version, and description. -- `README.md`: Provides information and instructions about the Helm chart. -- `values.yaml`: Contains the default configuration values for the Helm chart. - - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-fabric/charts/fabric-cacerts-gen/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. 
Here are some important configuration options: - - -### Metadata - -| Name | Description | Default Value | -| ---------------------| -------------------------------------------------------------------------- | --------------------------------------------------| -| namespace | Namespace for the organization's peer | org1-net | -| name | Organization's name | org1 | -| component_name | Organization's component name | org1-net | -| images.alpineutils | Valid image name and version to read certificates from the vault server | ghcr.io/hyperledger/bevel-alpine:latest | -| labels | Custom labels for the organization | "" | - -### Vault - -| Name | Description | Default Value | -| --------------------------| ------------------------------------------------ | -----------------------------------| -| role | Vault role for the organization | vault-role | -| address | Vault server address | "" | -| authpath | Kubernetes auth backend configured in vault | devorg1-net-auth | -| secretcryptoprefix | Vault secret prefix for crypto | secrets/secretsv2/data/crypto/ordererOrganizations/org1-net/ca | -| secretcredentialsprefix | Vault secret prefix for credentials | secrets/secretsv2/data/credentials/org1-net/ca/smari | -| serviceaccountname | Service account name for vault | vault-auth | -| type | Provide the type of vault | hashicorp | -| imagesecretname | Image secret name for vault | "" | - -### CA - -| Name | Description | Default Value | -| ---------| ------------------------------------------------ | --------------------------------------------------------| -| subject | Subject of the services CA organization's | /C=GB/ST=London/L=London/O=Orderer/CN=ca.org1-net | - - - -## Deployment ---- - -To deploy the fabric-cacerts-gen Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-cacerts-gen/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install the chart: - ``` - $ helm repo add bevel https://hyperledger.github.io/bevel/ - $ helm install ./fabric-cacerts-gen - ``` -Replace `` with the desired name for the release. - -This will deploy the fabric-cacerts-gen node to the Kubernetes cluster based on the provided configurations. - - - -## Verification ---- - -To verify the deployment, we can use the following command: -``` -$ kubectl get jobs -n -``` -Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. - - - -## Updating the Deployment ---- - -If we need to update the deployment with new configurations or changes, modify the same [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-cacerts-gen/values.yaml) file with the desired changes and run the following Helm command: -``` -$ helm upgrade ./fabric-cacerts-gen -``` -Replace `` with the name of the release. This command will apply the changes to the deployment, ensuring the fabric-cacerts-gen node is up to date. - - - -## Deletion ---- - -To delete the deployment and associated resources, run the following Helm command: -``` -$ helm uninstall -``` -Replace `` with the name of the release. This command will remove all the resources created by the Helm chart. 
- - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Generate Cacerts Hyperledger Fabric Deployment Helm Chart](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-cacerts-gen), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - - -## License - -This chart is licensed under the Apache v2.0 license. - -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -``` diff --git a/platforms/hyperledger-fabric/charts/fabric-cacerts-gen/templates/_helpers.tpl b/platforms/hyperledger-fabric/charts/fabric-cacerts-gen/templates/_helpers.tpl deleted file mode 100644 index d43c09d8cef..00000000000 --- a/platforms/hyperledger-fabric/charts/fabric-cacerts-gen/templates/_helpers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} diff --git a/platforms/hyperledger-fabric/charts/fabric-cacerts-gen/templates/job.yaml b/platforms/hyperledger-fabric/charts/fabric-cacerts-gen/templates/job.yaml deleted file mode 100644 index c758f4e1de2..00000000000 --- a/platforms/hyperledger-fabric/charts/fabric-cacerts-gen/templates/job.yaml +++ /dev/null @@ -1,213 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: batch/v1 -kind: Job -metadata: - name: "{{ $.Values.metadata.name }}-cacerts-job" - namespace: "{{ $.Values.metadata.namespace }}" - labels: - app: "{{ $.Values.metadata.name }}-cacerts-job" - app.kubernetes.io/name: "{{ $.Values.metadata.name }}-cacerts-job" - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - backoffLimit: 6 - template: - metadata: - labels: - app: "{{ $.Values.metadata.name }}-cacerts-job" - app.kubernetes.io/name: "{{ $.Values.metadata.name }}-cacerts-job" - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - restartPolicy: "OnFailure" - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - {{- if .Values.vault.imagesecretname }} - imagePullSecrets: - - name: {{ $.Values.vault.imagesecretname }} - {{- end }} - volumes: - - name: certcheck - emptyDir: - medium: Memory - - name: scripts-volume - configMap: - name: bevel-vault-script - - name: package-manager - configMap: - name: package-manager - initContainers: - - name: init-check-certificates - image: {{ $.Values.metadata.images.alpineutils }} - imagePullPolicy: IfNotPresent - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_SECRET_CRYPTO_PATH - value: {{ $.Values.vault.secretcryptoprefix }} - - name: VAULT_SECRET_CREDENTIALS_PATH - value: {{ $.Values.vault.secretcredentialsprefix }} - - name: MOUNT_PATH - value: "/certcheck" - - name: VAULT_TYPE - value: "{{ $.Values.vault.type }}" - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - . /scripts/bevel-vault.sh - - # Calling a function to retrieve the vault token. - vaultBevelFunc "init" - - mkdir -p ${MOUNT_PATH} - - # Calling a function to retrieve secrets from Vault only if they exist. - vaultBevelFunc "readJson" "${VAULT_SECRET_CRYPTO_PATH}" - - if [ "$SECRETS_AVAILABLE" == "yes" ] - then - echo "Certificates present in vault" - touch ${MOUNT_PATH}/present_cacert.txt - else - echo "Certficates absent in vault. Ignore error warning." - touch ${MOUNT_PATH}/absent_cacert.txt - fi - - # Check if CA server admin credentials already present in the vault - vaultBevelFunc "readJson" "${VAULT_SECRET_CREDENTIALS_PATH}" - - if [ "$SECRETS_AVAILABLE" == "yes" ] - then - echo "Certificates present in vault" - touch ${MOUNT_PATH}/present_creds.txt - else - echo "Certficates absent in vault. Ignore error warning." - touch ${MOUNT_PATH}/absent_creds.txt - fi - - echo "Done checking for certificates in vault." 
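The init container above only probes Vault and records the result as marker files under /certcheck; the cacerts container then generates and uploads material only for the paths marked absent. For debugging outside the cluster, a roughly equivalent manual check can be done with the Vault CLI. This is a sketch only: the `secretsv2` mount and the `crypto/ordererOrganizations/org1-net/ca` path are the chart defaults and may differ in your deployment, and a logged-in `vault` binary is assumed.

```bash
# Manual equivalent of the init-check-certificates probe (illustrative only).
# Assumes VAULT_ADDR/VAULT_TOKEN are exported and the KV v2 engine is mounted at "secretsv2".
if vault kv get -format=json secretsv2/crypto/ordererOrganizations/org1-net/ca >/dev/null 2>&1; then
  echo "Certificates present in vault"   # generation would be skipped
else
  echo "Certificates absent in vault"    # the cacerts container would generate and upload them
fi
```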
- volumeMounts: - - name: certcheck - mountPath: /certcheck - - name: scripts-volume - mountPath: /scripts/bevel-vault.sh - subPath: bevel-vault.sh - containers: - - name: "cacerts" - image: {{ $.Values.metadata.images.alpineutils }} - imagePullPolicy: IfNotPresent - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_SECRET_CRYPTO_PATH - value: {{ $.Values.vault.secretcryptoprefix }} - - name: VAULT_SECRET_CREDENTIALS_PATH - value: {{ $.Values.vault.secretcredentialsprefix }} - - name: COMPONENT_NAME - value: {{ $.Values.metadata.component_name }} - - name: ORG_NAME - value: {{ $.Values.metadata.name }} - - name: CA_SUBJECT - value: "{{ $.Values.ca.subject }}" - - name: VAULT_TYPE - value: "{{ $.Values.vault.type }}" - command: ["sh", "-c"] - args: - - |- - . /scripts/bevel-vault.sh - . /scripts/package-manager.sh - - # Define the packages to install - packages_to_install="jq curl openssl" - install_packages "$packages_to_install" - - if [ -e /certcheck/absent_cacert.txt ] - then - # Create openssl.conf file - echo "[req] - req_extensions = v3_req - distinguished_name = dn - - [dn] - - [v3_req] - basicConstraints = critical, CA:TRUE - keyUsage = critical,digitalSignature, keyEncipherment, keyCertSign, cRLSign - subjectKeyIdentifier = hash - " > openssl.conf - - # this commands generate the CA certificate - openssl ecparam -name prime256v1 -genkey -noout -out ${COMPONENT_NAME}-CA.key - openssl req -x509 -config "openssl.conf" -new -nodes -key ${COMPONENT_NAME}-CA.key -days 1024 -out ca.${COMPONENT_NAME}-cert.pem -extensions v3_req -subj "${CA_SUBJECT}" - - # This commands put the certificates with correct format for the curl command - while IFS= read -r line - do - echo "$line\n" - done < ${COMPONENT_NAME}-CA.key > ./cakey_formatted.txt - - while IFS= read -r line - do - echo "$line\n" - done < ca.${COMPONENT_NAME}-cert.pem > ./capem_formatted.txt - - PEM_CERTIFICATE=$(cat capem_formatted.txt) - KEY_CERTIFICATE=$(cat cakey_formatted.txt) - - echo " - { - \"data\": - { - \"ca.${COMPONENT_NAME}-cert.pem\": \"${PEM_CERTIFICATE}\", - \"${COMPONENT_NAME}-CA.key\": \"${KEY_CERTIFICATE}\" - } - }" > payload.json - - # Calling a function to retrieve the vault token. - vaultBevelFunc "init" - # Calling a function to write secrets to the vault. - vaultBevelFunc 'write' "${VAULT_SECRET_CRYPTO_PATH}" 'payload.json' - rm payload.json - fi - - if [ -e /certcheck/absent_creds.txt ] - then - # Calling a function to retrieve the vault token. - vaultBevelFunc "init" - echo " - { - \"data\": - { - \"user\": \"${ORG_NAME}-adminpw\" - } - }" > payload.json - - # Calling a function to write a secret to the vault. - vaultBevelFunc 'write' "${VAULT_SECRET_CREDENTIALS_PATH}" 'payload.json' - # Calling a function to retrieve secrets from Vault only if they exist. 
- vaultBevelFunc "readJson" "${VAULT_SECRET_CREDENTIALS_PATH}" - fi - volumeMounts: - - name: certcheck - mountPath: /certcheck - - name: scripts-volume - mountPath: /scripts/bevel-vault.sh - subPath: bevel-vault.sh - - name: package-manager - mountPath: /scripts/package-manager.sh - subPath: package-manager.sh diff --git a/platforms/hyperledger-fabric/charts/fabric-cacerts-gen/values.yaml b/platforms/hyperledger-fabric/charts/fabric-cacerts-gen/values.yaml deleted file mode 100644 index 3fbbe603697..00000000000 --- a/platforms/hyperledger-fabric/charts/fabric-cacerts-gen/values.yaml +++ /dev/null @@ -1,56 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -metadata: - #Provide organization's name - #Eg. namespace: org1 - name: org1 - #Provide organization's component_name - #Eg. component_name: org1-net - component_name: org1-net - #Provide the namespace for organization's peer - #Eg. namespace: org1-net - namespace: org1-net - images: - #Provide the valid image name and version to read certificates from vault server - #Eg.alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - #Provide the custom labels - #NOTE: Provide labels other than name, release name , release service, chart version , chart name , app. - #Eg. labels: - # role: anchorpeer - labels: anchorpeer - -vault: - #Provide the vaultrole for an organization - #Eg. vaultrole: org1-vault-role - role: vault-role - #Provide the vault server address - #Eg. vaultaddress: http://Vault-884963190.eu-west-1.elb.amazonaws.com - address: - #Provide the kubernetes auth backed configured in vault for an organization - #Eg. authpath: fra-demo-hlkube-cluster-org1 - authpath: devorg1-net-auth - # Vault secret prefix for crypto - secretcryptoprefix: secretsv2/data/crypto/ordererOrganizations/org1-net/ca - # Vault secret prefix for credentials - secretcredentialsprefix: secretsv2/data/credentials/org1-net/ca/smari - #Provide the serviceaccountname for vault - #Eg. serviceaccountname: vault-auth - serviceaccountname: vault-auth - #Provide the type of vault - #Eg. type: hashicorp - type: hashicorp - #Provide the imagesecretname for vault - #Eg. imagesecretname: regcred - imagesecretname: "" - #Kuberenetes secret for vault ca.cert - #Enable or disable TLS for vault communication if value present or not - -ca: - #Provide the subject of the services ca organization's - #Eg. subject: "/C=GB/ST=London/L=London/O=Carrier/CN=org1-net" - subject: /C=GB/ST=London/L=London/O=Orderer/CN=ca.org1-net diff --git a/platforms/hyperledger-fabric/charts/fabric-catools/.helmignore b/platforms/hyperledger-fabric/charts/fabric-catools/.helmignore new file mode 100644 index 00000000000..014fa775608 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-catools/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +generated_config/ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/platforms/hyperledger-fabric/charts/fabric-catools/Chart.yaml b/platforms/hyperledger-fabric/charts/fabric-catools/Chart.yaml index 6ff272cccab..8a692565134 100644 --- a/platforms/hyperledger-fabric/charts/fabric-catools/Chart.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-catools/Chart.yaml @@ -5,7 +5,22 @@ ############################################################################################## apiVersion: v1 -appVersion: "2.0" -description: "Hyperledger Fabric: Deploys a Fabric CA tools." name: fabric-catools -version: 1.0.0 +description: "Hyperledger Fabric: Generates Fabric Certificates and Keys" +version: 1.1.0 +appVersion: latest +keywords: + - bevel + - hlf + - fabric + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/hyperledger-fabric/charts/fabric-catools/README.md b/platforms/hyperledger-fabric/charts/fabric-catools/README.md index 8353542dcba..a77580f726d 100644 --- a/platforms/hyperledger-fabric/charts/fabric-catools/README.md +++ b/platforms/hyperledger-fabric/charts/fabric-catools/README.md @@ -3,234 +3,117 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) - -# CA Tools Hyperledger Fabric Deployment +# fabric-catools -- [CA Tools Hyperledger Fabric Deployment Helm Chart](#ca-tools-hyperledger-fabric-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Verification](#verification) -- [Updating the Deployment](#updating-the-deployment) -- [Deletion](#deletion) -- [Contributing](#contributing) -- [License](#license) +This chart is a component of Hyperledger Bevel. The fabric-catools chart creates job(s) to generate the certificates and keys required for Hyperledger Fabric network. If enabled, the keys are then stored on the configured vault and stored as Kubernetes secrets. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details. +## TL;DR - -## CA Tools Hyperledger Fabric Deployment Helm Chart ---- -A [Helm chart](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-fabric/charts/fabric-catools) to deploy Fabric CA tools. - +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install catools bevel/fabric-catools +``` - ## Prerequisites ---- -Before deploying the Helm chart, make sure to have the following prerequisites: -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm installed. 
+- Kubernetes 1.19+ +- Helm 3.2.0+ +If Hashicorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ - -## Chart Structure ---- -The structure of the Helm chart is as follows: +## Installing the Chart -``` -fabric-catools/ - |- templates/ - |- _helpers.yaml - |- configmap.yaml - |- deployment.yaml - |- volume.yaml - |- Chart.yaml - |- README.md - |- values.yaml -``` +To install the chart with the release name `catools`: -- `templates/`: Contains the Kubernetes manifest templates that define the resources to be deployed. -- `helpers.tpl`: Contains custom label definitions used in other templates. -- `configmap.yaml`: Contains definitions for six different configmaps. These configmaps will be used by the main and store-vault containers through volume mounting to support their respective tasks. -- `deployment.yaml`: The init-container generates the cryptographic material for the Fabric CA server and checks if the cryptographic material already exists in Vault. If it does, the init-container will skip the generation process. The main container runs the Fabric CA server, issues certificates to clients in the organization, and has a liveness probe that checks if the Fabric CA server is running. The store-vault container stores the cryptographic material in Vault, Checks if any certificates have not been stored correctly. -- `volume.yaml`: Defines 2 persistent volume to store the data. -- `Chart.yaml`: Contains the metadata for the Helm chart, such as the name, version, and description. -- `README.md`: Provides information and instructions about the Helm chart. -- `values.yaml`: Contains the default configuration values for the Helm chart. +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install catools bevel/fabric-catools +``` +The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-fabric/charts/fabric-catools/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: +> **Tip**: List all releases using `helm list` -### Metadata +## Uninstalling the Chart -| Name | Description | Default Value | -| ----------------------| --------------------------------------------------| ------------------- | -| namespace | Namespace for CA deployment | org1-net | -| name | Name for CA server deployment | ca-tools | -| component_type | Organization's type (orderer or peer) | orderer | -| org_name | Organization's name in lowercase | org1 | -| proxy | Proxy/ingress provider (haproxy or none) | haproxy | +To uninstall/delete the `catools` deployment: -### Replica +```bash +helm uninstall catools +``` -| Name | Description | Default Value | -| ----------------------| --------------------------- | ---------------| -| replicaCount | Number of replica pods | 1 | +The command removes all the Kubernetes components associated with the chart and deletes the release. 
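Most real deployments override at least the Vault connection and CA details described in the Parameters tables below. A hedged example of such an install follows; every value shown is an illustrative placeholder, not a required setting.

```bash
# Install fabric-catools with a few typical overrides; adjust names and addresses for your network.
helm install catools bevel/fabric-catools \
  --namespace supplychain-net \
  --set global.vault.address=http://vault.example.com:8200 \
  --set global.vault.authPath=supplychain \
  --set global.vault.secretPrefix=data/supplychain \
  --set orgData.caAddress=ca.supplychain-net.test.blockchaincloudpoc.com \
  --set orgData.orgName=supplychain \
  --set orgData.type=peer
```

The same overrides can be kept in a values file and passed with `-f my-values.yaml` instead of repeated `--set` flags.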
+
+## Parameters
+
+### Global parameters
+These parameters are referred to in the same way in each parent or child chart
+| Name | Description | Default Value |
+|--------|---------|-------------|
+|`global.serviceAccountName` | The serviceaccount name that will be created for Vault Auth and K8s Secret management| `vault-auth` |
+| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. Currently only `aws`, `azure` and `minikube` are tested | `aws` |
+| `global.cluster.cloudNativeServices` | only `false` is implemented, `true` to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) is reserved for future use | `false` |
+| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` and `kubernetes` are supported. | `hashicorp` |
+| `global.vault.role` | Role used for authentication with Vault | `vault-role` |
+| `global.vault.address`| URL of the Vault server. | `""` |
+| `global.vault.authPath` | Authentication path for Vault | `supplychain` |
+| `global.vault.secretEngine` | Vault secret engine name | `secretsv2` |
+| `global.vault.secretPrefix` | Vault secret prefix which must start with `data/` | `data/supplychain` |
+| `global.vault.tls` | Name of the Kubernetes secret which has certs to connect to TLS enabled Vault | `false` |
+| `global.proxy.provider` | The proxy or Ingress provider. Can be `none` or `haproxy` | `haproxy` |
+| `global.proxy.externalUrlSuffix` | The External URL suffix at which the Fabric GRPC services will be available | `test.blockchaincloudpoc.com` |
 
 ### Image
 
-| Name | Description | Default Value |
-| --------------| ------------------------------------------------------------------------| ----------------------------------------------------|
-| repository | Image name for the server container | ghcr.io/hyperledger/bevel-fabric-ca-tools:1.2.1 |
-| pullPolicy | Image pull policy | IfNotPresent |
-| alpineutils | Valid image name and version to read certificates from the vault server | ghcr.io/hyperledger/bevel-alpine:latest |
-
-
-### Annotations
-
-| Name | Description | Default Value |
-| ---------------| --------------------------------------|-----------------|
-| pvc | Extra annotations for PVC | "" |
-| deployment | Extra annotations for Deployment | "" |
-
-### Storage
-
-| Name | Description | Default Value |
-| ----------------------| --------------------------- | ------------------- |
-| storageclassname | Storage class name | aws-storageclass |
-| storagesize | Storage size for CA | 512Mi |
-
-### Vault
-
-| Name | Description | Default Value |
-| ----------------------| ------------------------------------------------------------------|-----------------------------------|
-| role | Vault role for an organization | vault-role |
-| address | Vault server address | "" |
-| authpath | Kubernetes auth backend configured in vault for an organization | devorg1-net-auth |
-| secretusers | Path configured in vault for users certificates | secretsv2/data/crypto/ordererOrganizations/org1-net/users |
-| secretorderer | Path configured in vault for orderers | secretsv2/data/crypto/ordererOrganizations/org1-net/orderers |
-| secretpeerorderertls | Path configured in vault for peer orderer TLS | secretsv2/data/crypto/peerOrganizations/org1-net/orderer/tls |
-| secretcert | Path configured in vault for CA server certificate | secretsv2/data/crypto/ordererOrganizations/org1-net/ca?ca.org1-net-cert.pem |
-| secretkey | Path configured in vault for CA server private key | 
secretsv2/data/crypto/ordererOrganizations/org1-net/ca?org1-net-CA.key | -| secretconfigfile | Path configured in vault for MSP config.yaml file | secretsv2/data/crypto/ordererOrganizations/org1-net/msp/config | -| secretcouchdb | Path configured in vault for CouchDB credentials | secretsv2/data/credentials/org1-net/couchdb/org1 | -| serviceaccountname | Service account name for Vault | vault-auth | -| type | Provide the type of vault | hashicorp | -| imagesecretname | Image secret name for Vault | "" | - -### HealthCheck - -| Name | Description | Default Value | -| ----------------------| --------------------------------------------------------------------------| ---------------| -| retries | Number of times to retry fetching from/writing to Vault before giving up | 10 | -| sleepTimeAfterError | Time in seconds to wait after an error occurs when interacting with Vault | 15 | - -### Org_data - -| Name | Description | Default Value | -| ----------------------| ----------------------------------| ----------------| -| external_url_suffix | External URL of the organization | org1proxy.blockchaincloudpoc.com | -| component_subject | Organization's subject | "" | -| cert_subject | Organization's subject | "" | -| component_country | Organization's country | UK | -| component_state | Organization's state | London | -| component_location | Organization's location | London | -| ca_url | Organization's CA URL | "" | - -### Orderers - -| Name | Description | Default Value | -| ---------------| --------------------------------------| ---------------| -| name | Orderer's name | orderer1 | -| orderers_info | Orderer's names and CA certificates | "" | - -### Peers - -| Name | Description | Default Value | -| --------------| --------------------------- | -----------------| -| name | Peer's name | peer1 | -| peer_count | Total number of peers | 4 | - -### Users - -| Name | Description | Default Value | -| ----------------------| --------------------------- | ----------------| -| users_list | Base64 encoded list of users | "" | -| users_identities | List of user identities | "" | - -### Checks - -| Name | Description | Default Value | -| ----------------------| --------------------------- | ------------------- | -| refresh_cert_value | Refresh user certificates | false | -| add_peer_value | Add a peer to an existing network | false | - +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `image.caTools` | Fabric CA Tools image repository and tag | `ghcr.io/hyperledger/bevel-fabric-ca:latest` | +| `image.alpineUtils` | Alpine utils image repository and tag | `ghcr.io/hyperledger/bevel-alpine:latest` | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | - -## Deployment ---- +### OrgData -To deploy the fabric-catools Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-catools/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install the chart: - ``` - $ helm repo add bevel https://hyperledger.github.io/bevel/ - $ helm install ./fabric-catools - ``` -Replace `` with the desired name for the release. - -This will deploy the fabric-catools node to the Kubernetes cluster based on the provided configurations. 
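The new `image.pullSecret` parameter documented above expects an existing image-pull secret (typically a docker-registry / dockerconfigjson secret) in the target namespace. A minimal sketch of creating one is shown below; the registry, namespace and credentials are placeholders, and the resulting name would then be passed via `--set image.pullSecret=regcred`.

```bash
# Create a docker-registry secret for a private image registry; all values are placeholders.
kubectl create secret docker-registry regcred \
  --namespace supplychain-net \
  --docker-server=ghcr.io \
  --docker-username='<username>' \
  --docker-password='<token>'
```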
-
-
-## Verification
---
-To verify the deployment, we can use the following command:
-```
-$ kubectl get deployments -n <namespace>
-```
-Replace `<namespace>` with the actual namespace where the deployment was created. The command will display information about the deployment, including the number of replicas and their current status.
+| Name | Description | Default Value |
+|--------|---------|-------------|
+| `orgData.caAddress` | Address of the CA Server without https | `""` |
+| `orgData.caAdminUser` | CA Admin Username | `supplychain-admin` |
+| `orgData.caAdminPassword` | CA Admin Password | `supplychain-adminpw` |
+| `orgData.orgName` | Organization Name | `supplychain` |
+| `orgData.type` | Type of certificate to generate, chosen from `orderer` or `peer` | `orderer` |
+| `orgData.componentSubject` | X.509 subject for the organization | `"O=Orderer,L=51.50/-0.13/London,C=GB"` |
+### Users
-
-## Updating the Deployment
---
+| Name | Description | Default Value |
+| ----------------| ----------- | ------------- |
+| `users.usersList` | Array of Users with their attributes | `- identity: user1`
`attributes:`
`- key: "hf.Revoker"`
`value: "true"` | +| `users.usersListAnsible` | Base64 encoded list of Users generally passed from Ansible | `""` | -If we need to update the deployment with new configurations or changes, modify the same [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-catools/values.yaml) file with the desired changes and run the following Helm command: -``` -$ helm upgrade ./fabric-catools -``` -Replace `` with the name of the release. This command will apply the changes to the deployment, ensuring the fabric-catools node is up to date. +### Settings - -## Deletion ---- +| Name | Description | Default Value | +| ----------------| ----------- | ------------- | +| `settings.createConfigMaps` | Flag to create configmaps. Must be set to `false` for additional orderers/peers in the same organization. | `true` | +| `settings.refreshCertValue` | Flag to refresh User certificates | `false` | +| `settings.addPeerValue` | Flag to be used when adding a new peer to the organization | `false` | +| `settings.removeCertsOnDelete` | Flag to delete the user and peer certificates on uninstall | `false` | +| `settings.removeOrdererTlsOnDelete` | Flag to delete the orderer TLS certificates on uninstall | `false` | -To delete the deployment and associated resources, run the following Helm command: -``` -$ helm uninstall -``` -Replace `` with the name of the release. This command will remove all the resources created by the Helm chart. +### Labels - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [CA Tools Hyperledger Fabric Deployment Helm Chart](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-catools), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). +| Name | Description | Default Value | +| ----------------| ----------- | ------------- | +| `labels.service` | Array of Labels for service object | `[]` | +| `labels.pvc` | Array of Labels for PVC object | `[]` | +| `labels.deployment` | Array of Labels for deployment or statefulset object | `[]` | - ## License This chart is licensed under the Apache v2.0 license. -Copyright © 2023 Accenture +Copyright © 2024 Accenture ### Attribution diff --git a/platforms/hyperledger-fabric/charts/fabric-catools/templates/_helpers.tpl b/platforms/hyperledger-fabric/charts/fabric-catools/templates/_helpers.tpl index d43c09d8cef..89092a8c24e 100644 --- a/platforms/hyperledger-fabric/charts/fabric-catools/templates/_helpers.tpl +++ b/platforms/hyperledger-fabric/charts/fabric-catools/templates/_helpers.tpl @@ -1,5 +1,54 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "fabric-catools.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "fabric-catools.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "fabric-catools.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "labels.deployment" -}} +{{- range $value := .Values.labels.deployment }} +{{ toYaml $value }} {{- end }} +{{- end }} + +{{- define "labels.service" -}} +{{- range $value := .Values.labels.service }} +{{ toYaml $value }} +{{- end }} +{{- end }} + +{{- define "labels.pvc" -}} +{{- range $value := .Values.labels.pvc }} +{{ toYaml $value }} +{{- end }} +{{- end }} + +{{/* +Create server name depending on proxy +*/}} +{{- define "fabric-catools.caFileName" -}} +{{- $serverAddress := .Values.orgData.caAddress | replace "." "-" | replace ":" "-" -}} +{{- printf "%s.pem" $serverAddress -}} +{{- end -}} diff --git a/platforms/hyperledger-fabric/charts/fabric-catools/templates/configmap.yaml b/platforms/hyperledger-fabric/charts/fabric-catools/templates/configmap.yaml index 40bb8bc304e..12ddce59656 100644 --- a/platforms/hyperledger-fabric/charts/fabric-catools/templates/configmap.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-catools/templates/configmap.yaml @@ -3,112 +3,104 @@ # # SPDX-License-Identifier: Apache-2.0 ############################################################################################## - +{{- if .Values.settings.createConfigMaps }} --- apiVersion: v1 kind: ConfigMap metadata: name: crypto-scripts-cm - namespace: {{ .Values.metadata.namespace }} + namespace: {{ .Release.Namespace }} labels: app.kubernetes.io/name: crypto-scripts - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/part-of: {{ include "fabric-catools.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm data: generate-crypto-orderer.sh: |- #!/bin/bash - + # IMP: Do not add newline before the #!/bin/bash line above as then the shell scripts don't work set -x CURRENT_DIR=${PWD} - FULLY_QUALIFIED_ORG_NAME="{{ .Values.metadata.namespace }}" - EXTERNAL_URL_SUFFIX="{{ .Values.org_data.external_url_suffix }}" - ALTERNATIVE_ORG_NAMES=("{{ .Values.org_data.external_url_suffix }}") - ORG_NAME="{{ .Values.metadata.org_name }}" - SUBJECT="C={{ .Values.org_data.component_country }},ST={{ .Values.org_data.component_state }},L={{ .Values.org_data.component_location }},O={{ .Values.metadata.org_name }}" - SUBJECT_PEER="{{ .Values.org_data.component_subject }}" - CA="{{ .Values.org_data.ca_url }}" - CA_ADMIN_USER="${ORG_NAME}-admin" - CA_ADMIN_PASS="${ORG_NAME}-adminpw" + FULLY_QUALIFIED_ORG_NAME="{{ .Release.Namespace }}" + EXTERNAL_URL_SUFFIX="{{ .Values.global.proxy.externalUrlSuffix }}" + SUBJECT="{{ .Values.orgData.componentSubject }}" + AFFILIATION="{{ .Values.orgData.orgName }}" + CA="{{ .Values.orgData.caAddress }}" + CA_ADMIN_USER="{{ .Values.orgData.caAdminUser }}" + CA_ADMIN_PASS="{{ .Values.orgData.caAdminPassword }}" ORG_ADMIN_USER="Admin@${FULLY_QUALIFIED_ORG_NAME}" ORG_ADMIN_PASS="Admin@${FULLY_QUALIFIED_ORG_NAME}-pw" ORG_CYPTO_FOLDER="/crypto-config/ordererOrganizations/${FULLY_QUALIFIED_ORG_NAME}" - ROOT_TLS_CERT="/crypto-config/ordererOrganizations/${FULLY_QUALIFIED_ORG_NAME}/ca/ca.${FULLY_QUALIFIED_ORG_NAME}-cert.pem" - CAS_FOLDER="${HOME}/ca-tools/cas/ca-${ORG_NAME}" - ORG_HOME="${HOME}/ca-tools/${ORG_NAME}" + CAS_FOLDER="${HOME}/ca-tools/cas/ca" + ORG_HOME="${HOME}/ca-tools/org" ## Enroll CA administrator for Org. This user will be used to create other identities - fabric-ca-client enroll -d -u https://${CA_ADMIN_USER}:${CA_ADMIN_PASS}@${CA} --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} --csr.names "${SUBJECT_PEER}" + fabric-ca-client enroll -d -u https://${CA_ADMIN_USER}:${CA_ADMIN_PASS}@${CA} --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} --csr.names "${SUBJECT}" ## Get the CA cert and store in Org MSP folder fabric-ca-client getcacert -d -u https://${CA} --tls.certfiles ${ROOT_TLS_CERT} -M ${ORG_CYPTO_FOLDER}/msp - if [ "{{ .Values.metadata.proxy }}" != "none" ]; then - mv ${ORG_CYPTO_FOLDER}/msp/cacerts/*.pem ${ORG_CYPTO_FOLDER}/msp/cacerts/ca-${FULLY_QUALIFIED_ORG_NAME}-${EXTERNAL_URL_SUFFIX}.pem - fi mkdir ${ORG_CYPTO_FOLDER}/msp/tlscacerts cp ${ORG_CYPTO_FOLDER}/msp/cacerts/* ${ORG_CYPTO_FOLDER}/msp/tlscacerts - ## Register and enroll admin for Org and populate admincerts for MSP - fabric-ca-client register -d --id.name ${ORG_ADMIN_USER} --id.secret ${ORG_ADMIN_PASS} --id.type admin --csr.names "${SUBJECT_PEER}" --id.attrs "hf.Registrar.Roles=client,hf.Registrar.Attributes=*,hf.Revoker=true,hf.AffiliationMgr=true,hf.GenCRL=true,admin=true:ecert,abac.init=true:ecert" --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} - - fabric-ca-client enroll -d -u https://${ORG_ADMIN_USER}:${ORG_ADMIN_PASS}@${CA} --tls.certfiles ${ROOT_TLS_CERT} --home ${ORG_HOME}/admin --csr.names "${SUBJECT_PEER}" + if [ ! -e /crypto-config/admin-msp-exists ] || [ ! 
-e /crypto-config/admin-tls-exists ] || [ "$REFRESH_CERT_VALUE" = "true" ]; then + # Add affiliation for organisation + fabric-ca-client affiliation add ${AFFILIATION} -u https://${CA_ADMIN_USER}:${CA_ADMIN_PASS}@${CA} --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} + ## Register and enroll admin for Org and populate admincerts for MSP + fabric-ca-client register -d --id.name ${ORG_ADMIN_USER} --id.secret ${ORG_ADMIN_PASS} --id.type admin --csr.names "${SUBJECT}" --id.affiliation ${AFFILIATION} --id.attrs "hf.Registrar.Roles=client,hf.Registrar.Attributes=*,hf.Revoker=true,hf.AffiliationMgr=true,hf.GenCRL=true,admin=true:ecert,abac.init=true:ecert" --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} - mkdir -p ${ORG_CYPTO_FOLDER}/msp/admincerts - cp ${ORG_HOME}/admin/msp/signcerts/* ${ORG_CYPTO_FOLDER}/msp/admincerts/${ORG_ADMIN_USER}-cert.pem + fabric-ca-client enroll -d -u https://${ORG_ADMIN_USER}:${ORG_ADMIN_PASS}@${CA} --id.affiliation ${AFFILIATION} --tls.certfiles ${ROOT_TLS_CERT} --home ${ORG_HOME}/admin --csr.names "${SUBJECT}" - mkdir ${ORG_HOME}/admin/msp/admincerts - cp ${ORG_HOME}/admin/msp/signcerts/* ${ORG_HOME}/admin/msp/admincerts/${ORG_ADMIN_USER}-cert.pem + mkdir -p ${ORG_CYPTO_FOLDER}/msp/admincerts + cp ${ORG_HOME}/admin/msp/signcerts/* ${ORG_CYPTO_FOLDER}/msp/admincerts/${ORG_ADMIN_USER}-cert.pem - mkdir -p ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER} - cp -R ${ORG_HOME}/admin/msp ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER} + mkdir ${ORG_HOME}/admin/msp/admincerts + cp ${ORG_HOME}/admin/msp/signcerts/* ${ORG_HOME}/admin/msp/admincerts/${ORG_ADMIN_USER}-cert.pem - if [ "{{ .Values.metadata.proxy }}" != "none" ]; then - mv ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/msp/cacerts/*.pem ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/msp/cacerts/ca-${FULLY_QUALIFIED_ORG_NAME}-${EXTERNAL_URL_SUFFIX}.pem - fi + mkdir -p ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER} + cp -R ${ORG_HOME}/admin/msp ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER} - # Get TLS cert for admin and copy to appropriate location - fabric-ca-client enroll -d --enrollment.profile tls -u https://${ORG_ADMIN_USER}:${ORG_ADMIN_PASS}@${CA} -M ${ORG_HOME}/admin/tls --tls.certfiles ${ROOT_TLS_CERT} --csr.names "${SUBJECT_PEER}" + # Get TLS cert for admin and copy to appropriate location + fabric-ca-client enroll -d --enrollment.profile tls -u https://${ORG_ADMIN_USER}:${ORG_ADMIN_PASS}@${CA} -M ${ORG_HOME}/admin/tls --tls.certfiles ${ROOT_TLS_CERT} --csr.names "${SUBJECT}" - # Copy the TLS key and cert to the appropriate place - mkdir -p ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls - cp ${ORG_HOME}/admin/tls/keystore/* ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls/client.key - cp ${ORG_HOME}/admin/tls/signcerts/* ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls/client.crt - cp ${ORG_HOME}/admin/tls/tlscacerts/* ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls/ca.crt + # Copy the TLS key and cert to the appropriate place + mkdir -p ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls + cp ${ORG_HOME}/admin/tls/keystore/* ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls/client.key + cp ${ORG_HOME}/admin/tls/signcerts/* ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls/client.crt + cp ${ORG_HOME}/admin/tls/tlscacerts/* ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls/ca.crt + fi cd ${CURRENT_DIR} orderer-script.sh: |- #!/bin/bash - set -x CURRENT_DIR=${PWD} - FULLY_QUALIFIED_ORG_NAME="{{ .Values.metadata.namespace }}" - EXTERNAL_URL_SUFFIX="{{ .Values.org_data.external_url_suffix }}" - ALTERNATIVE_ORG_NAMES=("{{ 
.Values.org_data.external_url_suffix }}") - ORG_NAME="{{ .Values.metadata.org_name }}" - SUBJECT="C={{ .Values.org_data.component_country }},ST={{ .Values.org_data.component_state }},L={{ .Values.org_data.component_location }},O={{ .Values.metadata.org_name }}" - SUBJECT_PEER="{{ .Values.org_data.component_subject }}" - CA="{{ .Values.org_data.ca_url }}" - CA_ADMIN_USER="${ORG_NAME}-admin" - CA_ADMIN_PASS="${ORG_NAME}-adminpw" + FULLY_QUALIFIED_ORG_NAME="{{ .Release.Namespace }}" + EXTERNAL_URL_SUFFIX="{{ .Values.global.proxy.externalUrlSuffix }}" + ALTERNATIVE_ORG_NAMES=("{{ .Release.Namespace }}.svc.cluster.local" "{{ .Values.orgData.orgName }}.net" "{{ .Release.Namespace }}.{{ .Values.global.proxy.externalUrlSuffix }}") + SUBJECT="{{ .Values.orgData.componentSubject }}" + CA="{{ .Values.orgData.caAddress }}" + CA_ADMIN_USER="{{ .Values.orgData.caAdminUser }}" + CA_ADMIN_PASS="{{ .Values.orgData.caAdminPassword }}" ORDERER_NAME=$1 ORG_ADMIN_USER="Admin@${FULLY_QUALIFIED_ORG_NAME}" ORG_ADMIN_PASS="Admin@${FULLY_QUALIFIED_ORG_NAME}-pw" ORG_CYPTO_FOLDER="/crypto-config/ordererOrganizations/${FULLY_QUALIFIED_ORG_NAME}" - ROOT_TLS_CERT="/crypto-config/ordererOrganizations/${FULLY_QUALIFIED_ORG_NAME}/ca/ca.${FULLY_QUALIFIED_ORG_NAME}-cert.pem" - CAS_FOLDER="${HOME}/ca-tools/cas/ca-${ORG_NAME}" - ORG_HOME="${HOME}/ca-tools/${ORG_NAME}" + CAS_FOLDER="${HOME}/ca-tools/cas/ca" + ORG_HOME="${HOME}/ca-tools/org" ## Register and enroll node and populate its MSP folder PEER="${ORDERER_NAME}.${FULLY_QUALIFIED_ORG_NAME}" @@ -123,7 +115,7 @@ data: fabric-ca-client register -d --id.name ${PEER} --id.secret ${PEER}-pw --id.type orderer --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} # Enroll to get peers TLS cert - fabric-ca-client enroll -d --enrollment.profile tls -u https://${PEER}:${PEER}-pw@${CA} -M ${ORG_HOME}/cas/orderers/tls --csr.hosts "${CSR_HOSTS}" --tls.certfiles ${ROOT_TLS_CERT} --csr.names "${SUBJECT_PEER}" + fabric-ca-client enroll -d --enrollment.profile tls -u https://${PEER}:${PEER}-pw@${CA} -M ${ORG_HOME}/cas/orderers/tls --csr.hosts "${CSR_HOSTS}" --tls.certfiles ${ROOT_TLS_CERT} --csr.names "${SUBJECT}" # Copy the TLS key and cert to the appropriate place mkdir -p ${ORG_CYPTO_FOLDER}/orderers/${PEER}/tls @@ -134,110 +126,91 @@ data: rm -rf ${ORG_HOME}/cas/orderers/tls # Enroll again to get the peer's enrollment certificate (default profile) - fabric-ca-client enroll -d -u https://${PEER}:${PEER}-pw@${CA} -M ${ORG_CYPTO_FOLDER}/orderers/${PEER}/msp --tls.certfiles ${ROOT_TLS_CERT} --csr.names "${SUBJECT_PEER}" - + fabric-ca-client enroll -d -u https://${PEER}:${PEER}-pw@${CA} -M ${ORG_CYPTO_FOLDER}/orderers/${PEER}/msp --tls.certfiles ${ROOT_TLS_CERT} --csr.names "${SUBJECT}" # Create the TLS CA directories of the MSP folder if they don't exist. 
mkdir ${ORG_CYPTO_FOLDER}/orderers/${PEER}/msp/tlscacerts - if [ "{{ .Values.metadata.proxy }}" != "none" ]; then - mv ${ORG_CYPTO_FOLDER}/orderers/${PEER}/msp/cacerts/*.pem ${ORG_CYPTO_FOLDER}/orderers/${PEER}/msp/cacerts/ca-${FULLY_QUALIFIED_ORG_NAME}-${EXTERNAL_URL_SUFFIX}.pem - fi cp ${ORG_CYPTO_FOLDER}/orderers/${PEER}/msp/cacerts/* ${ORG_CYPTO_FOLDER}/orderers/${PEER}/msp/tlscacerts # Copy the peer org's admin cert into target MSP directory mkdir -p ${ORG_CYPTO_FOLDER}/orderers/${PEER}/msp/admincerts cp ${ORG_CYPTO_FOLDER}/msp/admincerts/${ORG_ADMIN_USER}-cert.pem ${ORG_CYPTO_FOLDER}/orderers/${PEER}/msp/admincerts - cd ${CURRENT_DIR} generate-crypto-peer.sh: |- #!/bin/bash - set -x CURRENT_DIR=${PWD} - FULLY_QUALIFIED_ORG_NAME="{{ .Values.metadata.namespace }}" - ALTERNATIVE_ORG_NAMES=("{{ .Values.metadata.namespace }}.svc.cluster.local" "{{ .Values.metadata.org_name }}.net" "{{ .Values.metadata.namespace }}.{{ .Values.org_data.external_url_suffix }}") - ORG_NAME="{{ .Values.metadata.org_name }}" - EXTERNAL_URL_SUFFIX="{{ .Values.org_data.external_url_suffix }}" - AFFILIATION="{{ .Values.metadata.org_name }}" - SUBJECT="C={{ .Values.org_data.component_country }},ST={{ .Values.org_data.component_state }},L={{ .Values.org_data.component_location }},O={{ .Values.metadata.org_name }}" - SUBJECT_PEER="{{ .Values.org_data.component_subject }}" - CA="{{ .Values.org_data.ca_url }}" - CA_ADMIN_USER="${ORG_NAME}-admin" - CA_ADMIN_PASS="${ORG_NAME}-adminpw" + FULLY_QUALIFIED_ORG_NAME="{{ .Release.Namespace }}" + ALTERNATIVE_ORG_NAMES=("{{ .Release.Namespace }}.svc.cluster.local" "{{ .Values.orgData.orgName }}.net" "{{ .Release.Namespace }}.{{ .Values.global.proxy.externalUrlSuffix }}") + EXTERNAL_URL_SUFFIX="{{ .Values.global.proxy.externalUrlSuffix }}" + AFFILIATION="{{ .Values.orgData.orgName }}" + SUBJECT="{{ .Values.orgData.componentSubject }}" + CA="{{ .Values.orgData.caAddress }}" + CA_ADMIN_USER="{{ .Values.orgData.caAdminUser }}" + CA_ADMIN_PASS="{{ .Values.orgData.caAdminPassword }}" ORG_ADMIN_USER="Admin@${FULLY_QUALIFIED_ORG_NAME}" ORG_ADMIN_PASS="Admin@${FULLY_QUALIFIED_ORG_NAME}-pw" ORG_CYPTO_FOLDER="/crypto-config/peerOrganizations/${FULLY_QUALIFIED_ORG_NAME}" - ROOT_TLS_CERT="/crypto-config/peerOrganizations/${FULLY_QUALIFIED_ORG_NAME}/ca/ca.${FULLY_QUALIFIED_ORG_NAME}-cert.pem" - CAS_FOLDER="${HOME}/ca-tools/cas/ca-${ORG_NAME}" - ORG_HOME="${HOME}/ca-tools/${ORG_NAME}" - - NO_OF_PEERS={{ .Values.peer_count }} + CAS_FOLDER="${HOME}/ca-tools/cas/ca" + ORG_HOME="${HOME}/ca-tools/org" ## Enroll CA administrator for Org. This user will be used to create other identities - fabric-ca-client enroll -d -u https://${CA_ADMIN_USER}:${CA_ADMIN_PASS}@${CA} --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} --csr.names "${SUBJECT_PEER}" + fabric-ca-client enroll -d -u https://${CA_ADMIN_USER}:${CA_ADMIN_PASS}@${CA} --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} --csr.names "${SUBJECT}" ## Get the CA cert and store in Org MSP folder fabric-ca-client getcacert -d -u https://${CA} --tls.certfiles ${ROOT_TLS_CERT} -M ${ORG_CYPTO_FOLDER}/msp - if [ "{{ .Values.metadata.proxy }}" != "none" ]; then - mv ${ORG_CYPTO_FOLDER}/msp/cacerts/*.pem ${ORG_CYPTO_FOLDER}/msp/cacerts/ca-${FULLY_QUALIFIED_ORG_NAME}-${EXTERNAL_URL_SUFFIX}.pem - fi mkdir ${ORG_CYPTO_FOLDER}/msp/tlscacerts cp ${ORG_CYPTO_FOLDER}/msp/cacerts/* ${ORG_CYPTO_FOLDER}/msp/tlscacerts + if [ ! -e /crypto-config/admin-msp-exists ] || [ ! 
-e /crypto-config/admin-tls-exists ] || [ "$REFRESH_CERT_VALUE" = "true" ]; then + # Add affiliation for organisation + fabric-ca-client affiliation add ${AFFILIATION} -u https://${CA_ADMIN_USER}:${CA_ADMIN_PASS}@${CA} --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} + ## Register and enroll admin for Org and populate admincerts for MSP + fabric-ca-client register -d --id.name ${ORG_ADMIN_USER} --id.secret ${ORG_ADMIN_PASS} --id.type admin --csr.names "${SUBJECT}" --id.affiliation ${AFFILIATION} --id.attrs "hf.Registrar.Roles=client,hf.Registrar.Attributes=*,hf.Revoker=true,hf.AffiliationMgr=true,hf.GenCRL=true,admin=true:ecert,abac.init=true:ecert" --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} - # Add affiliation for organisation - fabric-ca-client affiliation add ${AFFILIATION} -u https://${CA_ADMIN_USER}:${CA_ADMIN_PASS}@${CA} --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} - ## Register and enroll admin for Org and populate admincerts for MSP - fabric-ca-client register -d --id.name ${ORG_ADMIN_USER} --id.secret ${ORG_ADMIN_PASS} --id.type admin --csr.names "${SUBJECT_PEER}" --id.affiliation ${AFFILIATION} --id.attrs "hf.Registrar.Roles=client,hf.Registrar.Attributes=*,hf.Revoker=true,hf.AffiliationMgr=true,hf.GenCRL=true,admin=true:ecert,abac.init=true:ecert" --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} + fabric-ca-client enroll -d -u https://${ORG_ADMIN_USER}:${ORG_ADMIN_PASS}@${CA} --id.affiliation ${AFFILIATION} --tls.certfiles ${ROOT_TLS_CERT} --home ${ORG_HOME}/admin --csr.names "${SUBJECT}" - fabric-ca-client enroll -d -u https://${ORG_ADMIN_USER}:${ORG_ADMIN_PASS}@${CA} --id.affiliation ${AFFILIATION} --tls.certfiles ${ROOT_TLS_CERT} --home ${ORG_HOME}/admin --csr.names "${SUBJECT_PEER}" + mkdir -p ${ORG_CYPTO_FOLDER}/msp/admincerts + cp ${ORG_HOME}/admin/msp/signcerts/* ${ORG_CYPTO_FOLDER}/msp/admincerts/${ORG_ADMIN_USER}-cert.pem - mkdir -p ${ORG_CYPTO_FOLDER}/msp/admincerts - cp ${ORG_HOME}/admin/msp/signcerts/* ${ORG_CYPTO_FOLDER}/msp/admincerts/${ORG_ADMIN_USER}-cert.pem + mkdir ${ORG_HOME}/admin/msp/admincerts + cp ${ORG_HOME}/admin/msp/signcerts/* ${ORG_HOME}/admin/msp/admincerts/${ORG_ADMIN_USER}-cert.pem - mkdir ${ORG_HOME}/admin/msp/admincerts - cp ${ORG_HOME}/admin/msp/signcerts/* ${ORG_HOME}/admin/msp/admincerts/${ORG_ADMIN_USER}-cert.pem + mkdir -p ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER} + cp -R ${ORG_HOME}/admin/msp ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER} - mkdir -p ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER} - cp -R ${ORG_HOME}/admin/msp ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER} + # Get TLS cert for admin and copy to appropriate location + fabric-ca-client enroll -d --enrollment.profile tls -u https://${ORG_ADMIN_USER}:${ORG_ADMIN_PASS}@${CA} -M ${ORG_HOME}/admin/tls --tls.certfiles ${ROOT_TLS_CERT} --csr.names "${SUBJECT}" - if [ "{{ .Values.metadata.proxy }}" != "none" ]; then - mv ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/msp/cacerts/*.pem ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/msp/cacerts/ca-${FULLY_QUALIFIED_ORG_NAME}-${EXTERNAL_URL_SUFFIX}.pem + # Copy the TLS key and cert to the appropriate place + mkdir -p ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls + cp ${ORG_HOME}/admin/tls/keystore/* ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls/client.key + cp ${ORG_HOME}/admin/tls/signcerts/* ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls/client.crt + cp ${ORG_HOME}/admin/tls/tlscacerts/* ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls/ca.crt fi - - # Get TLS cert for admin and copy to appropriate location - 
fabric-ca-client enroll -d --enrollment.profile tls -u https://${ORG_ADMIN_USER}:${ORG_ADMIN_PASS}@${CA} -M ${ORG_HOME}/admin/tls --tls.certfiles ${ROOT_TLS_CERT} --csr.names "${SUBJECT_PEER}" - - # Copy the TLS key and cert to the appropriate place - mkdir -p ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls - cp ${ORG_HOME}/admin/tls/keystore/* ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls/client.key - cp ${ORG_HOME}/admin/tls/signcerts/* ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls/client.crt - cp ${ORG_HOME}/admin/tls/tlscacerts/* ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls/ca.crt - - ## Register and enroll peers and populate their MSP folder - COUNTER=0 - while [ ${COUNTER} -lt ${NO_OF_PEERS} ]; do - PEER="peer${COUNTER}.${FULLY_QUALIFIED_ORG_NAME}" - CSR_HOSTS=${PEER} - for i in "${ALTERNATIVE_ORG_NAMES[@]}" - do - CSR_HOSTS="${CSR_HOSTS},peer${COUNTER}.${i}" - done - echo "Registering and enrolling $PEER with csr hosts ${CSR_HOSTS}" - + ## Register and enroll peer as per argument provided and populate their MSP folder + PEER_NAME=$1 + PEER="${PEER_NAME}.${FULLY_QUALIFIED_ORG_NAME}" + CSR_HOSTS=${PEER} + for i in "${ALTERNATIVE_ORG_NAMES[@]}" + do + CSR_HOSTS="${CSR_HOSTS},${PEER_NAME}.${i}" + done + echo "Registering and enrolling $PEER with csr hosts ${CSR_HOSTS}" + if [ ! -e /crypto-config/$PEER_NAME-msp-exists ] || [ ! -e /crypto-config/$PEER_NAME-tls-exists ] || [ "$REFRESH_CERT_VALUE" = "true" ]; then # Register the peer fabric-ca-client register -d --id.name ${PEER} --id.secret ${PEER}-pw --id.type peer --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} # Enroll to get peers TLS cert - fabric-ca-client enroll -d --enrollment.profile tls -u https://${PEER}:${PEER}-pw@${CA} -M ${ORG_HOME}/cas/peers/tls --csr.hosts "${CSR_HOSTS}" --tls.certfiles ${ROOT_TLS_CERT} --csr.names "${SUBJECT_PEER}" + fabric-ca-client enroll -d --enrollment.profile tls -u https://${PEER}:${PEER}-pw@${CA} -M ${ORG_HOME}/cas/peers/tls --csr.hosts "${CSR_HOSTS}" --tls.certfiles ${ROOT_TLS_CERT} --csr.names "${SUBJECT}" # Copy the TLS key and cert to the appropriate place mkdir -p ${ORG_CYPTO_FOLDER}/peers/${PEER}/tls @@ -248,68 +221,53 @@ data: rm -rf ${ORG_HOME}/cas/peers/tls # Enroll again to get the peer's enrollment certificate (default profile) - fabric-ca-client enroll -d -u https://${PEER}:${PEER}-pw@${CA} -M ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp --tls.certfiles ${ROOT_TLS_CERT} --csr.names "${SUBJECT_PEER}" + fabric-ca-client enroll -d -u https://${PEER}:${PEER}-pw@${CA} -M ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp --tls.certfiles ${ROOT_TLS_CERT} --csr.names "${SUBJECT}" # Create the TLS CA directories of the MSP folder if they don't exist. 
mkdir -p ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/tlscacerts # Copy the peer org's admin cert into target MSP directory mkdir -p ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/admincerts - if [ "{{ .Values.metadata.proxy }}" != "none" ]; then - mv ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/cacerts/*.pem ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/cacerts/ca-${FULLY_QUALIFIED_ORG_NAME}-${EXTERNAL_URL_SUFFIX}.pem - fi + cp ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/cacerts/* ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/tlscacerts cp ${ORG_CYPTO_FOLDER}/msp/admincerts/${ORG_ADMIN_USER}-cert.pem ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/admincerts - - let COUNTER=COUNTER+1 - done - + fi cd ${CURRENT_DIR} - + generate-crypto-add-peer.sh: |- #!/bin/bash - set -x CURRENT_DIR=${PWD} - FULLY_QUALIFIED_ORG_NAME="{{ .Values.metadata.namespace }}" - ALTERNATIVE_ORG_NAMES=("{{ .Values.metadata.namespace }}.svc.cluster.local" "{{ .Values.metadata.org_name }}.net" "{{ .Values.metadata.namespace }}.{{ .Values.org_data.external_url_suffix }}") - ORG_NAME="{{ .Values.metadata.org_name }}" - EXTERNAL_URL_SUFFIX="{{ .Values.org_data.external_url_suffix }}" - AFFILIATION="{{ .Values.metadata.org_name }}" - SUBJECT="C={{ .Values.org_data.component_country }},ST={{ .Values.org_data.component_state }},L={{ .Values.org_data.component_location }},O={{ .Values.metadata.org_name }}" - SUBJECT_PEER="{{ .Values.org_data.component_subject }}" - CA="{{ .Values.org_data.ca_url }}" - CA_ADMIN_USER="${ORG_NAME}-admin" - CA_ADMIN_PASS="${ORG_NAME}-adminpw" + FULLY_QUALIFIED_ORG_NAME="{{ .Release.Namespace }}" + ALTERNATIVE_ORG_NAMES=("{{ .Release.Namespace }}.svc.cluster.local" "{{ .Values.orgData.orgName }}.net" "{{ .Release.Namespace }}.{{ .Values.global.proxy.externalUrlSuffix }}") + EXTERNAL_URL_SUFFIX="{{ .Values.global.proxy.externalUrlSuffix }}" + AFFILIATION="{{ .Values.orgData.orgName }}" + SUBJECT="{{ .Values.orgData.componentSubject }}" + CA="{{ .Values.orgData.caAddress }}" + CA_ADMIN_USER="{{ .Values.orgData.caAdminUser }}" + CA_ADMIN_PASS="{{ .Values.orgData.caAdminPassword }}" ORG_ADMIN_USER="Admin@${FULLY_QUALIFIED_ORG_NAME}" ORG_ADMIN_PASS="Admin@${FULLY_QUALIFIED_ORG_NAME}-pw" ORG_CYPTO_FOLDER="/crypto-config/peerOrganizations/${FULLY_QUALIFIED_ORG_NAME}" - ROOT_TLS_CERT="/crypto-config/peerOrganizations/${FULLY_QUALIFIED_ORG_NAME}/ca/ca.${FULLY_QUALIFIED_ORG_NAME}-cert.pem" - CAS_FOLDER="${HOME}/ca-tools/cas/ca-${ORG_NAME}" - ORG_HOME="${HOME}/ca-tools/${ORG_NAME}" - - NO_OF_PEERS={{ .Values.peer_count }} - NO_OF_NEW_PEERS={{ .Values.new_peer_count }} + CAS_FOLDER="${HOME}/ca-tools/cas/ca" + ORG_HOME="${HOME}/ca-tools/org" ## Enroll CA administrator for Org. 
This user will be used to create other identities - fabric-ca-client enroll -d -u https://${CA_ADMIN_USER}:${CA_ADMIN_PASS}@${CA} --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} --csr.names "${SUBJECT_PEER}" + fabric-ca-client enroll -d -u https://${CA_ADMIN_USER}:${CA_ADMIN_PASS}@${CA} --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} --csr.names "${SUBJECT}" ## Get the CA cert and store in Org MSP folder fabric-ca-client getcacert -d -u https://${CA} --tls.certfiles ${ROOT_TLS_CERT} -M ${ORG_CYPTO_FOLDER}/msp - if [ "{{ .Values.metadata.proxy }}" != "none" ]; then - mv ${ORG_CYPTO_FOLDER}/msp/cacerts/*.pem ${ORG_CYPTO_FOLDER}/msp/cacerts/ca-${FULLY_QUALIFIED_ORG_NAME}-${EXTERNAL_URL_SUFFIX}.pem - fi mkdir ${ORG_CYPTO_FOLDER}/msp/tlscacerts cp ${ORG_CYPTO_FOLDER}/msp/cacerts/* ${ORG_CYPTO_FOLDER}/msp/tlscacerts ## Enroll admin for Org and populate admincerts for MSP - fabric-ca-client enroll -d -u https://${ORG_ADMIN_USER}:${ORG_ADMIN_PASS}@${CA} --id.affiliation ${AFFILIATION} --tls.certfiles ${ROOT_TLS_CERT} --home ${ORG_HOME}/admin --csr.names "${SUBJECT_PEER}" + fabric-ca-client enroll -d -u https://${ORG_ADMIN_USER}:${ORG_ADMIN_PASS}@${CA} --id.affiliation ${AFFILIATION} --tls.certfiles ${ROOT_TLS_CERT} --home ${ORG_HOME}/admin --csr.names "${SUBJECT}" # Copy existing org certs mkdir -p ${ORG_CYPTO_FOLDER}/msp/admincerts @@ -321,12 +279,8 @@ data: mkdir -p ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER} cp -R ${ORG_HOME}/admin/msp ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER} - if [ "{{ .Values.metadata.proxy }}" != "none" ]; then - mv ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/msp/cacerts/*.pem ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/msp/cacerts/ca-${FULLY_QUALIFIED_ORG_NAME}-${EXTERNAL_URL_SUFFIX}.pem - fi - # Get TLS cert for admin and copy to appropriate location - fabric-ca-client enroll -d --enrollment.profile tls -u https://${ORG_ADMIN_USER}:${ORG_ADMIN_PASS}@${CA} -M ${ORG_HOME}/admin/tls --tls.certfiles ${ROOT_TLS_CERT} --csr.names "${SUBJECT_PEER}" + fabric-ca-client enroll -d --enrollment.profile tls -u https://${ORG_ADMIN_USER}:${ORG_ADMIN_PASS}@${CA} -M ${ORG_HOME}/admin/tls --tls.certfiles ${ROOT_TLS_CERT} --csr.names "${SUBJECT}" # Copy the TLS key and cert to the appropriate place mkdir -p ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls @@ -335,78 +289,67 @@ data: cp ${ORG_HOME}/admin/tls/tlscacerts/* ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls/ca.crt ## Register and enroll peers and populate their MSP folder - COUNTER=`expr ${NO_OF_PEERS} - ${NO_OF_NEW_PEERS}` - while [ ${COUNTER} -lt ${NO_OF_PEERS} ]; do - PEER="peer${COUNTER}.${FULLY_QUALIFIED_ORG_NAME}" - CSR_HOSTS=${PEER} - for i in "${ALTERNATIVE_ORG_NAMES[@]}" - do - CSR_HOSTS="${CSR_HOSTS},peer${COUNTER}.${i}" - done - echo "Registering and enrolling $PEER with csr hosts ${CSR_HOSTS}" + PEER_NAME=$1 + PEER="${PEER_NAME}.${FULLY_QUALIFIED_ORG_NAME}" + CSR_HOSTS=${PEER} + for i in "${ALTERNATIVE_ORG_NAMES[@]}" + do + CSR_HOSTS="${CSR_HOSTS},${PEER_NAME}.${i}" + done + echo "Registering and enrolling $PEER with csr hosts ${CSR_HOSTS}" - # Register the peer - fabric-ca-client register -d --id.name ${PEER} --id.secret ${PEER}-pw --id.type peer --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} -u https://${CA} + # Register the peer + fabric-ca-client register -d --id.name ${PEER} --id.secret ${PEER}-pw --id.type peer --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} -u https://${CA} - # Enroll to get peers TLS cert - fabric-ca-client enroll -d --enrollment.profile tls -u 
https://${PEER}:${PEER}-pw@${CA} -M ${ORG_HOME}/cas/peers/tls --csr.hosts "${CSR_HOSTS}" --tls.certfiles ${ROOT_TLS_CERT} --csr.names "${SUBJECT_PEER}" + # Enroll to get peers TLS cert + fabric-ca-client enroll -d --enrollment.profile tls -u https://${PEER}:${PEER}-pw@${CA} -M ${ORG_HOME}/cas/peers/tls --csr.hosts "${CSR_HOSTS}" --tls.certfiles ${ROOT_TLS_CERT} --csr.names "${SUBJECT}" - # Copy the TLS key and cert to the appropriate place - mkdir -p ${ORG_CYPTO_FOLDER}/peers/${PEER}/tls - cp ${ORG_HOME}/cas/peers/tls/keystore/* ${ORG_CYPTO_FOLDER}/peers/${PEER}/tls/server.key - cp ${ORG_HOME}/cas/peers/tls/signcerts/* ${ORG_CYPTO_FOLDER}/peers/${PEER}/tls/server.crt - cp ${ORG_HOME}/cas/peers/tls/tlscacerts/* ${ORG_CYPTO_FOLDER}/peers/${PEER}/tls/ca.crt - - rm -rf ${ORG_HOME}/cas/peers/tls - - # Enroll again to get the peer's enrollment certificate (default profile) - fabric-ca-client enroll -d -u https://${PEER}:${PEER}-pw@${CA} -M ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp --tls.certfiles ${ROOT_TLS_CERT} --csr.names "${SUBJECT_PEER}" + # Copy the TLS key and cert to the appropriate place + mkdir -p ${ORG_CYPTO_FOLDER}/peers/${PEER}/tls + cp ${ORG_HOME}/cas/peers/tls/keystore/* ${ORG_CYPTO_FOLDER}/peers/${PEER}/tls/server.key + cp ${ORG_HOME}/cas/peers/tls/signcerts/* ${ORG_CYPTO_FOLDER}/peers/${PEER}/tls/server.crt + cp ${ORG_HOME}/cas/peers/tls/tlscacerts/* ${ORG_CYPTO_FOLDER}/peers/${PEER}/tls/ca.crt + + rm -rf ${ORG_HOME}/cas/peers/tls + + # Enroll again to get the peer's enrollment certificate (default profile) + fabric-ca-client enroll -d -u https://${PEER}:${PEER}-pw@${CA} -M ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp --tls.certfiles ${ROOT_TLS_CERT} --csr.names "${SUBJECT}" - # Create the TLS CA directories of the MSP folder if they don't exist. - mkdir -p ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/tlscacerts - - # Copy the peer org's admin cert into target MSP directory - mkdir -p ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/admincerts - if [ "{{ .Values.metadata.proxy }}" != "none" ]; then - mv ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/cacerts/*.pem ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/cacerts/ca-${FULLY_QUALIFIED_ORG_NAME}-${EXTERNAL_URL_SUFFIX}.pem - fi - cp ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/cacerts/* ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/tlscacerts - cp ${ORG_CYPTO_FOLDER}/msp/admincerts/${ORG_ADMIN_USER}-cert.pem ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/admincerts - - let COUNTER=COUNTER+1 - done + # Create the TLS CA directories of the MSP folder if they don't exist. 
+ mkdir -p ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/tlscacerts + + # Copy the peer org's admin cert into target MSP directory + mkdir -p ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/admincerts + + cp ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/cacerts/* ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/tlscacerts + cp ${ORG_CYPTO_FOLDER}/msp/admincerts/${ORG_ADMIN_USER}-cert.pem ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/admincerts cd ${CURRENT_DIR} generate-user-crypto.sh: |- #!/bin/bash - set -x CURRENT_DIR=${PWD} # Input parameters - FULLY_QUALIFIED_ORG_NAME="{{ .Values.metadata.namespace }}" - ORG_NAME="{{ .Values.metadata.org_name }}" + FULLY_QUALIFIED_ORG_NAME="{{ .Release.Namespace }}" TYPE_FOLDER=$1s USER_IDENTITIES=$2 - AFFILIATION="{{ .Values.metadata.org_name }}" - SUBJECT="{{ .Values.org_data.component_subject }}" - CA="{{ .Values.org_data.ca_url }}" - if [ "$1" != "peer" ]; then - ORG_CYPTO_FOLDER="/crypto-config/ordererOrganizations/${FULLY_QUALIFIED_ORG_NAME}" - ROOT_TLS_CERT="/crypto-config/ordererOrganizations/${FULLY_QUALIFIED_ORG_NAME}/ca/ca.${FULLY_QUALIFIED_ORG_NAME}-cert.pem" - else - ORG_CYPTO_FOLDER="/crypto-config/$1Organizations/${FULLY_QUALIFIED_ORG_NAME}" - ROOT_TLS_CERT="/crypto-config/$1Organizations/${FULLY_QUALIFIED_ORG_NAME}/ca/ca.${FULLY_QUALIFIED_ORG_NAME}-cert.pem" - fi - CAS_FOLDER="${HOME}/ca-tools/cas/ca-${ORG_NAME}" - ORG_HOME="${HOME}/ca-tools/${ORG_NAME}" + AFFILIATION="{{ .Values.orgData.orgName }}" + SUBJECT="{{ .Values.orgData.componentSubject }}" + CA="{{ .Values.orgData.caAddress }}" + + ORG_CYPTO_FOLDER="/crypto-config/$1Organizations/${FULLY_QUALIFIED_ORG_NAME}" + ROOT_TLS_CERT="/crypto-config/$1Organizations/${FULLY_QUALIFIED_ORG_NAME}/ca/ca.${FULLY_QUALIFIED_ORG_NAME}-cert.pem" + + CAS_FOLDER="${HOME}/ca-tools/cas/ca" + ORG_HOME="${HOME}/ca-tools/org" ## Register and enroll users CUR_USER=0 TOTAL_USERS=$(echo ${USER_IDENTITIES} | base64 -d | sed -e 's/None/null/g' | tr "'" '"' | jq '. | length') + while [ ${CUR_USER} -lt ${TOTAL_USERS} ]; do - # Get the user identity USER=$(echo ${USER_IDENTITIES} | base64 -d | sed -e 's/None/null/g' | tr "'" '"' | jq '.['${CUR_USER}'].identity' | sed -e 's/"//g') ORG_USER="${USER}@${FULLY_QUALIFIED_ORG_NAME}" @@ -422,16 +365,12 @@ data: ATTRS=${ATTRS}","$(echo ${USER_IDENTITIES} | base64 -d | sed -e 's/None/null/g' | tr "'" '"' | jq '.['${CUR_USER}'].attributes['${CUR_ATTRS}'].key' | sed -e 's/"//g')"="$(echo ${USER_IDENTITIES} | base64 -d | sed -e 's/None/null/g' | tr "'" '"' | jq '.['${CUR_USER}'].attributes['${CUR_ATTRS}'].value' | sed -e 's/"//g')":ecert" CUR_ATTRS=$((CUR_ATTRS + 1)) done - - # Checking if the user msp folder exists in the CA server - if [ ! -d "${ORG_HOME}/client${USER}" ]; then # if user certificates do not exist - + + #Check if the user certs does not exist + if [ ! -e /crypto-config/${USER}-msp-exists ] || [ ! 
-e /crypto-config/${USER}-tls-exists ]; then + # if user certificates do not exist ## Register and enroll User for Org - if [ "$1" = "peer" ]; then - fabric-ca-client register -d --id.name ${ORG_USER} --id.secret ${ORG_USERPASS} --id.type client --csr.names "${SUBJECT}" --id.affiliation ${AFFILIATION} --id.attrs "${ATTRS}" --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} - else - fabric-ca-client register -d --id.name ${ORG_USER} --id.secret ${ORG_USERPASS} --id.type client --csr.names "${SUBJECT}" --id.attrs "${ATTRS}" --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} - fi + fabric-ca-client register -d --id.name ${ORG_USER} --id.secret ${ORG_USERPASS} --id.type client --csr.names "${SUBJECT}" --id.affiliation ${AFFILIATION} --id.attrs "${ATTRS}" --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} # Enroll the registered user to generate enrollment certificate fabric-ca-client enroll -d -u https://${ORG_USER}:${ORG_USERPASS}@${CA} --csr.names "${SUBJECT}" --tls.certfiles ${ROOT_TLS_CERT} --home ${ORG_HOME}/client${USER} @@ -442,9 +381,6 @@ data: mkdir -p ${ORG_CYPTO_FOLDER}/users/${ORG_USER} cp -R ${ORG_HOME}/client${USER}/msp ${ORG_CYPTO_FOLDER}/users/${ORG_USER} - if [ "{{ .Values.metadata.proxy }}" != "none" ]; then - mv ${ORG_CYPTO_FOLDER}/users/${ORG_USER}/msp/cacerts/*.pem ${ORG_CYPTO_FOLDER}/users/${ORG_USER}/msp/cacerts/ca-${FULLY_QUALIFIED_ORG_NAME}-${EXTERNAL_URL_SUFFIX}.pem - fi mkdir ${ORG_CYPTO_FOLDER}/users/${ORG_USER}/msp/tlscacerts cp ${ORG_CYPTO_FOLDER}/users/${ORG_USER}/msp/cacerts/* ${ORG_CYPTO_FOLDER}/users/${ORG_USER}/msp/tlscacerts @@ -461,7 +397,7 @@ data: # Current datetime + 5 minutes | e.g. 20210302182036 CUR_DATETIME=$(date -d "$(echo $(date)' + 5 minutes')" +'%Y%m%d%H%M%S') - + #TODO get ${ORG_HOME}/client${USER}/msp/signcerts/cert.pem from Kubernetes secret or Vault in job.yaml # Extracting "notAfter" datetime from the existing user certificate | e.g. 20210302182036 CERT_DATETIME=$(date -d "$(echo $(openssl x509 -noout -enddate < ${ORG_HOME}/client${USER}/msp/signcerts/cert.pem) | sed 's/notAfter=//g')" +'%Y%m%d%H%M%S') @@ -539,12 +475,14 @@ apiVersion: v1 kind: ConfigMap metadata: name: orderer-script-store-vault - namespace: {{ .Values.metadata.namespace }} + namespace: {{ .Release.Namespace }} labels: app.kubernetes.io/name: orderer-script-vault - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/part-of: {{ include "fabric-catools.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm data: store-vault-orderer.sh: |- #!/bin/bash @@ -557,371 +495,230 @@ data: done < ${1} > ${2}/${NAME}.txt } - validateVaultResponse () { - if echo ${2} | grep "errors" || [ "${2}" = "" ]; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -fsS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? 
- if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code and curl_response - $curl_response" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - echo "Puting secrets/certificates from Vault server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - - FORMAT_CERTIFICATE_PATH="/formatcertificate" - mkdir -p ${FORMAT_CERTIFICATE_PATH}/tls - mkdir -p ${FORMAT_CERTIFICATE_PATH}/msp - - ORG_CYPTO_FOLDER="/crypto-config/ordererOrganizations/${COMPONENT_NAME}/users/Admin@${COMPONENT_NAME}" - - if [ -e /certcheck/present_tls.txt ]; then ADMIN_TLS_CERT_WRITTEN=true; else ADMIN_TLS_CERT_WRITTEN=false; fi - if [ -e /certcheck/present_msp.txt ]; then ADMIN_MSP_CERT_WRITTEN=true; else ADMIN_MSP_CERT_WRITTEN=false; fi - COUNTER=1 - while [ "$COUNTER" -le {{ $.Values.healthcheck.retries }} ] - do + function saveAdminSecrets { + TLS_KEY=admin-tls + MSP_KEY=admin-msp +{{- if eq .Values.global.vault.type "hashicorp" }} + . ../bevel-vault.sh + # Calling a function to retrieve the vault token. + vaultBevelFunc "init" + + FORMAT_CERTIFICATE_PATH="/formatcertificate" + mkdir -p ${FORMAT_CERTIFICATE_PATH}/tls + mkdir -p ${FORMAT_CERTIFICATE_PATH}/msp + if [ ! -e /crypto-config/${TLS_KEY}-exists ] || [ "$REFRESH_CERT_VALUE" = "true" ]; then + # This commands put the certificates with correct format for the curl command + formatCertificate "${ORG_CYPTO_FOLDER}/tls/ca.crt" "${FORMAT_CERTIFICATE_PATH}/tls" + formatCertificate "${ORG_CYPTO_FOLDER}/tls/client.crt" "${FORMAT_CERTIFICATE_PATH}/tls" + formatCertificate "${ORG_CYPTO_FOLDER}/tls/client.key" "${FORMAT_CERTIFICATE_PATH}/tls" - if [ -e /certcheck/absent_tls.txt ] && [ "$ADMIN_TLS_CERT_WRITTEN" = "false" ] - then - - # This commands put the certificates with correct format for the curl command - formatCertificate "${ORG_CYPTO_FOLDER}/tls/ca.crt" "${FORMAT_CERTIFICATE_PATH}/tls" - formatCertificate "${ORG_CYPTO_FOLDER}/tls/client.crt" "${FORMAT_CERTIFICATE_PATH}/tls" - formatCertificate "${ORG_CYPTO_FOLDER}/tls/client.key" "${FORMAT_CERTIFICATE_PATH}/tls" - - CA_CRT=$(cat ${FORMAT_CERTIFICATE_PATH}/tls/ca.crt.txt) - CLIENT_CRT=$(cat ${FORMAT_CERTIFICATE_PATH}/tls/client.crt.txt) - CLIENT_KEY=$(cat ${FORMAT_CERTIFICATE_PATH}/tls/client.key.txt) - - echo " - { - \"data\": - { - \"ca.crt\": \"${CA_CRT}\", - \"client.crt\": \"${CLIENT_CRT}\", - \"client.key\": \"${CLIENT_KEY}\" - } - }" > payload.json - - # This command copy organization level tls certificates for orgs - curl \ - -H "X-Vault-Token: ${VAULT_TOKEN}" \ - -H "Content-Type: application/json" \ - -X POST \ - -d @payload.json \ - ${VAULT_ADDR}/v1/${VAULT_SECRET_USERS}/admin/tls - - # Check tls certificates - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${VAULT_SECRET_USERS}/admin/tls | jq -r 'if .errors then . else . 
end') - TLS_CA_CERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["ca.crt"]' 2>&1) - TLS_CLIENT_CERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["client.crt"]' 2>&1) - TLS_CLIENT_KEY=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["client.key"]' 2>&1) - - tls_certificate_fields=("$TLS_CA_CERT" "$TLS_CLIENT_CERT" "$TLS_CLIENT_KEY") - - for field in "${tls_certificate_fields[@]}" - do - if [ "$field" = "null" ] || [[ "$field" = "parse error"* ]] || [ "$field" = "" ] - then - ADMIN_TLS_CERT_WRITTEN=false - break - else - ADMIN_TLS_CERT_WRITTEN=true - fi - done - rm payload.json + CA_CRT=$(cat ${FORMAT_CERTIFICATE_PATH}/tls/ca.crt.txt) + CLIENT_CRT=$(cat ${FORMAT_CERTIFICATE_PATH}/tls/client.crt.txt) + CLIENT_KEY=$(cat ${FORMAT_CERTIFICATE_PATH}/tls/client.key.txt) + + echo " + { + \"data\": + { + \"ca_crt\": \"${CA_CRT}\", + \"client_crt\": \"${CLIENT_CRT}\", + \"client_key\": \"${CLIENT_KEY}\" + } + }" > payload.json + vaultBevelFunc 'write' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/users/${TLS_KEY}" 'payload.json' + rm payload.json fi - if [ -e /certcheck/absent_msp.txt ] && [ "$ADMIN_MSP_CERT_WRITTEN" = "false" ] - then - # This commands put the certificates with correct format for the curl command - SK_NAME=$(find ${ORG_CYPTO_FOLDER}/msp/keystore/ -name "*_sk") - - formatCertificate "${ORG_CYPTO_FOLDER}/msp/admincerts/Admin@${COMPONENT_NAME}-cert.pem" "${FORMAT_CERTIFICATE_PATH}/msp" - formatCertificate "${SK_NAME}" "${FORMAT_CERTIFICATE_PATH}/msp" - formatCertificate "${ORG_CYPTO_FOLDER}/msp/signcerts/cert.pem" "${FORMAT_CERTIFICATE_PATH}/msp" - formatCertificate "${ORG_CYPTO_FOLDER}/tls/ca.crt" "${FORMAT_CERTIFICATE_PATH}/tls" + if [ ! -e /crypto-config/${MSP_KEY}-exists ] || [ "$REFRESH_CERT_VALUE" = "true" ]; then + # This commands put the certificates with correct format for the curl command + SK_NAME=$(find ${ORG_CYPTO_FOLDER}/msp/keystore/ -name "*_sk") + formatCertificate "${ORG_CYPTO_FOLDER}/msp/admincerts/Admin@${COMPONENT_NAME}-cert.pem" "${FORMAT_CERTIFICATE_PATH}/msp" + formatCertificate "${SK_NAME}" "${FORMAT_CERTIFICATE_PATH}/msp" + formatCertificate "${ORG_CYPTO_FOLDER}/msp/signcerts/cert.pem" "${FORMAT_CERTIFICATE_PATH}/msp" + formatCertificate "${ORG_CYPTO_FOLDER}/tls/ca.crt" "${FORMAT_CERTIFICATE_PATH}/tls" - ADMINCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/msp/Admin@${COMPONENT_NAME}-cert.pem.txt) - KEYSTORE=$(cat ${FORMAT_CERTIFICATE_PATH}/msp/*_sk.txt) - SIGNCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/msp/cert.pem.txt) - CA_CRT=$(cat ${FORMAT_CERTIFICATE_PATH}/tls/ca.crt.txt) + ADMINCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/msp/Admin@${COMPONENT_NAME}-cert.pem.txt) + KEYSTORE=$(cat ${FORMAT_CERTIFICATE_PATH}/msp/*_sk.txt) + SIGNCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/msp/cert.pem.txt) + CA_CRT=$(cat ${FORMAT_CERTIFICATE_PATH}/tls/ca.crt.txt) - if [ "$PROXY" != "none" ] ; then + formatCertificate "${ORG_CYPTO_FOLDER}/msp/cacerts/{{ include "fabric-catools.caFileName" . }}" "${FORMAT_CERTIFICATE_PATH}/msp" + CACERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/msp/{{ include "fabric-catools.caFileName" . 
}}.txt) + + echo " + { + \"data\": + { + \"admincerts\": \"${ADMINCERTS}\", + \"cacerts\": \"${CACERTS}\", + \"keystore\": \"${KEYSTORE}\", + \"signcerts\": \"${SIGNCERTS}\", + \"tlscacerts\": \"${CA_CRT}\" + } + }" > payload.json - formatCertificate "${ORG_CYPTO_FOLDER}/msp/cacerts/ca-${COMPONENT_NAME}-${EXTERNAL_URL_SUFFIX}.pem" "${FORMAT_CERTIFICATE_PATH}/msp" - CACERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/msp/ca-${COMPONENT_NAME}-${EXTERNAL_URL_SUFFIX}.pem.txt) - - echo " - { - \"data\": - { - \"admincerts\": \"${ADMINCERTS}\", - \"cacerts\": \"${CACERTS}\", - \"keystore\": \"${KEYSTORE}\", - \"signcerts\": \"${SIGNCERTS}\", - \"tlscacerts\": \"${CA_CRT}\" - } - }" > payload.json - fi; - - if [ "$PROXY" = "none" ] ; then - - formatCertificate "${ORG_CYPTO_FOLDER}/msp/cacerts/ca-${COMPONENT_NAME}-7054.pem" "${FORMAT_CERTIFICATE_PATH}/msp" - CACERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/msp/ca-${COMPONENT_NAME}-7054.pem.txt) - - echo " - { - \"data\": - { - \"admincerts\": \"${ADMINCERTS}\", - \"cacerts\": \"${CACERTS}\", - \"keystore\": \"${KEYSTORE}\", - \"signcerts\": \"${SIGNCERTS}\", - \"tlscacerts\": \"${CA_CRT}\" - } - }" > payload.json - fi; - - # This command copy organization level msp certificates for orgs - curl \ - -H "X-Vault-Token: ${VAULT_TOKEN}" \ - -H "Content-Type: application/json" \ - -X POST \ - -d @payload.json \ - ${VAULT_ADDR}/v1/${VAULT_SECRET_USERS}/admin/msp - - # Check msp certificates - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${VAULT_SECRET_USERS}/admin/msp | jq -r 'if .errors then . else . end') - MSP_ADMINCERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["admincerts"]' 2>&1) - MSP_CACERTS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["cacerts"]' 2>&1) - MSP_KEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["keystore"]' 2>&1) - MSP_SIGNCERTS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["signcerts"]' 2>&1) - MSP_TLSCACERTS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["tlscacerts"]' 2>&1) - - msp_certificate_fields=("$MSP_ADMINCERT" "$MSP_CACERTS" "$MSP_KEYSTORE" "$MSP_SIGNCERTS" "$MSP_TLSCACERTS") - - for field in "${msp_certificate_fields[@]}" - do - if [ "$field" = "null" ] || [[ "$field" = "parse error"* ]] || [ "$field" = "" ] - then - ADMIN_MSP_CERT_WRITTEN=false - break - else - ADMIN_MSP_CERT_WRITTEN=true - fi - done - rm payload.json + vaultBevelFunc 'write' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/users/${MSP_KEY}" 'payload.json' + rm payload.json fi - - if [ "$ADMIN_TLS_CERT_WRITTEN" = "true" ] && [ "$ADMIN_MSP_CERT_WRITTEN" = "true" ] - then - echo "Admin certificates are successfully stored in vault" - break - else - echo "Admin certificates are not ready, sleeping for {{ $.Values.healthcheck.sleepTimeAfterError }}" - sleep {{ $.Values.healthcheck.sleepTimeAfterError }} - COUNTER=`expr "$COUNTER" + 1` +{{- end }} # end Vault if condition + # Files are stored as K8s secrets; add more conditions here for cloud KMS + if [ ! -e /crypto-config/${TLS_KEY}-exists ] || [ "$REFRESH_CERT_VALUE" = "true" ]; then + # Check if secret exists + kubectl get secret --namespace ${COMPONENT_NAME} ${TLS_KEY} >/dev/null 2>&1 + if [ $? 
-eq 0 ]; then + # Delete the secret if exists + kubectl delete secret --namespace ${COMPONENT_NAME} ${TLS_KEY} + fi + kubectl create secret generic ${TLS_KEY} --namespace ${COMPONENT_NAME} --from-file=cacrt=${ORG_CYPTO_FOLDER}/tls/ca.crt \ + --from-file=clientcrt=${ORG_CYPTO_FOLDER}/tls/client.crt \ + --from-file=clientkey=${ORG_CYPTO_FOLDER}/tls/client.key fi - done - - if [ "$COUNTER" -gt {{ $.Values.healthcheck.retries }} ] - then - echo "Retry attempted `expr $COUNTER - 1` times, Admin certificates have not been saved." - touch ${MOUNT_PATH}/certs_not_found.txt - exit 1 - fi; + + if [ ! -e /crypto-config/${MSP_KEY}-exists ] || [ "$REFRESH_CERT_VALUE" = "true" ]; then + SK_NAME=$(find ${ORG_CYPTO_FOLDER}/msp/keystore/ -name "*_sk") + kubectl get secret --namespace ${COMPONENT_NAME} ${MSP_KEY} >/dev/null 2>&1 + if [ $? -eq 0 ]; then + # Delete the secret if exists + kubectl delete secret --namespace ${COMPONENT_NAME} ${MSP_KEY} + fi + kubectl create secret generic ${MSP_KEY} --namespace ${COMPONENT_NAME} \ + --from-file=admincerts=${ORG_CYPTO_FOLDER}/msp/admincerts/Admin@${COMPONENT_NAME}-cert.pem \ + --from-file=cacerts=${ORG_CYPTO_FOLDER}/msp/cacerts/{{ include "fabric-catools.caFileName" . }} \ + --from-file=keystore=${SK_NAME} \ + --from-file=signcerts=${ORG_CYPTO_FOLDER}/msp/signcerts/cert.pem \ + --from-file=tlscacerts=${ORG_CYPTO_FOLDER}/tls/ca.crt + fi + echo "Orderer Admin certificates are successfully stored." + } - ORG_CYPTO_ORDERER_FOLDER="/crypto-config/ordererOrganizations/${COMPONENT_NAME}/orderers" - list=$(echo "$ORDERERS_NAMES" | tr "-" "\n") - for ORDERER_NAME in $list - do - COUNTER=1 - if [ -e /certcheck/present_tls_${ORDERER_NAME}.txt ]; then ORDERER_TLS_CERT_WRITTEN=true; else ORDERER_TLS_CERT_WRITTEN=false; fi - if [ -e /certcheck/present_msp_${ORDERER_NAME}.txt ]; then ORDERER_MSP_CERT_WRITTEN=true; else ORDERER_MSP_CERT_WRITTEN=false; fi + function saveOrdererSecrets { + ORDERER_NAME=$1 + TLS_KEY=$1-tls + MSP_KEY=$1-msp +{{- if eq .Values.global.vault.type "hashicorp" }} + . ../bevel-vault.sh + # Calling a function to retrieve the vault token. + vaultBevelFunc "init" + FORMAT_CERTIFICATE_PATH="/formatcertificate" mkdir -p ${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/tls mkdir -p ${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp mkdir -p ${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp/cacerts mkdir -p ${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp/tlscacerts - while [ "$COUNTER" -le {{ $.Values.healthcheck.retries }} ] - do - if [ -e /certcheck/absent_tls_${ORDERER_NAME}.txt ] && [ "$ORDERER_TLS_CERT_WRITTEN" = "false" ]; then + if [ ! 
-e /crypto-config/${TLS_KEY}-exists ] || [ "$REFRESH_CERT_VALUE" = "true" ]; then + # This commands put the certificates with correct format for the curl command + formatCertificate "${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/tls/ca.crt" "${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/tls" + formatCertificate "${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/tls/server.crt" "${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/tls" + formatCertificate "${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/tls/server.key" "${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/tls" + + CA_CRT=$(cat ${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/tls/ca.crt.txt) + SERVER_CRT=$(cat ${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/tls/server.crt.txt) + SERVER_KEY=$(cat ${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/tls/server.key.txt) + + echo " + { + \"data\": + { + \"ca_crt\": \"${CA_CRT}\", + \"server_crt\": \"${SERVER_CRT}\", + \"server_key\": \"${SERVER_KEY}\" + } + }" > payload.json - # This commands put the certificates with correct format for the curl command - formatCertificate "${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/tls/ca.crt" "${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/tls" - formatCertificate "${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/tls/server.crt" "${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/tls" - formatCertificate "${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/tls/server.key" "${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/tls" + vaultBevelFunc 'write' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/orderers/${TLS_KEY}" 'payload.json' + rm payload.json + fi + + if [ ! -e /crypto-config/${MSP_KEY}-exists ] || [ "$REFRESH_CERT_VALUE" = "true" ]; then + # This commands put the certificates with correct format for the curl command + SK_NAME=$(find ${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/msp/keystore/ -name "*_sk") + formatCertificate "${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/msp/admincerts/Admin@${COMPONENT_NAME}-cert.pem" "${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp" + formatCertificate "${SK_NAME}" "${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp" + formatCertificate "${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/msp/signcerts/cert.pem" "${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp" + + ADMINCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp/Admin@${COMPONENT_NAME}-cert.pem.txt) + KEYSTORE=$(cat ${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp/*_sk.txt) + SIGNCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp/cert.pem.txt) - CA_CRT=$(cat ${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/tls/ca.crt.txt) - SERVER_CRT=$(cat ${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/tls/server.crt.txt) - SERVER_KEY=$(cat ${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/tls/server.key.txt) + formatCertificate "${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/msp/cacerts/{{ include "fabric-catools.caFileName" . }}" "${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp/cacerts" + formatCertificate "${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/msp/tlscacerts/{{ include "fabric-catools.caFileName" . }}" "${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp/tlscacerts" + CACERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp/cacerts/{{ include "fabric-catools.caFileName" . }}.txt) + TLSCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp/tlscacerts/{{ include "fabric-catools.caFileName" . 
}}.txt) - echo " + echo " { \"data\": { - \"ca.crt\": \"${CA_CRT}\", - \"server.crt\": \"${SERVER_CRT}\", - \"server.key\": \"${SERVER_KEY}\" + \"admincerts\": \"${ADMINCERTS}\", + \"cacerts\": \"${CACERTS}\", + \"keystore\": \"${KEYSTORE}\", + \"signcerts\": \"${SIGNCERTS}\", + \"tlscacerts\": \"${TLSCERTS}\" } }" > payload.json - # This command copy the crypto material for orderer (tls) - curl \ - -H "X-Vault-Token: ${VAULT_TOKEN}" \ - -H "Content-Type: application/json" \ - -X POST \ - -d @payload.json \ - ${VAULT_ADDR}/v1/${VAULT_SECRET_ORDERER}/${ORDERER_NAME}.${COMPONENT_NAME}/tls - - # Check tls certificates - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${VAULT_SECRET_ORDERER}/${ORDERER_NAME}.${COMPONENT_NAME}/tls | jq -r 'if .errors then . else . end') - TLS_CA_CERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["ca.crt"]' 2>&1) - TLS_SERVER_CERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["server.crt"]' 2>&1) - TLS_SERVER_KEY=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["server.key"]' 2>&1) - - tls_certificate_fields=("$TLS_CA_CERT" "$TLS_SERVER_CERT" "$TLS_SERVER_KEY") - - for field in "${tls_certificate_fields[@]}" - do - if [ "$field" = "null" ] || [[ "$field" = "parse error"* ]] || [ "$field" = "" ] - then - ORDERER_TLS_CERT_WRITTEN=false - break - else - ORDERER_TLS_CERT_WRITTEN=true - fi - done - rm payload.json - fi; - - if [ -e /certcheck/absent_msp_${ORDERER_NAME}.txt ] && [ "$ORDERER_MSP_CERT_WRITTEN" = "false" ]; then - # This commands put the certificates with correct format for the curl command - SK_NAME=$(find ${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/msp/keystore/ -name "*_sk") - - formatCertificate "${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/msp/admincerts/Admin@${COMPONENT_NAME}-cert.pem" "${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp" - formatCertificate "${SK_NAME}" "${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp" - formatCertificate "${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/msp/signcerts/cert.pem" "${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp" - - ADMINCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp/Admin@${COMPONENT_NAME}-cert.pem.txt) - KEYSTORE=$(cat ${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp/*_sk.txt) - SIGNCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp/cert.pem.txt) - - if [ "$PROXY" != "none" ] ; then - - formatCertificate "${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/msp/cacerts/ca-${COMPONENT_NAME}-${EXTERNAL_URL_SUFFIX}.pem" "${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp/cacerts" - formatCertificate "${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/msp/tlscacerts/ca-${COMPONENT_NAME}-${EXTERNAL_URL_SUFFIX}.pem" "${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp/tlscacerts" - CACERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp/cacerts/ca-${COMPONENT_NAME}-${EXTERNAL_URL_SUFFIX}.pem.txt) - TLSCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp/tlscacerts/ca-${COMPONENT_NAME}-${EXTERNAL_URL_SUFFIX}.pem.txt) - - echo " - { - \"data\": - { - \"admincerts\": \"${ADMINCERTS}\", - \"cacerts\": \"${CACERTS}\", - \"keystore\": \"${KEYSTORE}\", - \"signcerts\": \"${SIGNCERTS}\", - \"tlscacerts\": \"${TLSCERTS}\" - } - }" > payload.json - - fi; - - if [ "$PROXY" = "none" ] ; then - formatCertificate "${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/msp/cacerts/ca-${COMPONENT_NAME}-7054.pem" "${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp/cacerts" - 
formatCertificate "${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/msp/tlscacerts/ca-${COMPONENT_NAME}-7054.pem" "${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp/tlscacerts" - CACERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp/cacerts/ca-${COMPONENT_NAME}-7054.pem.txt) - TLSCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${ORDERER_NAME}/msp/tlscacerts/ca-${COMPONENT_NAME}-7054.pem.txt) - - echo " - { - \"data\": - { - \"admincerts\": \"${ADMINCERTS}\", - \"cacerts\": \"${CACERTS}\", - \"keystore\": \"${KEYSTORE}\", - \"signcerts\": \"${SIGNCERTS}\", - \"tlscacerts\": \"${TLSCERTS}\" - } - }" > payload.json - - fi; - - # This command copy the msp certificates to the Vault - curl \ - -H "X-Vault-Token: ${VAULT_TOKEN}" \ - -H "Content-Type: application/json" \ - -X POST \ - -d @payload.json \ - ${VAULT_ADDR}/v1/${VAULT_SECRET_ORDERER}/${ORDERER_NAME}.${COMPONENT_NAME}/msp - - # Check msp certificates - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${VAULT_SECRET_ORDERER}/${ORDERER_NAME}.${COMPONENT_NAME}/msp | jq -r 'if .errors then . else . end') - MSP_ADMINCERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["admincerts"]' 2>&1) - MSP_CACERTS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["cacerts"]' 2>&1) - MSP_KEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["keystore"]' 2>&1) - MSP_SIGNCERTS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["signcerts"]' 2>&1) - MSP_TLSCACERTS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["tlscacerts"]' 2>&1) - - msp_certificate_fields=("$MSP_ADMINCERT" "$MSP_CACERTS" "$MSP_KEYSTORE" "$MSP_SIGNCERTS" "$MSP_TLSCACERTS") - - for field in "${msp_certificate_fields[@]}" - do - if [ "$field" = "null" ] || [[ "$field" = "parse error"* ]] || [ "$field" = "" ] - then - ORDERER_MSP_CERT_WRITTEN=false - break - else - ORDERER_MSP_CERT_WRITTEN=true - fi - done - rm payload.json - fi; - - if [ "$ORDERER_TLS_CERT_WRITTEN" = "true" ] && [ "$ORDERER_MSP_CERT_WRITTEN" = "true" ] - then - echo "${ORDERER_NAME} certificates are successfully stored in vault" - break - else - echo "${ORDERER_NAME} certificates are not ready, sleeping for {{ $.Values.healthcheck.sleepTimeAfterError }}" - sleep {{ $.Values.healthcheck.sleepTimeAfterError }} - COUNTER=`expr "$COUNTER" + 1` + vaultBevelFunc 'write' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/orderers/${MSP_KEY}" 'payload.json' + rm payload.json + fi +{{- end }} # End Vault if condition + # Files are stored as K8s secrets; add more conditions here for cloud KMS + if [ ! -e /crypto-config/${TLS_KEY}-exists ] || [ "$REFRESH_CERT_VALUE" = "true" ]; then + # Check if secret exists + kubectl get secret --namespace ${COMPONENT_NAME} ${TLS_KEY} >/dev/null 2>&1 + if [ $? -eq 0 ]; then + # Delete the secret if exists + kubectl delete secret --namespace ${COMPONENT_NAME} ${TLS_KEY} fi - done - - if [ "$COUNTER" -gt {{ $.Values.healthcheck.retries }} ] - then - echo "Retry attempted `expr $COUNTER - 1` times, Orderers certificates have not been saved." - touch ${MOUNT_PATH}/certs_not_found.txt - exit 1 - fi; - done + kubectl create secret generic ${TLS_KEY} --namespace ${COMPONENT_NAME} \ + --from-file=cacrt=${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/tls/ca.crt \ + --from-file=servercrt=${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/tls/server.crt \ + --from-file=serverkey=${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/tls/server.key + fi + + if [ ! 
-e /crypto-config/${MSP_KEY}-exists ] || [ "$REFRESH_CERT_VALUE" = "true" ]; then + SK_NAME=$(find ${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/msp/keystore/ -name "*_sk") + kubectl get secret --namespace ${COMPONENT_NAME} ${MSP_KEY} >/dev/null 2>&1 + if [ $? -eq 0 ]; then + # Delete the secret if exists + kubectl delete secret --namespace ${COMPONENT_NAME} ${MSP_KEY} + fi + kubectl create secret generic ${MSP_KEY} --namespace ${COMPONENT_NAME} \ + --from-file=admincerts=${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/msp/admincerts/Admin@${COMPONENT_NAME}-cert.pem \ + --from-file=cacerts=${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/msp/cacerts/{{ include "fabric-catools.caFileName" . }} \ + --from-file=keystore=${SK_NAME} \ + --from-file=signcerts=${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/msp/signcerts/cert.pem \ + --from-file=tlscacerts=${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/msp/tlscacerts/{{ include "fabric-catools.caFileName" . }} + fi + echo "$ORDERER_NAME certificates are successfully stored." + } + + function saveOrdererTlsConfigmap { + ORDERER_NAME=$1 + kubectl get configmap --namespace {{ .Release.Namespace }} orderer-tls-cacert + if [ $? -ne 0 ]; then + kubectl create configmap --namespace {{ .Release.Namespace }} orderer-tls-cacert --from-file=cacert=${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/tls/ca.crt + fi + } + + ORG_CYPTO_FOLDER="/crypto-config/ordererOrganizations/${COMPONENT_NAME}/users/Admin@${COMPONENT_NAME}" + ORG_CYPTO_ORDERER_FOLDER="/crypto-config/ordererOrganizations/${COMPONENT_NAME}/orderers" + + saveAdminSecrets + ORDERER=$1 + saveOrdererSecrets $ORDERER + saveOrdererTlsConfigmap $ORDERER --- apiVersion: v1 kind: ConfigMap metadata: name: peer-script-store-vault - namespace: {{ .Values.metadata.namespace }} + namespace: {{ .Release.Namespace }} labels: app.kubernetes.io/name: peer-script-vault - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/part-of: {{ include "fabric-catools.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm data: store-vault-peer.sh: |- #!/bin/bash @@ -933,47 +730,18 @@ data: echo "$line\n" done < ${1} > ${2}/${NAME}.txt } - - validateVaultResponse () { - if echo ${2} | grep "errors" || [ "${2}" = "" ]; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -fsS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code and curl_response - $curl_response" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - echo "Puting secrets/certificates from Vault server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . 
else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - - FORMAT_CERTIFICATE_PATH="/formatcertificate" - mkdir -p ${FORMAT_CERTIFICATE_PATH}/tls - mkdir -p ${FORMAT_CERTIFICATE_PATH}/msp - - ORG_CYPTO_FOLDER="/crypto-config/peerOrganizations/${COMPONENT_NAME}/users/Admin@${COMPONENT_NAME}" - - if [ -e /certcheck/present_tls.txt ]; then ADMIN_TLS_CERT_WRITTEN=true; else ADMIN_TLS_CERT_WRITTEN=false; fi - if [ -e /certcheck/present_msp.txt ]; then ADMIN_MSP_CERT_WRITTEN=true; else ADMIN_MSP_CERT_WRITTEN=false; fi - COUNTER=1 - while [ "$COUNTER" -le {{ $.Values.healthcheck.retries }} ] - do - - if ([ -e /certcheck/absent_tls.txt ] && [ "$ADMIN_TLS_CERT_WRITTEN" = "false" ]) || [ "$REFRESH_CERTS" == 'true' ]; then - + function saveAdminSecrets { + TLS_KEY=admin-tls + MSP_KEY=admin-msp +{{- if eq .Values.global.vault.type "hashicorp" }} + . ../bevel-vault.sh + # Calling a function to retrieve the vault token. + vaultBevelFunc "init" + FORMAT_CERTIFICATE_PATH="/formatcertificate" + mkdir -p ${FORMAT_CERTIFICATE_PATH}/tls + mkdir -p ${FORMAT_CERTIFICATE_PATH}/msp + + if [ ! -e /crypto-config/${TLS_KEY}-exists ] || [ "$REFRESH_CERT_VALUE" = "true" ]; then # This commands put the certificates with correct format for the curl command formatCertificate "${ORG_CYPTO_FOLDER}/tls/ca.crt" "${FORMAT_CERTIFICATE_PATH}/tls" formatCertificate "${ORG_CYPTO_FOLDER}/tls/client.crt" "${FORMAT_CERTIFICATE_PATH}/tls" @@ -984,49 +752,22 @@ data: CLIENT_KEY=$(cat ${FORMAT_CERTIFICATE_PATH}/tls/client.key.txt) echo " - { - \"data\": - { - \"ca.crt\": \"${CA_CRT}\", - \"client.crt\": \"${CLIENT_CRT}\", - \"client.key\": \"${CLIENT_KEY}\" - } - }" > payload.json - - # This command copy organization level tls certificates for orgs - curl \ - -H "X-Vault-Token: ${VAULT_TOKEN}" \ - -H "Content-Type: application/json" \ - -X POST \ - -d @payload.json \ - ${VAULT_ADDR}/v1/${VAULT_SECRET_USERS}/admin/tls - - # Check tls certificates - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${VAULT_SECRET_USERS}/admin/tls | jq -r 'if .errors then . else . end') - TLS_CA_CERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["ca.crt"]' 2>&1) - TLS_CLIENT_CERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["client.crt"]' 2>&1) - TLS_CLIENT_KEY=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["client.key"]' 2>&1) - - tls_certificate_fields=("$TLS_CA_CERT" "$TLS_CLIENT_CERT" "$TLS_CLIENT_KEY") - - for field in "${tls_certificate_fields[@]}" - do - if [ "$field" = "null" ] || [[ "$field" = "parse error"* ]] || [ "$field" = "" ] - then - ADMIN_TLS_CERT_WRITTEN=false - break - else - ADMIN_TLS_CERT_WRITTEN=true - fi - done + { + \"data\": + { + \"ca_crt\": \"${CA_CRT}\", + \"client_crt\": \"${CLIENT_CRT}\", + \"client_key\": \"${CLIENT_KEY}\" + } + }" > payload.json + + vaultBevelFunc 'write' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/users/${TLS_KEY}" 'payload.json' rm payload.json - fi; - - if ([ -e /certcheck/absent_msp.txt ] && [ "$ADMIN_MSP_CERT_WRITTEN" = "false" ]) || [ "$REFRESH_CERTS" == 'true' ]; then + fi + if [ ! 
-e /crypto-config/${MSP_KEY}-exists ] || [ "$REFRESH_CERT_VALUE" = "true" ]; then # This commands put the certificates with correct format for the curl command SK_NAME=$(find ${ORG_CYPTO_FOLDER}/msp/keystore/ -name "*_sk") - formatCertificate "${ORG_CYPTO_FOLDER}/msp/admincerts/Admin@${COMPONENT_NAME}-cert.pem" "${FORMAT_CERTIFICATE_PATH}/msp" formatCertificate "${SK_NAME}" "${FORMAT_CERTIFICATE_PATH}/msp" formatCertificate "${ORG_CYPTO_FOLDER}/msp/signcerts/cert.pem" "${FORMAT_CERTIFICATE_PATH}/msp" @@ -1037,426 +778,172 @@ data: SIGNCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/msp/cert.pem.txt) CA_CRT=$(cat ${FORMAT_CERTIFICATE_PATH}/tls/ca.crt.txt) - if [ "$PROXY" != "none" ] ; then - - formatCertificate "${ORG_CYPTO_FOLDER}/msp/cacerts/ca-${COMPONENT_NAME}-${EXTERNAL_URL_SUFFIX}.pem" "${FORMAT_CERTIFICATE_PATH}/msp" - CACERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/msp/ca-${COMPONENT_NAME}-${EXTERNAL_URL_SUFFIX}.pem.txt) - - echo " - { - \"data\": - { - \"admincerts\": \"${ADMINCERTS}\", - \"cacerts\": \"${CACERTS}\", - \"keystore\": \"${KEYSTORE}\", - \"signcerts\": \"${SIGNCERTS}\", - \"tlscacerts\": \"${CA_CRT}\" - } - }" > payload.json - - fi; - - if [ "$PROXY" = "none" ] ; then - - formatCertificate "${ORG_CYPTO_FOLDER}/msp/cacerts/ca-${COMPONENT_NAME}-7054.pem" "${FORMAT_CERTIFICATE_PATH}/msp" - CACERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/msp/ca-${COMPONENT_NAME}-7054.pem.txt) - - echo " - { - \"data\": - { - \"admincerts\": \"${ADMINCERTS}\", - \"cacerts\": \"${CACERTS}\", - \"keystore\": \"${KEYSTORE}\", - \"signcerts\": \"${SIGNCERTS}\", - \"tlscacerts\": \"${CA_CRT}\" - } - }" > payload.json - fi; - - # This command copy organization level msp certificates for orgs - curl \ - -H "X-Vault-Token: ${VAULT_TOKEN}" \ - -H "Content-Type: application/json" \ - -X POST \ - -d @payload.json \ - ${VAULT_ADDR}/v1/${VAULT_SECRET_USERS}/admin/msp - - # Check msp certificates - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${VAULT_SECRET_USERS}/admin/msp | jq -r 'if .errors then . else . end') - MSP_ADMINCERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["admincerts"]' 2>&1) - MSP_CACERTS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["cacerts"]' 2>&1) - MSP_KEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["keystore"]' 2>&1) - MSP_SIGNCERTS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["signcerts"]' 2>&1) - MSP_TLSCACERTS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["tlscacerts"]' 2>&1) - - msp_certificate_fields=("$MSP_ADMINCERT" "$MSP_CACERTS" "$MSP_KEYSTORE" "$MSP_SIGNCERTS" "$MSP_TLSCACERTS") - - for field in "${msp_certificate_fields[@]}" - do - if [ "$field" = "null" ] || [[ "$field" = "parse error"* ]] || [ "$field" = "" ] - then - ADMIN_MSP_CERT_WRITTEN=false - break - else - ADMIN_MSP_CERT_WRITTEN=true - fi - done - + formatCertificate "${ORG_CYPTO_FOLDER}/msp/cacerts/{{ include "fabric-catools.caFileName" . }}" "${FORMAT_CERTIFICATE_PATH}/msp" + CACERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/msp/{{ include "fabric-catools.caFileName" . 
}}.txt) + + echo " + { + \"data\": + { + \"admincerts\": \"${ADMINCERTS}\", + \"cacerts\": \"${CACERTS}\", + \"keystore\": \"${KEYSTORE}\", + \"signcerts\": \"${SIGNCERTS}\", + \"tlscacerts\": \"${CA_CRT}\" + } + }" > payload.json + + vaultBevelFunc 'write' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/users/${MSP_KEY}" 'payload.json' rm payload.json - fi; - - if [ "$ADMIN_TLS_CERT_WRITTEN" = "true" ] && [ "$ADMIN_MSP_CERT_WRITTEN" = "true" ] - then - echo "Admin certificates are successfully stored in vault" - break - else - echo "Admin certificates are not ready, sleeping for {{ $.Values.healthcheck.sleepTimeAfterError }}" - sleep {{ $.Values.healthcheck.sleepTimeAfterError }} - COUNTER=`expr "$COUNTER" + 1` fi - done - - if [ "$COUNTER" -gt {{ $.Values.healthcheck.retries }} ] - then - echo "Retry attempted `expr $COUNTER - 1` times, Admin certificates have not been saved." - touch ${MOUNT_PATH}/certs_not_found.txt - exit 1 - fi; +{{- end }} # End Vault if condition + if [ ! -e /crypto-config/${TLS_KEY}-exists ] || [ "$REFRESH_CERT_VALUE" = "true" ]; then + # Check if secret exists + kubectl get secret --namespace ${COMPONENT_NAME} ${TLS_KEY} >/dev/null 2>&1 + if [ $? -eq 0 ]; then + # Delete the secret if exists + kubectl delete secret --namespace ${COMPONENT_NAME} ${TLS_KEY} + fi + kubectl create secret generic ${TLS_KEY} --namespace ${COMPONENT_NAME} --from-file=cacrt=${ORG_CYPTO_FOLDER}/tls/ca.crt \ + --from-file=clientcrt=${ORG_CYPTO_FOLDER}/tls/client.crt \ + --from-file=clientkey=${ORG_CYPTO_FOLDER}/tls/client.key + fi - ORG_CYPTO_PEER_FOLDER="/crypto-config/peerOrganizations/${COMPONENT_NAME}/peers" + if [ ! -e /crypto-config/${MSP_KEY}-exists ] || [ "$REFRESH_CERT_VALUE" = "true" ]; then + SK_NAME=$(find ${ORG_CYPTO_FOLDER}/msp/keystore/ -name "*_sk") + kubectl get secret --namespace ${COMPONENT_NAME} ${MSP_KEY} >/dev/null 2>&1 + if [ $? -eq 0 ]; then + # Delete the secret if exists + kubectl delete secret --namespace ${COMPONENT_NAME} ${MSP_KEY} + fi + kubectl create secret generic ${MSP_KEY} --namespace ${COMPONENT_NAME} \ + --from-file=admincerts=${ORG_CYPTO_FOLDER}/msp/admincerts/Admin@${COMPONENT_NAME}-cert.pem \ + --from-file=cacerts=${ORG_CYPTO_FOLDER}/msp/cacerts/{{ include "fabric-catools.caFileName" . }} \ + --from-file=keystore=${SK_NAME} \ + --from-file=signcerts=${ORG_CYPTO_FOLDER}/msp/signcerts/cert.pem \ + --from-file=tlscacerts=${ORG_CYPTO_FOLDER}/tls/ca.crt + fi - list=$(echo "$PEERS_NAMES" | tr "-" "\n") - for PEER in $list - do - SAVE=false - STATUS="${PEER##*,}" - if [ "$STATUS" = "new" ] || [ "$STATUS" = "" ]; then - PEER_NAME="${PEER%%,*}" - SAVE=true - else - continue - fi; - - if [ -e /certcheck/present_tls_${PEER_NAME}.txt ]; then PEER_TLS_CERT_WRITTEN=true; else PEER_TLS_CERT_WRITTEN=false; fi - if [ -e /certcheck/present_msp_${PEER_NAME}.txt ]; then PEER_MSP_CERT_WRITTEN=true; else PEER_MSP_CERT_WRITTEN=false; fi + echo "Peer Admin certificates are successfully stored." + } + function savePeerSecrets { + PEER_NAME=$1 + TLS_KEY=$1-tls + MSP_KEY=$1-msp +{{- if eq .Values.global.vault.type "hashicorp" }} + . ../bevel-vault.sh + # Calling a function to retrieve the vault token. 
+ vaultBevelFunc "init" + FORMAT_CERTIFICATE_PATH="/formatcertificate" mkdir -p ${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/tls mkdir -p ${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp mkdir -p ${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp/cacerts mkdir -p ${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp/tlscacerts - while [ "$COUNTER" -le {{ $.Values.healthcheck.retries }} ] - do - if ([ -e /certcheck/absent_tls_${PEER_NAME}.txt ] && [ "$PEER_TLS_CERT_WRITTEN" = "false" ] && [ "$SAVE" == 'true' ]) || [ "$REFRESH_CERTS" == 'true' ]; then - - # This commands put the certificates with correct format for the curl command - formatCertificate "${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/tls/ca.crt" "${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/tls" - formatCertificate "${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/tls/server.crt" "${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/tls" - formatCertificate "${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/tls/server.key" "${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/tls" - - CA_CRT=$(cat ${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/tls/ca.crt.txt) - SERVER_CRT=$(cat ${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/tls/server.crt.txt) - SERVER_KEY=$(cat ${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/tls/server.key.txt) - - echo " - { - \"data\": - { - \"ca.crt\": \"${CA_CRT}\", - \"server.crt\": \"${SERVER_CRT}\", - \"server.key\": \"${SERVER_KEY}\" - } - }" > payload.json - - # This command copy the crypto material for peers (tls) - curl \ - -H "X-Vault-Token: ${VAULT_TOKEN}" \ - -H "Content-Type: application/json" \ - -X POST \ - -d @payload.json \ - ${VAULT_ADDR}/v1/${VAULT_SECRET_PEER}/${PEER_NAME}.${COMPONENT_NAME}/tls - - # Check tls certificates - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${VAULT_SECRET_PEER}/${PEER_NAME}.${COMPONENT_NAME}/tls | jq -r 'if .errors then . else . end') - TLS_CA_CERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["ca.crt"]' 2>&1) - TLS_SERVER_CERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["server.crt"]' 2>&1) - TLS_SERVER_KEY=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["server.key"]' 2>&1) - - tls_certificate_fields=("$TLS_CA_CERT" "$TLS_SERVER_CERT" "$TLS_SERVER_KEY") - - for field in "${tls_certificate_fields[@]}" - do - if [ "$field" = "null" ] || [[ "$field" = "parse error"* ]] || [ "$field" = "" ] - then - PEER_TLS_CERT_WRITTEN=false - break - else - PEER_TLS_CERT_WRITTEN=true - fi - done - rm payload.json - fi; - - if ([ -e /certcheck/absent_msp_${PEER_NAME}.txt ] && [ "$PEER_MSP_CERT_WRITTEN" = "false" ] && [ "$SAVE" == 'true' ]) || [ "$REFRESH_CERTS" == 'true' ]; then - - # This commands put the certificates with correct format for the curl command - SK_NAME=$(find ${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/msp/keystore/ -name "*_sk") - - formatCertificate "${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/msp/admincerts/Admin@${COMPONENT_NAME}-cert.pem" "${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp" - formatCertificate "${SK_NAME}" "${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp" - formatCertificate "${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/msp/signcerts/cert.pem" "${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp" - - ADMINCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp/Admin@${COMPONENT_NAME}-cert.pem.txt) - KEYSTORE=$(cat ${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp/*_sk.txt) - SIGNCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp/cert.pem.txt) - - if [ "$PROXY" != "none" ] ; then + if [ ! 
-e /crypto-config/${TLS_KEY}-exists ] || [ "$REFRESH_CERT_VALUE" = "true" ]; then + # This commands put the certificates with correct format for the curl command + formatCertificate "${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/tls/ca.crt" "${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/tls" + formatCertificate "${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/tls/server.crt" "${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/tls" + formatCertificate "${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/tls/server.key" "${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/tls" - formatCertificate "${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/msp/cacerts/ca-${COMPONENT_NAME}-${EXTERNAL_URL_SUFFIX}.pem" "${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp/cacerts" - formatCertificate "${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/msp/tlscacerts/ca-${COMPONENT_NAME}-${EXTERNAL_URL_SUFFIX}.pem" "${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp/tlscacerts" - CACERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp/cacerts/ca-${COMPONENT_NAME}-${EXTERNAL_URL_SUFFIX}.pem.txt) - TLSCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp/tlscacerts/ca-${COMPONENT_NAME}-${EXTERNAL_URL_SUFFIX}.pem.txt) + CA_CRT=$(cat ${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/tls/ca.crt.txt) + SERVER_CRT=$(cat ${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/tls/server.crt.txt) + SERVER_KEY=$(cat ${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/tls/server.key.txt) - echo " - { - \"data\": - { - \"admincerts\": \"${ADMINCERTS}\", - \"cacerts\": \"${CACERTS}\", - \"keystore\": \"${KEYSTORE}\", - \"signcerts\": \"${SIGNCERTS}\", - \"tlscacerts\": \"${TLSCERTS}\" - } - }" > payload.json - - fi; - - if [ "$PROXY" = "none" ] ; then - formatCertificate "${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/msp/cacerts/ca-${COMPONENT_NAME}-7054.pem" "${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp/cacerts" - formatCertificate "${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/msp/tlscacerts/ca-${COMPONENT_NAME}-7054.pem" "${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp/tlscacerts" - CACERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp/cacerts/ca-${COMPONENT_NAME}-7054.pem.txt) - TLSCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp/tlscacerts/ca-${COMPONENT_NAME}-7054.pem.txt) - - echo " - { - \"data\": - { - \"admincerts\": \"${ADMINCERTS}\", - \"cacerts\": \"${CACERTS}\", - \"keystore\": \"${KEYSTORE}\", - \"signcerts\": \"${SIGNCERTS}\", - \"tlscacerts\": \"${TLSCERTS}\" - } - }" > payload.json - - fi; - - # This command copy the msp certificates to the Vault - curl \ - -H "X-Vault-Token: ${VAULT_TOKEN}" \ - -H "Content-Type: application/json" \ - -X POST \ - -d @payload.json \ - ${VAULT_ADDR}/v1/${VAULT_SECRET_PEER}/${PEER_NAME}.${COMPONENT_NAME}/msp - - # Check msp certificates - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${VAULT_SECRET_PEER}/${PEER_NAME}.${COMPONENT_NAME}/msp | jq -r 'if .errors then . else . 
end') - MSP_ADMINCERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["admincerts"]' 2>&1) - MSP_CACERTS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["cacerts"]' 2>&1) - MSP_KEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["keystore"]' 2>&1) - MSP_SIGNCERTS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["signcerts"]' 2>&1) - MSP_TLSCACERTS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["tlscacerts"]' 2>&1) - - msp_certificate_fields=("$MSP_ADMINCERT" "$MSP_CACERTS" "$MSP_KEYSTORE" "$MSP_SIGNCERTS" "$MSP_TLSCACERTS") - - for field in "${msp_certificate_fields[@]}" - do - if [ "$field" = "null" ] || [[ "$field" = "parse error"* ]] || [ "$field" = "" ] - then - PEER_MSP_CERT_WRITTEN=false - break - else - PEER_MSP_CERT_WRITTEN=true - fi - done - rm payload.json - fi; - - if [ "$PEER_TLS_CERT_WRITTEN" = "true" ] && [ "$PEER_MSP_CERT_WRITTEN" = "true" ] - then - echo "${PEER_NAME} certificates are successfully stored in vault" - break - else - echo "${PEER_NAME} certificates are not ready, sleeping for {{ $.Values.healthcheck.sleepTimeAfterError }}" - sleep {{ $.Values.healthcheck.sleepTimeAfterError }} - COUNTER=`expr "$COUNTER" + 1` - fi - done + echo " + { + \"data\": + { + \"ca_crt\": \"${CA_CRT}\", + \"server_crt\": \"${SERVER_CRT}\", + \"server_key\": \"${SERVER_KEY}\" + } + }" > payload.json - if [ "$COUNTER" -gt {{ $.Values.healthcheck.retries }} ] - then - echo "Retry attempted `expr $COUNTER - 1` times, peers certificates have not been saved." - touch ${MOUNT_PATH}/certs_not_found.txt - exit 1 - fi; + vaultBevelFunc 'write' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/peers/${TLS_KEY}" 'payload.json' + rm payload.json + fi - done + if [ ! -e /crypto-config/${MSP_KEY}-exists ] || [ "$REFRESH_CERT_VALUE" = "true" ]; then + # This commands put the certificates with correct format for the curl command + SK_NAME=$(find ${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/msp/keystore/ -name "*_sk") + formatCertificate "${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/msp/admincerts/Admin@${COMPONENT_NAME}-cert.pem" "${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp" + formatCertificate "${SK_NAME}" "${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp" + formatCertificate "${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/msp/signcerts/cert.pem" "${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp" - COUNTER=1 - if [ -e /certcheck/present_config_file.txt ]; then CONFIG_FILE_WRITTEN=true; else CONFIG_FILE_WRITTEN=false; fi - COUCHDB_WRITTEN=false - while [ "$COUNTER" -le {{ $.Values.healthcheck.retries }} ] - do - if [ -e /certcheck/absent_config_file.txt ] && [ "$CONFIG_FILE_WRITTEN" = "false" ]; then + ADMINCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp/Admin@${COMPONENT_NAME}-cert.pem.txt) + KEYSTORE=$(cat ${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp/*_sk.txt) + SIGNCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp/cert.pem.txt) - # This commands put the config file with correct format for the curl command - mkdir -p ${FORMAT_CERTIFICATE_PATH}/msp_config_file - formatCertificate "/crypto-config/peerOrganizations/${COMPONENT_NAME}/msp/config.yaml" "${FORMAT_CERTIFICATE_PATH}/msp_config_file" - MSP_CONFIG_FILE=$(cat ${FORMAT_CERTIFICATE_PATH}/msp_config_file/config.yaml.txt) + formatCertificate "${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/msp/cacerts/{{ include "fabric-catools.caFileName" . 
}}" "${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp/cacerts" + formatCertificate "${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/msp/tlscacerts/{{ include "fabric-catools.caFileName" . }}" "${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp/tlscacerts" + CACERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp/cacerts/{{ include "fabric-catools.caFileName" . }}.txt) + TLSCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${PEER_NAME}/msp/tlscacerts/{{ include "fabric-catools.caFileName" . }}.txt) echo " + { + \"data\": { - \"data\": - { - \"configfile\": \"${MSP_CONFIG_FILE}\" - } - }" > payload.json - - # This command write the msp config file to Vault - curl \ - -H "X-Vault-Token: ${VAULT_TOKEN}" \ - -H "Content-Type: application/json" \ - -X POST \ - -d @payload.json \ - ${VAULT_ADDR}/v1/${VAULT_SECRET_CONFIG_FILE} - - # Check msp config file - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${VAULT_SECRET_CONFIG_FILE} | jq -r 'if .errors then . else . end') - CONFIG_FILE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["configfile"]' 2>&1) - - if [ "$CONFIG_FILE" = "null" ] || [[ "$CONFIG_FILE" = "parse error"* ]] || [ "$CONFIG_FILE" = "" ] - then - CONFIG_FILE_WRITTEN=false - else - CONFIG_FILE_WRITTEN=true - fi + \"admincerts\": \"${ADMINCERTS}\", + \"cacerts\": \"${CACERTS}\", + \"keystore\": \"${KEYSTORE}\", + \"signcerts\": \"${SIGNCERTS}\", + \"tlscacerts\": \"${TLSCERTS}\" + } + }" > payload.json + + vaultBevelFunc 'write' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/peers/${MSP_KEY}" 'payload.json' rm payload.json - fi; - - if [ "$COUCHDB_WRITTEN" = "false" ]; then - - # This command writes the couchdb credentials for each organization to the vault - curl \ - -H "X-Vault-Token: ${VAULT_TOKEN}" \ - -H "Content-Type: application/json" \ - -X POST \ - -d '{ "data": {"user":"admin123"}}' \ - ${VAULT_ADDR}/v1/${VAULT_SECRET_COUCHDB} - - # Check couchdb credentials - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${VAULT_SECRET_COUCHDB} | jq -r 'if .errors then . else . end') - USER=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["user"]' 2>&1) - - if [ "$USER" = "null" ] || [[ "$USER" = "parse error"* ]] || [ "$USER" = "" ] - then - COUCHDB_WRITTEN=false - else - COUCHDB_WRITTEN=true - fi - fi; - - if [ "$CONFIG_FILE_WRITTEN" = "true" ] && [ "$COUCHDB_WRITTEN" = "true" ] - then - echo "MSP config file and couchdb credentials are successfully stored in vault" - break - else - echo "MSP config file or couchdb credentials are not ready, sleeping for {{ $.Values.healthcheck.sleepTimeAfterError }}" - sleep {{ $.Values.healthcheck.sleepTimeAfterError }} - COUNTER=`expr "$COUNTER" + 1` fi - done - - if [ "$COUNTER" -gt {{ $.Values.healthcheck.retries }} ] - then - echo "Retry attempted `expr $COUNTER - 1` times, cryto materials have not been saved." 
- touch ${MOUNT_PATH}/certs_not_found.txt - exit 1 - fi; - - list=$(echo "$ORDERERS_NAMES" | tr "-" "\n") - for ORDERER in $list - do - COUNTER=1 - if [ -e /certcheck/present_orderer_tls_cert.txt ]; then ORDERER_TLS_WRITTEN=true; else ORDERER_TLS_WRITTEN=false; fi - while [ "$COUNTER" -le {{ $.Values.healthcheck.retries }} ] - do - if [ -e /certcheck/absent_orderer_tls_cert.txt ] && [ "$ORDERER_TLS_WRITTEN" = "false" ]; then - cat /tlscerts/${ORDERER}.crt | base64 -d > ${ORDERER}.formatted - # formatting is needed because bas64 encoding removed the newlines, so they need to be added again - while read line || [ -n "$line" ]; - do - echo "$line\n"; - done < ${ORDERER}.formatted > ${ORDERER}.final - ORDERER_TLS=$(cat ${ORDERER}.final) - echo " - { - \"data\": - { - \"ca.crt\": \"${ORDERER_TLS}\" - } - }" > payload.json - - # This command writes organization level certificates for orderers to vault - curl \ - -H "X-Vault-Token: ${VAULT_TOKEN}" \ - -H "Content-Type: application/json" \ - -X POST \ - -d @payload.json \ - ${VAULT_ADDR}/v1/${VAULT_SECRET_PEER_ORDERER_TLS} - - # Check orderer certs - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${VAULT_SECRET_PEER_ORDERER_TLS} | jq -r 'if .errors then . else . end') - CA_CRT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["ca.crt"]' 2>&1) - - if [ "$CA_CRT" = "null" ] || [[ "$CA_CRT" = "parse error"* ]] || [ "$CA_CRT" = "" ] - then - ORDERER_TLS_WRITTEN=false - else - ORDERER_TLS_WRITTEN=true - fi - rm payload.json +{{- end }} # End Vault if condition + if [ ! -e /crypto-config/${TLS_KEY}-exists ] || [ "$REFRESH_CERT_VALUE" = "true" ]; then + # Check if secret exists + kubectl get secret --namespace ${COMPONENT_NAME} ${TLS_KEY} >/dev/null 2>&1 + if [ $? -eq 0 ]; then + # Delete the secret if exists + kubectl delete secret --namespace ${COMPONENT_NAME} ${TLS_KEY} fi + kubectl create secret generic ${TLS_KEY} --namespace ${COMPONENT_NAME} \ + --from-file=cacrt=${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/tls/ca.crt \ + --from-file=servercrt=${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/tls/server.crt \ + --from-file=serverkey=${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/tls/server.key + fi - if [ "$ORDERER_TLS_WRITTEN" = "true" ] - then - echo "${ORDERER} tls certificate are successfully stored in vault" - break - else - echo "${ORDERER} tls certificate are not ready, sleeping for {{ $.Values.healthcheck.sleepTimeAfterError }}" - sleep {{ $.Values.healthcheck.sleepTimeAfterError }} - COUNTER=`expr "$COUNTER" + 1` + if [ ! -e /crypto-config/${MSP_KEY}-exists ] || [ "$REFRESH_CERT_VALUE" = "true" ]; then + SK_NAME=$(find ${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/msp/keystore/ -name "*_sk") + kubectl get secret --namespace ${COMPONENT_NAME} ${MSP_KEY} >/dev/null 2>&1 + if [ $? -eq 0 ]; then + # Delete the secret if exists + kubectl delete secret --namespace ${COMPONENT_NAME} ${MSP_KEY} fi - done + kubectl create secret generic ${MSP_KEY} --namespace ${COMPONENT_NAME} \ + --from-file=admincerts=${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/msp/admincerts/Admin@${COMPONENT_NAME}-cert.pem \ + --from-file=cacerts=${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/msp/cacerts/{{ include "fabric-catools.caFileName" . 
}} \ + --from-file=keystore=${SK_NAME} \ + --from-file=signcerts=${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/msp/signcerts/cert.pem \ + --from-file=tlscacerts=${ORG_CYPTO_PEER_FOLDER}/${PEER_NAME}.${COMPONENT_NAME}/msp/tlscacerts/{{ include "fabric-catools.caFileName" . }} + fi + echo "${PEER_NAME} Client certificates are successfully stored." + } - if [ "$COUNTER" -gt {{ $.Values.healthcheck.retries }} ] - then - echo "Retry attempted `expr $COUNTER - 1` times, orderer tls have not been saved." - touch ${MOUNT_PATH}/certs_not_found.txt - exit 1 - fi; - done + ORG_CYPTO_FOLDER="/crypto-config/peerOrganizations/${COMPONENT_NAME}/users/Admin@${COMPONENT_NAME}" + ORG_CYPTO_PEER_FOLDER="/crypto-config/peerOrganizations/${COMPONENT_NAME}/peers" + saveAdminSecrets + savePeerSecrets $1 --- apiVersion: v1 kind: ConfigMap metadata: name: users-script-store-vault - namespace: {{ .Values.metadata.namespace }} + namespace: {{ .Release.Namespace }} labels: app.kubernetes.io/name: users-script-vault - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/part-of: {{ include "fabric-catools.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm data: store-vault-users.sh: |- #!/bin/bash @@ -1468,259 +955,114 @@ data: echo "$line\n" done < ${1} > ${2}/${NAME}.txt } - - validateVaultResponse () { - if echo ${2} | grep "errors" || [ "${2}" = "" ]; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -fsS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code and curl_response - $curl_response" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - echo "Puting secrets/certificates from Vault server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - - FORMAT_CERTIFICATE_PATH="/formatcertificate" - ORG_CYPTO_USERS_FOLDER="/crypto-config/peerOrganizations/${COMPONENT_NAME}/users" + function saveUserSecrets { + USER=$1 + TLS_KEY=$1-tls + MSP_KEY=$1-msp +{{- if eq .Values.global.vault.type "hashicorp" }} + . ../bevel-vault.sh + # Calling a function to retrieve the vault token. 
+ vaultBevelFunc "init" - list=$(echo "$USERS_IDENTITIES" | tr "-" "\n") - for USER in $list - do - if [ -e /certcheck/present_tls_${USER}.txt ]; then USER_TLS_CERT_WRITTEN=true; else USER_TLS_CERT_WRITTEN=false; fi - if [ -e /certcheck/present_msp_${USER}.txt ]; then USER_MSP_CERT_WRITTEN=true; else USER_MSP_CERT_WRITTEN=false; fi - + FORMAT_CERTIFICATE_PATH="/formatcertificate" + ORG_CYPTO_USERS_FOLDER="/crypto-config/peerOrganizations/${COMPONENT_NAME}/users" + mkdir -p ${FORMAT_CERTIFICATE_PATH}/${USER}/tls mkdir -p ${FORMAT_CERTIFICATE_PATH}/${USER}/msp mkdir -p ${FORMAT_CERTIFICATE_PATH}/${USER}/msp/cacerts mkdir -p ${FORMAT_CERTIFICATE_PATH}/${USER}/msp/tlscacerts - COUNTER=1 - while [ "$COUNTER" -le {{ $.Values.healthcheck.retries }} ] - do - if ([ -e /certcheck/absent_tls_${USER}.txt ] && [ "$USER_TLS_CERT_WRITTEN" = "false" ]) || [ "$REFRESH_CERTS" == 'true' ]; then - - # This commands put the certificates with correct format for the curl command - formatCertificate "${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/tls/ca.crt" "${FORMAT_CERTIFICATE_PATH}/${USER}/tls" - formatCertificate "${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/tls/client.crt" "${FORMAT_CERTIFICATE_PATH}/${USER}/tls" - formatCertificate "${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/tls/client.key" "${FORMAT_CERTIFICATE_PATH}/${USER}/tls" - - CA_CRT=$(cat ${FORMAT_CERTIFICATE_PATH}/${USER}/tls/ca.crt.txt) - CLIENT_CRT=$(cat ${FORMAT_CERTIFICATE_PATH}/${USER}/tls/client.crt.txt) - CLIENT_KEY=$(cat ${FORMAT_CERTIFICATE_PATH}/${USER}/tls/client.key.txt) - - echo " - { - \"data\": - { - \"ca.crt\": \"${CA_CRT}\", - \"client.crt\": \"${CLIENT_CRT}\", - \"client.key\": \"${CLIENT_KEY}\" - } - }" > payload.json + if [ ! -e /crypto-config/${TLS_KEY}-exists ] || [ "$REFRESH_CERT_VALUE" = "true" ]; then + # This commands put the certificates with correct format for the curl command + formatCertificate "${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/tls/ca.crt" "${FORMAT_CERTIFICATE_PATH}/${USER}/tls" + formatCertificate "${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/tls/client.crt" "${FORMAT_CERTIFICATE_PATH}/${USER}/tls" + formatCertificate "${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/tls/client.key" "${FORMAT_CERTIFICATE_PATH}/${USER}/tls" - # This command copy the crypto material for users (tls) - curl \ - -H "X-Vault-Token: ${VAULT_TOKEN}" \ - -H "Content-Type: application/json" \ - -X POST \ - -d @payload.json \ - ${VAULT_ADDR}/v1/${VAULT_SECRET_USERS}/${USER}/tls - - # Check tls certificates - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${VAULT_SECRET_USERS}/${USER}/tls | jq -r 'if .errors then . else . 
end') - TLS_CA_CERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["ca.crt"]' 2>&1) - TLS_CLIENT_CERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["client.crt"]' 2>&1) - TLS_CLIENT_KEY=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["client.key"]' 2>&1) - - tls_certificate_fields=("$TLS_CA_CERT" "$TLS_CLIENT_CERT" "$TLS_CLIENT_KEY") - - for field in "${tls_certificate_fields[@]}" - do - if [ "$field" = "null" ] || [[ "$field" = "parse error"* ]] || [ "$field" = "" ] - then - USER_TLS_CERT_WRITTEN=false - break - else - USER_TLS_CERT_WRITTEN=true - fi - done - rm payload.json - fi; + CA_CRT=$(cat ${FORMAT_CERTIFICATE_PATH}/${USER}/tls/ca.crt.txt) + CLIENT_CRT=$(cat ${FORMAT_CERTIFICATE_PATH}/${USER}/tls/client.crt.txt) + CLIENT_KEY=$(cat ${FORMAT_CERTIFICATE_PATH}/${USER}/tls/client.key.txt) - if ([ -e /certcheck/absent_msp_${USER}.txt ] && [ "$USER_MSP_CERT_WRITTEN" = "false" ]) || [ "$REFRESH_CERTS" == 'true' ]; then + echo " + { + \"data\": + { + \"ca_crt\": \"${CA_CRT}\", + \"client_crt\": \"${CLIENT_CRT}\", + \"client_key\": \"${CLIENT_KEY}\" + } + }" > payload.json + + # This command copy the crypto material for users (tls) + vaultBevelFunc 'write' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/users/${TLS_KEY}" 'payload.json' + rm payload.json + fi - # This commands put the certificates with correct format for the curl command - SK_NAME=$(find ${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/msp/keystore/ -name "*_sk") + if [ ! -e /crypto-config/${MSP_KEY}-exists ] || [ "$REFRESH_CERT_VALUE" = "true" ]; then + # This commands put the certificates with correct format for the curl command + SK_NAME=$(find ${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/msp/keystore/ -name "*_sk") + formatCertificate "${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/msp/admincerts/${USER}@${COMPONENT_NAME}-cert.pem" "${FORMAT_CERTIFICATE_PATH}/${USER}/msp" + formatCertificate "${SK_NAME}" "${FORMAT_CERTIFICATE_PATH}/${USER}/msp" + formatCertificate "${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/msp/signcerts/cert.pem" "${FORMAT_CERTIFICATE_PATH}/${USER}/msp" - formatCertificate "${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/msp/admincerts/${USER}@${COMPONENT_NAME}-cert.pem" "${FORMAT_CERTIFICATE_PATH}/${USER}/msp" - formatCertificate "${SK_NAME}" "${FORMAT_CERTIFICATE_PATH}/${USER}/msp" - formatCertificate "${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/msp/signcerts/cert.pem" "${FORMAT_CERTIFICATE_PATH}/${USER}/msp" + ADMINCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${USER}/msp/${USER}@${COMPONENT_NAME}-cert.pem.txt) + KEYSTORE=$(cat ${FORMAT_CERTIFICATE_PATH}/${USER}/msp/*_sk.txt) + SIGNCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${USER}/msp/cert.pem.txt) - ADMINCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${USER}/msp/${USER}@${COMPONENT_NAME}-cert.pem.txt) - KEYSTORE=$(cat ${FORMAT_CERTIFICATE_PATH}/${USER}/msp/*_sk.txt) - SIGNCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${USER}/msp/cert.pem.txt) - - if [ "$PROXY" != "none" ] ; then + formatCertificate "${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/msp/cacerts/{{ include "fabric-catools.caFileName" . }}" "${FORMAT_CERTIFICATE_PATH}/${USER}/msp/cacerts" + formatCertificate "${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/msp/tlscacerts/{{ include "fabric-catools.caFileName" . }}" "${FORMAT_CERTIFICATE_PATH}/${USER}/msp/tlscacerts" + CACERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${USER}/msp/cacerts/{{ include "fabric-catools.caFileName" . 
}}.txt) + # En el rol lo copia directamente del tls + TLSCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${USER}/msp/tlscacerts/{{ include "fabric-catools.caFileName" . }}.txt) - formatCertificate "${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/msp/cacerts/ca-${COMPONENT_NAME}-${EXTERNAL_URL_SUFFIX}.pem" "${FORMAT_CERTIFICATE_PATH}/${USER}/msp/cacerts" - formatCertificate "${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/msp/tlscacerts/ca-${COMPONENT_NAME}-${EXTERNAL_URL_SUFFIX}.pem" "${FORMAT_CERTIFICATE_PATH}/${USER}/msp/tlscacerts" - CACERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${USER}/msp/cacerts/ca-${COMPONENT_NAME}-${EXTERNAL_URL_SUFFIX}.pem.txt) - # En el rol lo copia directamente del tls - TLSCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${USER}/msp/tlscacerts/ca-${COMPONENT_NAME}-${EXTERNAL_URL_SUFFIX}.pem.txt) + echo " + { + \"data\": + { + \"admincerts\": \"${ADMINCERTS}\", + \"cacerts\": \"${CACERTS}\", + \"keystore\": \"${KEYSTORE}\", + \"signcerts\": \"${SIGNCERTS}\", + \"tlscacerts\": \"${TLSCERTS}\" + } + }" > payload.json + + #This command copy the msp certificates to the Vault + vaultBevelFunc 'write' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/users/${MSP_KEY}" 'payload.json' + rm payload.json + fi +{{- end }} + # Check if secret exists + if [ ! -e /crypto-config/${TLS_KEY}-exists ] || [ "$REFRESH_CERT_VALUE" = "true" ]; then + kubectl get secret --namespace ${COMPONENT_NAME} ${TLS_KEY} >/dev/null 2>&1 + if [ $? -eq 0 ]; then + # Delete the secret if exists + kubectl delete secret --namespace ${COMPONENT_NAME} ${TLS_KEY} + fi - echo " - { - \"data\": - { - \"admincerts\": \"${ADMINCERTS}\", - \"cacerts\": \"${CACERTS}\", - \"keystore\": \"${KEYSTORE}\", - \"signcerts\": \"${SIGNCERTS}\", - \"tlscacerts\": \"${TLSCERTS}\" - } - }" > payload.json - - fi; - - if [ "$PROXY" = "none" ] ; then - formatCertificate "${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/msp/cacerts/ca-${COMPONENT_NAME}-7054.pem" "${FORMAT_CERTIFICATE_PATH}/${USER}/msp/cacerts" - formatCertificate "${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/msp/tlscacerts/ca-${COMPONENT_NAME}-7054.pem" "${FORMAT_CERTIFICATE_PATH}/${USER}/msp/tlscacerts" - CACERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${USER}/msp/cacerts/ca-${COMPONENT_NAME}-7054.pem.txt) - TLSCERTS=$(cat ${FORMAT_CERTIFICATE_PATH}/${USER}/msp/tlscacerts/ca-${COMPONENT_NAME}-7054.pem.txt) + kubectl create secret generic ${TLS_KEY} --namespace ${COMPONENT_NAME} \ + --from-file=cacrt=${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/tls/ca.crt \ + --from-file=clientcrt=${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/tls/client.crt \ + --from-file=clientkey=${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/tls/client.key + fi - echo " - { - \"data\": - { - \"admincerts\": \"${ADMINCERTS}\", - \"cacerts\": \"${CACERTS}\", - \"keystore\": \"${KEYSTORE}\", - \"signcerts\": \"${SIGNCERTS}\", - \"tlscacerts\": \"${TLSCERTS}\" - } - }" > payload.json - - fi; - - # This command copy the msp certificates to the Vault - curl \ - -H "X-Vault-Token: ${VAULT_TOKEN}" \ - -H "Content-Type: application/json" \ - -X POST \ - -d @payload.json \ - ${VAULT_ADDR}/v1/${VAULT_SECRET_USERS}/${USER}/msp - - # Check msp certificates - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${VAULT_SECRET_USERS}/${USER}/msp | jq -r 'if .errors then . else . 
end') - MSP_ADMINCERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["admincerts"]' 2>&1) - MSP_CACERTS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["cacerts"]' 2>&1) - MSP_KEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["keystore"]' 2>&1) - MSP_SIGNCERTS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["signcerts"]' 2>&1) - MSP_TLSCACERTS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["tlscacerts"]' 2>&1) - - msp_certificate_fields=("$MSP_ADMINCERT" "$MSP_CACERTS" "$MSP_KEYSTORE" "$MSP_SIGNCERTS" "$MSP_TLSCACERTS") - - for field in "${msp_certificate_fields[@]}" - do - if [ "$field" = "null" ] || [[ "$field" = "parse error"* ]] || [ "$field" = "" ] - then - USER_MSP_CERT_WRITTEN=false - break - else - USER_MSP_CERT_WRITTEN=true - fi - done - rm payload.json - fi; - - if [ "$USER_TLS_CERT_WRITTEN" = "true" ] && [ "$USER_MSP_CERT_WRITTEN" = "true" ] - then - echo "${USER} certificates are successfully stored in vault" - break - else - echo "${USER} certificates are not ready, sleeping for {{ $.Values.healthcheck.sleepTimeAfterError }}" - sleep {{ $.Values.healthcheck.sleepTimeAfterError }} - COUNTER=`expr "$COUNTER" + 1` + if [ ! -e /crypto-config/${MSP_KEY}-exists ] || [ "$REFRESH_CERT_VALUE" = "true" ]; then + SK_NAME=$(find ${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/msp/keystore/ -name "*_sk") + kubectl get secret --namespace ${COMPONENT_NAME} ${MSP_KEY} >/dev/null 2>&1 + if [ $? -eq 0 ]; then + # Delete the secret if exists + kubectl delete secret --namespace ${COMPONENT_NAME} ${MSP_KEY} fi - done; + kubectl create secret generic ${MSP_KEY} --namespace ${COMPONENT_NAME} \ + --from-file=admincerts=${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/msp/admincerts/${USER}@${COMPONENT_NAME}-cert.pem \ + --from-file=cacerts=${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/msp/cacerts/{{ include "fabric-catools.caFileName" . }} \ + --from-file=keystore=${SK_NAME} \ + --from-file=signcerts=${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/msp/signcerts/cert.pem \ + --from-file=tlscacerts=${ORG_CYPTO_USERS_FOLDER}/${USER}@${COMPONENT_NAME}/msp/tlscacerts/{{ include "fabric-catools.caFileName" . }} + fi + echo "${USER} certificates are successfully stored." + } - if [ "$COUNTER" -gt {{ $.Values.healthcheck.retries }} ] - then - echo "Retry attempted `expr $COUNTER - 1` times, users certificates have not been saved." 
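Once the user secrets above have been created, they can be pulled back out of Kubernetes to rebuild a local MSP tree, which is useful when debugging a deployment. The secret name, namespace, and output layout below are examples only (placeholders for ${MSP_KEY} and ${COMPONENT_NAME}):

    # Rebuild a local MSP directory from a <user>-msp secret created above.
    NS=org1-net
    SECRET=admin-msp
    for key in admincerts cacerts keystore signcerts tlscacerts; do
      mkdir -p "./msp/${key}"
      kubectl get secret "${SECRET}" --namespace "${NS}" \
        -o jsonpath="{.data.${key}}" | base64 -d > "./msp/${key}/${key}"
    done
    # Note: Fabric expects specific filenames inside msp/ (e.g. keystore/*_sk,
    # signcerts/cert.pem); rename the extracted files before pointing a client at them.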
- touch ${MOUNT_PATH}/certs_not_found.txt - exit 1 - fi; - done; + ORG_CYPTO_USERS_FOLDER="/crypto-config/peerOrganizations/${COMPONENT_NAME}/users" + saveUserSecrets $1 ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: msp-config-file - namespace: {{ .Values.metadata.namespace }} - labels: - app.kubernetes.io/name: msp-config-file - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -data: - no-none-config.yaml: |- - NodeOUs: - Enable: true - ClientOUIdentifier: - Certificate: cacerts/ca-{{ .Values.metadata.namespace }}-{{ .Values.org_data.external_url_suffix }}.pem - OrganizationalUnitIdentifier: client - PeerOUIdentifier: - Certificate: cacerts/ca-{{ .Values.metadata.namespace }}-{{ .Values.org_data.external_url_suffix }}.pem - OrganizationalUnitIdentifier: peer - AdminOUIdentifier: - Certificate: cacerts/ca-{{ .Values.metadata.namespace }}-{{ .Values.org_data.external_url_suffix }}.pem - OrganizationalUnitIdentifier: admin - OrdererOUIdentifier: - Certificate: cacerts/ca-{{ .Values.metadata.namespace }}-{{ .Values.org_data.external_url_suffix }}.pem - OrganizationalUnitIdentifier: orderer - none-config.yaml: |- - NodeOUs: - Enable: true - ClientOUIdentifier: - Certificate: cacerts/ca-{{ .Values.metadata.namespace }}-7054.pem - OrganizationalUnitIdentifier: client - PeerOUIdentifier: - Certificate: cacerts/ca-{{ .Values.metadata.namespace }}-7054.pem - OrganizationalUnitIdentifier: peer - AdminOUIdentifier: - Certificate: cacerts/ca-{{ .Values.metadata.namespace }}-7054.pem - OrganizationalUnitIdentifier: admin - OrdererOUIdentifier: - Certificate: cacerts/ca-{{ .Values.metadata.namespace }}-7054.pem - OrganizationalUnitIdentifier: orderer ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: tls-cert - namespace: {{ .Values.metadata.namespace }} - labels: - app.kubernetes.io/name: tls-cert - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -data: - {{- range $orderers := $.Values.orderers_info }} - {{ $orderers.name }}.crt: {{ $orderers.path | quote }} - {{- end }} +{{- end }} # End createConfigMaps condition diff --git a/platforms/hyperledger-fabric/charts/fabric-catools/templates/deployment.yaml b/platforms/hyperledger-fabric/charts/fabric-catools/templates/deployment.yaml deleted file mode 100644 index 0ba1a2f0c7c..00000000000 --- a/platforms/hyperledger-fabric/charts/fabric-catools/templates/deployment.yaml +++ /dev/null @@ -1,605 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
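The Deployment removed below used to idle with tail -f /dev/null after writing certificates; the refactor replaces it with the one-shot {{ .Release.Name }}-certs-job Job added later in this diff. If automation needs to block until the certificates exist, waiting on that Job is one option; the release and namespace names here are examples:

    # Block until the certs job finishes; dump the last log lines on failure.
    if ! kubectl wait --for=condition=complete job/peer0-certs-job \
         --namespace org1-net --timeout=600s; then
      kubectl logs job/peer0-certs-job --namespace org1-net --tail=50
      exit 1
    fi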
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ .Values.metadata.name }} - namespace: {{ .Values.metadata.namespace }} - labels: - app: {{ .Release.Name }} - app.kubernetes.io/name: {{ .Values.metadata.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - annotations: - {{- if $.Values.annotations }} - {{- range $key, $value := $.Values.annotations.deployment }} - {{- range $k, $v := $value }} - {{ $k }}: {{ $v | quote }} - {{- end }} - {{- end }} - {{- end }} -spec: - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - name: {{ .Values.metadata.name }} - template: - metadata: - labels: - name: {{ .Values.metadata.name }} - spec: - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - {{- if .Values.vault.imagesecretname }} - imagePullSecrets: - - name: {{ $.Values.vault.imagesecretname }} - {{- end }} - volumes: - - name: ca-tools-pv - persistentVolumeClaim: - claimName: ca-tools-pvc - - name: ca-tools-crypto-pv - persistentVolumeClaim: - claimName: ca-tools-crypto-pvc - - name: certcheck - emptyDir: - medium: Memory - - name: generate-crypto - configMap: - name: crypto-scripts-cm - defaultMode: 0775 - items: - - key: generate-crypto-orderer.sh - path: generate-crypto-orderer.sh - - name: generate-orderer-crypto - configMap: - name: crypto-scripts-cm - defaultMode: 0775 - items: - - key: orderer-script.sh - path: orderer-script.sh - - name: generate-crypto-peer - configMap: - name: crypto-scripts-cm - defaultMode: 0775 - items: - - key: generate-crypto-peer.sh - path: generate-crypto-peer.sh - - name: generate-crypto-add-peer - configMap: - name: crypto-scripts-cm - defaultMode: 0775 - items: - - key: generate-crypto-add-peer.sh - path: generate-crypto-add-peer.sh - - name: generate-user-crypto - configMap: - name: crypto-scripts-cm - defaultMode: 0775 - items: - - key: generate-user-crypto.sh - path: generate-user-crypto.sh - - name: store-vault-orderer - configMap: - name: orderer-script-store-vault - defaultMode: 0775 - items: - - key: store-vault-orderer.sh - path: store-vault-orderer.sh - - name: store-vault-peer - configMap: - name: peer-script-store-vault - defaultMode: 0775 - items: - - key: store-vault-peer.sh - path: store-vault-peer.sh - - name: store-vault-users - configMap: - name: users-script-store-vault - defaultMode: 0775 - items: - - key: store-vault-users.sh - path: store-vault-users.sh - - name: none-config - configMap: - name: msp-config-file - defaultMode: 0775 - items: - - key: none-config.yaml - path: none-config.yaml - - name: no-none-config - configMap: - name: msp-config-file - defaultMode: 0775 - items: - - key: no-none-config.yaml - path: no-none-config.yaml - {{- if eq $.Values.metadata.component_type "peer" }} - {{- range $orderers := $.Values.orderers_info }} - - name: {{ $orderers.name }}-tls-cert - configMap: - name: tls-cert - defaultMode: 0775 - items: - - key: {{ $orderers.name }}.crt - path: {{ $orderers.name }}.crt - {{- end }} - {{- end }} - - name: scripts-volume - configMap: - name: bevel-vault-script - - name: package-manager - configMap: - name: package-manager - initContainers: - - name: init-check-certificates - image: {{ $.Values.image.alpineutils }} - imagePullPolicy: IfNotPresent - env: - - name: VAULT_ADDR - value: {{ 
$.Values.vault.address }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_SECRET_USERS - value: {{ $.Values.vault.secretusers }} - - name: VAULT_SECRET_ORDERER - value: {{ $.Values.vault.secretorderer }} - - name: VAULT_SECRET_PEER - value: {{ $.Values.vault.secretpeer }} - - name: VAULT_SECRET_CONFIG_FILE - value: {{ $.Values.vault.secretconfigfile }} - - name: VAULT_SECRET_PEER_ORDERER_TLS - value: {{ $.Values.vault.secretpeerorderertls }} - - name: COMPONENT_TYPE - value: {{ $.Values.metadata.component_type }} - - name: COMPONENT_NAME - value: {{ $.Values.metadata.namespace }} - - name: ORG_NAME_EXT - value: {{ $.Values.metadata.org_name }} - - name: PROXY - value: {{ .Values.metadata.proxy }} - - name: ORDERERS_NAMES - value: "{{ $.Values.orderers.name }}" - - name: PEERS_NAMES - value: "{{ $.Values.peers.name }}" - - name: USERS_IDENTITIES - value: {{ $.Values.users.users_identities }} - - name: MOUNT_PATH - value: "/certcheck" - - name: VAULT_TYPE - value: "{{ $.Values.vault.type }}" - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - . /scripts/bevel-vault.sh - - # Calling a function to retrieve the vault token. - vaultBevelFunc "init" - - mkdir -p ${MOUNT_PATH} - OUTPUT_PATH="/crypto-config/${COMPONENT_TYPE}Organizations/${COMPONENT_NAME}" - mkdir -p ${OUTPUT_PATH}/ca - mkdir -p /root/ca-tools/${ORG_NAME_EXT} - - SECRET_CERT={{ $.Values.vault.secretcert }} - vault_secret_key=$(echo ${SECRET_CERT} |awk -F "?" '{print $1}') - vault_data_key=$(echo ${SECRET_CERT} |awk -F "?" '{print $2}') - - # Get ca cert - vaultBevelFunc "readJson" "${vault_secret_key}" - VALUE_OF_SECRET=$(echo ${VAULT_SECRET} | jq -r ".[\"${vault_data_key}\"]") - echo "${VALUE_OF_SECRET}" > ${OUTPUT_PATH}/ca/ca.${COMPONENT_NAME}-cert.pem - - SECRET_KEY={{ $.Values.vault.secretkey }} - vault_secret_key=$(echo ${SECRET_KEY} |awk -F "?" '{print $1}') - vault_data_key=$(echo ${SECRET_KEY} |awk -F "?" '{print $2}') - - # Get ca key - vaultBevelFunc "readJson" "${vault_secret_key}" - VALUE_OF_SECRET=$(echo ${VAULT_SECRET} | jq -r ".[\"${vault_data_key}\"]") - echo "${VALUE_OF_SECRET}" > ${OUTPUT_PATH}/ca/${COMPONENT_NAME}-CA.key - - # Check if admin msp already created - vaultBevelFunc "readJson" "${VAULT_SECRET_USERS}/admin/msp" - if [ "$SECRETS_AVAILABLE" == "yes" ] - then - echo "Certificates present in vault" - touch ${MOUNT_PATH}/present_msp.txt - else - echo "Certficates absent in vault. Ignore error warning." - touch ${MOUNT_PATH}/absent_msp.txt - fi - - # Check if admin tls already created - vaultBevelFunc "readJson" "${VAULT_SECRET_USERS}/admin/tls" - if [ "$SECRETS_AVAILABLE" == "yes" ] - then - echo "Certificates present in vault" - touch ${MOUNT_PATH}/present_tls.txt - else - echo "Certficates absent in vault. Ignore error warning." 
- touch ${MOUNT_PATH}/absent_tls.txt - fi - - if [ "$COMPONENT_TYPE" = "orderer" ]; then - SERVICES_NAMES=$ORDERERS_NAMES; - fi; - - if [ "$COMPONENT_TYPE" = "peer" ]; then - SERVICES_NAMES=$PEERS_NAMES; - fi; - - list=$(echo "$SERVICES_NAMES" | tr "-" "\n") - for SERVICE in $list - do - # Check if orderer/peer msp already created - if [ "$COMPONENT_TYPE" = "peer" ]; then - SERVICE_NAME="${SERVICE%%,*}" - vaultBevelFunc "readJson" "${VAULT_SECRET_PEER}/${SERVICE_NAME}.${COMPONENT_NAME}/msp" - fi; - - if [ "$COMPONENT_TYPE" = "orderer" ]; then - SERVICE_NAME="${SERVICE}" - vaultBevelFunc "readJson" "${VAULT_SECRET_ORDERER}/${SERVICE_NAME}.${COMPONENT_NAME}/msp" - fi; - - if [ "$SECRETS_AVAILABLE" == "yes" ] - then - echo "Certificates present in vault" - touch ${MOUNT_PATH}/present_msp_${SERVICE_NAME}.txt - else - echo "Certficates absent in vault. Ignore error warning." - touch ${MOUNT_PATH}/absent_msp_${SERVICE_NAME}.txt - fi - - # Check if orderer/peer msp already created - if [ "$COMPONENT_TYPE" = "peer" ]; then - SERVICE_NAME="${SERVICE%%,*}" - vaultBevelFunc "readJson" "${VAULT_SECRET_PEER}/${SERVICE_NAME}.${COMPONENT_NAME}/tls" - fi; - - if [ "$COMPONENT_TYPE" = "orderer" ]; then - SERVICE_NAME="${SERVICE}" - vaultBevelFunc "readJson" "${VAULT_SECRET_ORDERER}/${SERVICE_NAME}.${COMPONENT_NAME}/tls" - fi; - - if [ "$SECRETS_AVAILABLE" == "yes" ] - then - echo "Certificates present in vault" - touch ${MOUNT_PATH}/present_tls_${SERVICE_NAME}.txt - else - echo "Certficates absent in vault. Ignore error warning." - touch ${MOUNT_PATH}/absent_tls_${SERVICE_NAME}.txt - fi - done - - if [ $COMPONENT_TYPE == 'peer' ]; - then - # Check if msp config file already created - vaultBevelFunc "readJson" "${VAULT_SECRET_CONFIG_FILE}" - if [ "$SECRETS_AVAILABLE" == "yes" ] - then - echo "Certificates present in vault." - touch ${MOUNT_PATH}/present_config_file.txt - else - echo "Certficates absent in vault. Ignore error warning." - touch ${MOUNT_PATH}/absent_config_file.txt - fi - - # Check if msp config file already created - vaultBevelFunc "readJson" "${VAULT_SECRET_PEER_ORDERER_TLS}" - if [ "$SECRETS_AVAILABLE" == "yes" ] - then - echo "Certificates present in vault." - touch ${MOUNT_PATH}/present_orderer_tls_cert.txt - else - echo "Certficates absent in vault. Ignore error warning." - touch ${MOUNT_PATH}/absent_orderer_tls_cert.txt - fi - fi; - - if [ "$USERS_IDENTITIES" ] - then - identities_list=$(echo "$USERS_IDENTITIES" | tr "-" "\n") - for user_identity in $identities_list - do - # Check if users msp already created - vaultBevelFunc "readJson" "${VAULT_SECRET_USERS}/${user_identity}/msp" - if [ "$SECRETS_AVAILABLE" == "yes" ] - then - echo "Certificates present in vault." - touch ${MOUNT_PATH}/present_msp_${user_identity}.txt - else - echo "Certficates absent in vault. Ignore error warning." - touch ${MOUNT_PATH}/absent_msp_${user_identity}.txt - fi - - # Check if users tls already created - vaultBevelFunc "readJson" "${VAULT_SECRET_USERS}/${user_identity}/tls" - if [ "$SECRETS_AVAILABLE" == "yes" ] - then - echo "Certificates present in vault" - touch ${MOUNT_PATH}/present_tls_${user_identity}.txt - else - echo "Certficates absent in vault. Ignore error warning." 
- touch ${MOUNT_PATH}/absent_tls_${user_identity}.txt - fi - done - fi - volumeMounts: - - name: ca-tools-pv - mountPath: /root/ca-tools - - name: ca-tools-crypto-pv - mountPath: /crypto-config - - name: certcheck - mountPath: /certcheck - - name: scripts-volume - mountPath: /scripts/bevel-vault.sh - subPath: bevel-vault.sh - containers: - - name: {{ .Values.metadata.name }} - image: "{{ .Values.image.catools }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - env: - - name: COMPONENT_TYPE - value: {{ $.Values.metadata.component_type }} - - name: COMPONENT_NAME - value: {{ $.Values.metadata.namespace }} - - name: ORG_NAME_EXT - value: {{ $.Values.metadata.org_name }} - - name: REFRESH_CERTS - value: "{{ $.Values.checks.refresh_cert_value }}" - - name: REFRESH_USER_CERTS - value: "{{ $.Values.checks.refresh_user_cert_value }}" - - name: ADD_PEER - value: "{{ $.Values.checks.add_peer_value }}" - - name: ORDERERS_NAMES - value: "{{ $.Values.orderers.name }}" - - name: PEERS_NAMES - value: "{{ $.Values.peers.name }}" - - name: USERS - value: {{ $.Values.users.users_list }} - - name: USERS_IDENTITIES - value: {{ $.Values.users.users_identities }} - - name: SUBJECT - value: {{ .Values.org_data.component_subject }} - - name: CERT_SUBJECT - value: {{ .Values.org_data.cert_subject }} - - name: CA_URL - value: {{ .Values.org_data.ca_url }} - - name: EXTERNAL_URL_SUFFIX - value: {{ .Values.org_data.external_url_suffix }} - - name: PROXY - value: {{ .Values.metadata.proxy }} - - name: MOUNT_PATH - value: "/certcheck" - command: ["sh", "-c"] - args: - - |- - - if [ "$COMPONENT_TYPE" = "orderer" ]; then - if [ -e ${MOUNT_PATH}/absent_msp.txt ]; then - ORG_CYPTO_FOLDER="/crypto-config/ordererOrganizations/${COMPONENT_NAME}/users/Admin@${COMPONENT_NAME}" - ORG_CYPTO_ORDERER_FOLDER="/crypto-config/ordererOrganizations/${COMPONENT_NAME}/orderers" - - SK_NAME=$(find ${ORG_CYPTO_FOLDER}/msp/keystore/ -name "*_sk") - if [ -n "$SK_NAME" ]; then - rm ${ORG_CYPTO_FOLDER}/msp/keystore/*_sk - rm /root/ca-tools/${ORG_NAME_EXT}/admin/msp/keystore/*_sk - rm /root/ca-tools/${ORG_NAME_EXT}/admin/tls/keystore/*_sk - fi; - - # Generate crypto material for organization orderers (admin) - cd /root/ca-tools/${ORG_NAME_EXT} - ./generate-crypto-orderer.sh - fi; - - # Generate crypto material for organization orderers (for each orderer) - orderers=$(echo "$ORDERERS_NAMES" | tr "-" "\n") - for ORDERER_NAME in $orderers - do - if [ -e ${MOUNT_PATH}/absent_msp_${ORDERER_NAME}.txt ]; then - echo "need to execute scripts for ${ORDERER_NAME} " - - SK_NAME=$(find ${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/msp/keystore/ -name "*_sk") - if [ -n "$SK_NAME" ]; then - rm ${ORG_CYPTO_ORDERER_FOLDER}/${ORDERER_NAME}.${COMPONENT_NAME}/msp/keystore/*_sk - rm /root/ca-tools/${ORG_NAME_EXT}/cas/orderers/msp/keystore/*_sk - rm /root/ca-tools/${ORG_NAME_EXT}/cas/orderers/tls/keystore/*_sk - fi; - cd /root/ca-tools/${ORG_NAME_EXT} - ./orderer-script.sh ${ORDERER_NAME} - fi; - done - fi; - - if [ "$COMPONENT_TYPE" = "peer" ]; then - - list=$(echo "$PEERS_NAMES" | tr "-" "\n") - for PEER in $list - do - PEER_NAME="${PEER%%,*}" - - if [ -e ${MOUNT_PATH}/absent_msp.txt ] || [ -e ${MOUNT_PATH}/absent_msp_${PEER_NAME}.txt ] || [ "$REFRESH_CERTS" = "true" ]; then - - ORG_CYPTO_FOLDER="/crypto-config/peerOrganizations/${COMPONENT_NAME}/users/Admin@${COMPONENT_NAME}" - - SK_NAME=$(find ${ORG_CYPTO_FOLDER}/msp/keystore/ -name "*_sk") - if [ -n "$SK_NAME" ]; then - rm ${ORG_CYPTO_FOLDER}/msp/keystore/*_sk - rm 
/root/ca-tools/${ORG_NAME_EXT}/admin/msp/keystore/*_sk - rm /root/ca-tools/${ORG_NAME_EXT}/admin/tls/keystore/*_sk - fi; - - # Generate crypto material for organization peers - cd /root/ca-tools/${ORG_NAME_EXT} - if [ "$ADD_PEER" = "false" ]; then - ./generate-crypto-peer.sh - break - else - ./generate-crypto-add-peer.sh - break - fi; - fi; - done - - # Generate crypto material for users - list=$(echo "$USERS_IDENTITIES" | tr "-" "\n") - for USER in $list - do - if ([ "$USERS" ] && [ -e ${MOUNT_PATH}/absent_msp_${USER}.txt ]) || [ "$REFRESH_CERTS" = "true" || [ "$REFRESH_USER_CERTS" = "true" ] - then - cd /root/ca-tools/${ORG_NAME_EXT} - ./generate-user-crypto.sh peer ${USERS} - break - fi; - done - fi; - - # this command creates the indicator of the completion of scripts - touch ${MOUNT_PATH}/flag_finish.txt - tail -f /dev/null - volumeMounts: - - name: ca-tools-pv - mountPath: /root/ca-tools - - name: ca-tools-crypto-pv - mountPath: /crypto-config - - name: certcheck - mountPath: /certcheck - - name: generate-crypto - mountPath: /root/ca-tools/{{ $.Values.metadata.org_name }}/generate-crypto-orderer.sh - subPath: generate-crypto-orderer.sh - - name: generate-orderer-crypto - mountPath: /root/ca-tools/{{ $.Values.metadata.org_name }}/orderer-script.sh - subPath: orderer-script.sh - - name: generate-crypto-peer - mountPath: /root/ca-tools/{{ $.Values.metadata.org_name }}/generate-crypto-peer.sh - subPath: generate-crypto-peer.sh - - name: generate-crypto-add-peer - mountPath: /root/ca-tools/{{ $.Values.metadata.org_name }}/generate-crypto-add-peer.sh - subPath: generate-crypto-add-peer.sh - - name: generate-user-crypto - mountPath: /root/ca-tools/{{ $.Values.metadata.org_name }}/generate-user-crypto.sh - subPath: generate-user-crypto.sh - - name: store-vault - image: {{ $.Values.image.alpineutils }} - imagePullPolicy: {{ .Values.image.pullPolicy }} - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_SECRET_USERS - value: {{ $.Values.vault.secretusers }} - - name: VAULT_SECRET_ORDERER - value: {{ $.Values.vault.secretorderer }} - - name: VAULT_SECRET_PEER - value: {{ $.Values.vault.secretpeer }} - - name: VAULT_SECRET_PEER_ORDERER_TLS - value: {{ $.Values.vault.secretpeerorderertls }} - - name: VAULT_SECRET_CONFIG_FILE - value: {{ $.Values.vault.secretconfigfile }} - - name: VAULT_SECRET_COUCHDB - value: {{ $.Values.vault.secretcouchdb }} - - name: COMPONENT_TYPE - value: {{ $.Values.metadata.component_type }} - - name: COMPONENT_NAME - value: {{ $.Values.metadata.namespace }} - - name: REFRESH_CERTS - value: "{{ $.Values.checks.refresh_cert_value }}" - - name: PROXY - value: {{ .Values.metadata.proxy }} - - name: EXTERNAL_URL_SUFFIX - value: {{ .Values.org_data.external_url_suffix }} - - name: ORDERERS_NAMES - value: "{{ $.Values.orderers.name }}" - - name: PEERS_NAMES - value: "{{ $.Values.peers.name }}" - - name: USERS_IDENTITIES - value: {{ $.Values.users.users_identities }} - - name: MOUNT_PATH - value: "/certcheck" - command: ["sh", "-c"] - args: - - |- - . /scripts/package-manager.sh - # Define the packages to install - packages_to_install="jq curl bash" - install_packages "$packages_to_install" - - while ! 
[ -f ${MOUNT_PATH}/flag_finish.txt ] - do - echo 'Waiting for completion of scripts' - sleep 2s - done - - if [ -e /${MOUNT_PATH}/flag_finish.txt ]; then - if [ "$COMPONENT_TYPE" = "orderer" ]; then - # Generate crypto material for organization orderers - cd /scripts/orderer - ./store-vault-orderer.sh - fi; - - if [ "$COMPONENT_TYPE" = "peer" ]; then - # Generate crypto material for organization peers - cd /scripts/peer - ./store-vault-peer.sh - if [ "$USERS_IDENTITIES" ] - then - cd /scripts/peer - ./store-vault-users.sh - fi; - fi; - fi; - - # Raises an error if any certificate has not been stored correctly - if [ -e /certcheck/certs_not_found.txt ]; then - exit 1 - fi - tail -f /dev/null - volumeMounts: - - name: ca-tools-pv - mountPath: /root/ca-tools - - name: ca-tools-crypto-pv - mountPath: /crypto-config - - name: certcheck - mountPath: /certcheck - - name: store-vault-orderer - mountPath: /scripts/orderer/store-vault-orderer.sh - subPath: store-vault-orderer.sh - - name: store-vault-peer - mountPath: /scripts/peer/store-vault-peer.sh - subPath: store-vault-peer.sh - - name: store-vault-users - mountPath: /scripts/peer/store-vault-users.sh - subPath: store-vault-users.sh - {{ if and (eq $.Values.metadata.component_type "peer") (ne $.Values.metadata.proxy "none") }} - - name: no-none-config - mountPath: /crypto-config/peerOrganizations/{{ $.Values.metadata.namespace }}/msp/config.yaml - subPath: no-none-config.yaml - {{ end }} - {{ if and (eq $.Values.metadata.component_type "peer") (eq $.Values.metadata.proxy "none") }} - - name: none-config - mountPath: /crypto-config/peerOrganizations/{{ $.Values.metadata.namespace }}/msp/config.yaml - subPath: none-config.yaml - {{ end }} - {{- if eq $.Values.metadata.component_type "peer" }} - {{- range $orderers := $.Values.orderers_info }} - - name: {{ $orderers.name }}-tls-cert - mountPath: /tlscerts/{{ $orderers.name }}.crt - subPath: {{ $orderers.name }}.crt - {{- end }} - {{- end }} - - name: package-manager - mountPath: /scripts/package-manager.sh - subPath: package-manager.sh diff --git a/platforms/hyperledger-fabric/charts/fabric-catools/templates/job-cleanup.yaml b/platforms/hyperledger-fabric/charts/fabric-catools/templates/job-cleanup.yaml new file mode 100644 index 00000000000..4689847725b --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-catools/templates/job-cleanup.yaml @@ -0,0 +1,75 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ .Release.Name }}-certs-cleanup + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ include "fabric-catools.name" . }}-cleanup + app.kubernetes.io/component: ca-tools + app.kubernetes.io/part-of: {{ include "fabric-catools.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + annotations: + helm.sh/hook-weight: "0" + helm.sh/hook: "pre-delete" + helm.sh/hook-delete-policy: "hook-succeeded" +spec: + backoffLimit: 3 + completions: 1 + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "fabric-catools.name" . }}-cleanup + app.kubernetes.io/component: ca-tools + app.kubernetes.io/part-of: {{ include "fabric-catools.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + restartPolicy: "Never" + {{- if .Values.image.pullSecret }} + imagePullSecrets: + - name: {{ .Values.image.pullSecret }} + {{- end }} + containers: + - name: delete-secrets + image: {{ .Values.image.alpineUtils }} + securityContext: + runAsUser: 0 + imagePullPolicy: IfNotPresent + command: ["sh", "-c"] + args: + - | + +{{- if .Values.settings.removeCertsOnDelete }} + function deleteSecret { + key=$1 + kubectl get secret ${key} --namespace {{ .Release.Namespace }} -o json > /dev/null 2>&1 + if [ $? -eq 0 ]; then + kubectl delete secret ${key} --namespace {{ .Release.Namespace }} + fi + } + + deleteSecret admin-tls + deleteSecret admin-msp + deleteSecret {{ .Release.Name }}-msp + deleteSecret {{ .Release.Name }}-tls + {{- range .Values.users.usersList }} + deleteSecret {{ .identity }}-msp + deleteSecret {{ .identity }}-tls + {{ end }} +{{- end}} +{{- if .Values.settings.removeOrdererTlsOnDelete }} + if kubectl get configmap --namespace {{ .Release.Namespace }} orderer-tls-cacert &> /dev/null; then + echo "Deleting orderer-tls-cacert configmap in k8s ..." + kubectl delete configmap --namespace {{ .Release.Namespace }} orderer-tls-cacert + fi +{{- end}} diff --git a/platforms/hyperledger-fabric/charts/fabric-catools/templates/job.yaml b/platforms/hyperledger-fabric/charts/fabric-catools/templates/job.yaml new file mode 100644 index 00000000000..87ee6cffb76 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-catools/templates/job.yaml @@ -0,0 +1,292 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ .Release.Name }}-certs-job + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/hook-delete-policy: "before-hook-creation" + labels: + app.kubernetes.io/name: {{ include "fabric-catools.name" . }}-job + app.kubernetes.io/component: ca-tools + app.kubernetes.io/part-of: {{ include "fabric-catools.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +spec: + backoffLimit: 6 + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "fabric-catools.name" . }} + app.kubernetes.io/component: ca-tools + app.kubernetes.io/part-of: {{ include "fabric-catools.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + spec: + restartPolicy: OnFailure + serviceAccountName: {{ .Values.global.serviceAccountName }} + {{- if .Values.image.pullSecret }} + imagePullSecrets: + - name: {{ .Values.image.pullSecret }} + {{- end }} + volumes: + - name: certificates + emptyDir: + medium: Memory + - name: generate-crypto + configMap: + name: crypto-scripts-cm + defaultMode: 0775 + items: + - key: generate-crypto-orderer.sh + path: generate-crypto-orderer.sh + - name: generate-orderer-crypto + configMap: + name: crypto-scripts-cm + defaultMode: 0775 + items: + - key: orderer-script.sh + path: orderer-script.sh + - name: generate-crypto-peer + configMap: + name: crypto-scripts-cm + defaultMode: 0775 + items: + - key: generate-crypto-peer.sh + path: generate-crypto-peer.sh + - name: generate-crypto-add-peer + configMap: + name: crypto-scripts-cm + defaultMode: 0775 + items: + - key: generate-crypto-add-peer.sh + path: generate-crypto-add-peer.sh + - name: generate-user-crypto + configMap: + name: crypto-scripts-cm + defaultMode: 0775 + items: + - key: generate-user-crypto.sh + path: generate-user-crypto.sh + - name: store-vault-orderer + configMap: + name: orderer-script-store-vault + defaultMode: 0775 + items: + - key: store-vault-orderer.sh + path: store-vault-orderer.sh + - name: store-vault-peer + configMap: + name: peer-script-store-vault + defaultMode: 0775 + items: + - key: store-vault-peer.sh + path: store-vault-peer.sh + - name: store-vault-users + configMap: + name: users-script-store-vault + defaultMode: 0775 + items: + - key: store-vault-users.sh + path: store-vault-users.sh + - name: scripts-volume + configMap: + name: bevel-vault-script + - name: package-manager + configMap: + name: package-manager + containers: + - name: generate-certs + image: "{{ .Values.image.caTools }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: COMPONENT_TYPE + value: {{ .Values.orgData.type }} + - name: COMPONENT_NAME + value: {{ .Release.Namespace }} + - name: ADD_PEER + value: "{{ .Values.settings.addPeerValue }}" + - name: USERS + value: {{ .Values.users.usersList | toJson | b64enc }} + - name: REFRESH_CERT_VALUE + value: "{{ .Values.settings.refreshCertValue }}" + {{- if eq .Values.global.vault.type "hashicorp" }} + - name: VAULT_ADDR + value: {{ .Values.global.vault.address }} + - name: VAULT_APP_ROLE + value: {{ .Values.global.vault.role }} + - name: KUBERNETES_AUTH_PATH + value: {{ .Values.global.vault.authPath }} + - name: VAULT_TYPE + value: {{ .Values.global.vault.type }} + - name: VAULT_SECRET_ENGINE + value: "{{ .Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ .Values.global.vault.secretPrefix }}" + {{- end }} + command: ["sh", "-c"] + args: + - | + + . /scripts/package-manager.sh + # Define the packages to install + packages_to_install="jq curl" + install_packages "$packages_to_install" + # Download kubectl binary + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.27.0/bin/linux/amd64/kubectl; + chmod u+x kubectl && mv kubectl /usr/local/bin/kubectl; + +{{- if (eq .Values.global.vault.type "hashicorp") }} + . /scripts/bevel-vault.sh + echo "Getting vault Token..." 
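One hardening note on the kubectl download a few lines above: the binary is fetched without verification. A hedged sketch of an optional checksum step, assuming the same pinned version and linux/amd64 platform implied by the download URL (it would run right after the curl -LO, before the binary is moved to /usr/local/bin):

    # Optional: verify the pinned kubectl binary against its published checksum.
    KVER=v1.27.0
    curl -LO "https://dl.k8s.io/release/${KVER}/bin/linux/amd64/kubectl.sha256"
    echo "$(cat kubectl.sha256)  kubectl" | sha256sum -c - || exit 1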
+ vaultBevelFunc "init" +{{- end }} + OUTPUT_PATH="/crypto-config/${COMPONENT_TYPE}Organizations/${COMPONENT_NAME}" + mkdir -p ${OUTPUT_PATH}/ca + mkdir -p ${OUTPUT_PATH}/msp/admincerts + # Get the CA cert from Kubernetes secret + kubectl get secret --namespace {{ .Release.Namespace }} fabric-ca-server-certs >/dev/null 2>&1 + if [ $? -eq 0 ]; then + LOOKUP_SECRET_RESPONSE=$(kubectl get secret -n {{ .Release.Namespace }} fabric-ca-server-certs -o jsonpath='{.data}'); + echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".\"tls.crt\"" | base64 -d > ${OUTPUT_PATH}/ca/ca.${COMPONENT_NAME}-cert.pem; + else +{{- if (eq .Values.global.vault.type "hashicorp") }} + vaultBevelFunc "readJson" "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/ca" + if [ "$SECRETS_AVAILABLE" = "yes" ]; then + # Get ca cert + ca_cert=$(echo ${VAULT_SECRET} | jq -r ".rootca_pem") + echo "${ca_cert}" > ${OUTPUT_PATH}/ca/ca.${COMPONENT_NAME}-cert.pem + ca_key=$(echo ${VAULT_SECRET} | jq -r ".rootca_key") + echo "${ca_key}" > ${OUTPUT_PATH}/ca/ca.${COMPONENT_NAME}.key + # Also create the k8s secret + kubectl create secret tls ${key} --namespace ${COMPONENT_NAME} \ + --cert=${OUTPUT_PATH}/ca/ca.${COMPONENT_NAME}-cert.pem \ + --key=${OUTPUT_PATH}/ca/ca.${COMPONENT_NAME}.key + else + echo "CA certs not found in Vault" + exit 1 + fi; +{{- else }} + echo "CA certs not found in Kubernetes secret" + exit 1 +{{- end }} + fi + echo "CA certificate saved locally." + checkSecrets() { + type=$1 + key=$2 + kubectl get secret --namespace {{ .Release.Namespace }} ${key} >/dev/null 2>&1 + if [ $? -eq 0 ]; then + # Secret found + touch /crypto-config/${key}-exists + if [ $key = "admin-msp" ]; then + # Get the admin cert if admin-msp already exists + LOOKUP_SECRET_RESPONSE=$(kubectl get secret --namespace {{ .Release.Namespace }} ${key} -o jsonpath='{.data}'); + echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".\"admincerts\"" | base64 -d > ${OUTPUT_PATH}/msp/admincerts/Admin@${COMPONENT_NAME}-cert.pem; + fi + else +{{- if (eq .Values.global.vault.type "hashicorp") }} + #Read if secret exists in Vault + vaultBevelFunc 'readJson' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/${type}/${key}" + if [ "$SECRETS_AVAILABLE" = "yes" ]; then + touch /crypto-config/${key}-exists + #TODO Maybe create the K8s secrets from Vault secrets here if needed + fi; +{{- else }} + echo "Secret $key does not exist." +{{- end }} + fi + } + + # Check if secrets already exist + checkSecrets users admin-msp + checkSecrets users admin-tls + checkSecrets ${COMPONENT_TYPE}s {{ .Release.Name }}-msp + checkSecrets ${COMPONENT_TYPE}s {{ .Release.Name }}-tls + + echo "Starting certificate generation." + if [ "$COMPONENT_TYPE" = "orderer" ]; then + # Generate crypto material for organization orderers (admin) + cd /root/ca-tools/org + ./generate-crypto-orderer.sh + + if [ ! -e /crypto-config/{{ .Release.Name }}-msp-exists ] || [ ! -e /crypto-config/{{ .Release.Name }}-tls-exists ] || [ "$REFRESH_CERT_VALUE" = "true" ]; then + # Generate crypto material for organization orderer + echo "Need to execute scripts for orderer {{ .Release.Name }}" + cd /root/ca-tools/org + ./orderer-script.sh {{ .Release.Name }} + fi + + # Save the generated certificates + cd /scripts/orderer + ./store-vault-orderer.sh {{ .Release.Name }} + fi + + if [ "$COMPONENT_TYPE" = "peer" ]; then + # Generate crypto material for organization peer (admin) + cd /root/ca-tools/org + if [ "$ADD_PEER" = "false" ]; then + ./generate-crypto-peer.sh {{ .Release.Name }} + else + if [ ! 
-e /crypto-config/{{ .Release.Name }}-msp-exists ] || [ ! -e /crypto-config/{{ .Release.Name }}-tls-exists ] || [ "$REFRESH_CERT_VALUE" = "true" ]; then + ./generate-crypto-add-peer.sh {{ .Release.Name }} + fi + fi; + {{- range .Values.users.usersList }} + checkSecrets users {{ .identity }}-msp + checkSecrets users {{ .identity }}-tls + {{ end }} + cd /root/ca-tools/org + ./generate-user-crypto.sh peer ${USERS} + + # Save the generated certificates for peers and users + cd /scripts/peer + ./store-vault-peer.sh {{ .Release.Name }} + cd /scripts/peer + {{- range .Values.users.usersList }} + ./store-vault-users.sh {{ .identity }} + {{ end }} + fi; + # this command creates the indicator of the completion of scripts + echo "Certificate generation complete." + volumeMounts: + - name: certificates + mountPath: /crypto-config + - name: generate-crypto + mountPath: /root/ca-tools/org/generate-crypto-orderer.sh + subPath: generate-crypto-orderer.sh + - name: generate-orderer-crypto + mountPath: /root/ca-tools/org/orderer-script.sh + subPath: orderer-script.sh + - name: generate-crypto-peer + mountPath: /root/ca-tools/org/generate-crypto-peer.sh + subPath: generate-crypto-peer.sh + - name: generate-crypto-add-peer + mountPath: /root/ca-tools/org/generate-crypto-add-peer.sh + subPath: generate-crypto-add-peer.sh + - name: generate-user-crypto + mountPath: /root/ca-tools/org/generate-user-crypto.sh + subPath: generate-user-crypto.sh + - name: package-manager + mountPath: /scripts/package-manager.sh + subPath: package-manager.sh + - name: store-vault-orderer + mountPath: /scripts/orderer/store-vault-orderer.sh + subPath: store-vault-orderer.sh + - name: store-vault-peer + mountPath: /scripts/peer/store-vault-peer.sh + subPath: store-vault-peer.sh + - name: store-vault-users + mountPath: /scripts/peer/store-vault-users.sh + subPath: store-vault-users.sh + - name: scripts-volume + mountPath: /scripts/bevel-vault.sh + subPath: bevel-vault.sh diff --git a/platforms/hyperledger-fabric/charts/fabric-catools/templates/volume.yaml b/platforms/hyperledger-fabric/charts/fabric-catools/templates/volume.yaml deleted file mode 100644 index 0fc3023e7de..00000000000 --- a/platforms/hyperledger-fabric/charts/fabric-catools/templates/volume.yaml +++ /dev/null @@ -1,50 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
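The store scripts invoked above no longer re-read every secret after writing it (the old healthcheck retry loops are gone), so a manual spot-check can be reassuring after a run. The path and names below follow the values.yaml defaults in this diff (secretsv2, data/supplychain) with a hypothetical peer release called peer0 in namespace org1-net:

    # List the fields stored for a peer MSP entry in Vault KV v2.
    curl -sS -H "X-Vault-Token: ${VAULT_TOKEN}" \
      "${VAULT_ADDR}/v1/secretsv2/data/supplychain/peers/peer0-msp" \
      | jq -r '.data.data | keys[]'
    # Expected keys: admincerts, cacerts, keystore, signcerts, tlscacerts

    # The Kubernetes copy can be checked the same way:
    kubectl get secret peer0-msp --namespace org1-net -o jsonpath='{.data}' | jq 'keys'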
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: ca-tools-crypto-pvc - namespace: {{ $.Values.metadata.namespace }} - annotations: - {{- if $.Values.annotations }} - {{- range $key, $value := $.Values.annotations.pvc }} - {{- range $k, $v := $value }} - {{ $k }}: {{ $v | quote }} - {{- end }} - {{- end }} - {{- end }} -spec: - storageClassName: {{ $.Values.storage.storageclassname }} - accessModes: - - ReadWriteOnce - resources: - requests: - storage: {{ $.Values.storage.storagesize }} - ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: ca-tools-pvc - namespace: {{ $.Values.metadata.namespace }} - annotations: - {{- if $.Values.annotations }} - {{- range $key, $value := $.Values.annotations.pvc }} - {{- range $k, $v := $value }} - {{ $k }}: {{ $v | quote }} - {{- end }} - {{- end }} - {{- end }} -spec: - storageClassName: {{ $.Values.storage.storageclassname }} - accessModes: - - ReadWriteOnce - resources: - requests: - storage: {{ $.Values.storage.storagesize }} - diff --git a/platforms/hyperledger-fabric/charts/fabric-catools/values.yaml b/platforms/hyperledger-fabric/charts/fabric-catools/values.yaml index 97135404c83..3486449ec78 100644 --- a/platforms/hyperledger-fabric/charts/fabric-catools/values.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-catools/values.yaml @@ -3,143 +3,111 @@ # # SPDX-License-Identifier: Apache-2.0 ############################################################################################## +global: + #Provide the service account name which will be created. + serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # only 'false' is implemented + vault: + #Provide the type of vault + #Eg. type: hashicorp + type: hashicorp + #Provide the vaultrole for an organization + #Eg. vaultrole: supplychain-vault-role + role: vault-role + #Provide the vault server address + #Eg. vaultaddress: http://Vault-884963190.eu-west-1.elb.amazonaws.com + address: + #Provide the kubernetes auth backed configured in vault for an organization + #Eg. authpath: supplychain + authPath: supplychain + #Provide the secret engine. + secretEngine: secretsv2 + #Provide the vault path where the secrets will be stored + secretPrefix: "data/supplychain" -metadata: - #Provide the namespace for CA deployment - #Eg. namespace: org1-net - namespace: org1-net - #Provide name for ca server deployment - #Eg. name: ca-tools - name: ca-tools - #Provide organization's type (orderer or peer) - #Eg. component_type: orderer - component_type: orderer - #Provide organization's name in lowercases - #Eg. org_name: org1 - org_name: org1 - #This will be the proxy/ingress provider. Can have values "haproxy" or "none" - #Eg. provider: "haproxy" - proxy: haproxy -# Provide the number of replica pods -replicaCount: 1 + proxy: + #This will be the proxy/ingress provider. Can have values "haproxy" or "none" + #Eg. provider: "haproxy" + provider: haproxy + #This field specifies the external url for the organization + #Eg. externalUrlSuffix: test.blockchaincloudpoc.com + externalUrlSuffix: test.blockchaincloudpoc.com image: #Provide the image name for the server container - #Eg. image: hyperledger/fabric-ca-tools - repository: ghcr.io/hyperledger/bevel-fabric-ca-tools:1.2.1 - # Provide image pull policy - pullPolicy: IfNotPresent + #Eg. 
caTools: hyperledger/fabric-ca-tools:latest + caTools: ghcr.io/hyperledger/bevel-fabric-ca:latest #Provide the valid image name and version to read certificates from vault server #Eg.alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - -annotations: - #Extra annotations - pvc: {} - deployment: {} - -storage: - #Provide the storageclassname for - #Eg. storageclassname: aws-storageclass - storageclassname: aws-storageclass - #Provide the storagesize for CA - #Eg. storagesize: 512Mi - storagesize: 512Mi - -vault: - #Provide the vaultrole for an organization - #Eg. vaultrole: vault-role - role: vault-role - #Provide the vault server address - #Eg. vaultaddress: http://Vault-884963190.eu-west-1.elb.amazonaws.com - address: - #Provide the kubernetes auth backed configured in vault for an organization - #Eg. authpath: devorg1-net-auth - authpath: devorg1-net-auth - #Provide the path configured in vault for users certficates - #Eg. secretmsp: secretsv2/data/crypto/ordererOrganizations/..../users - secretusers: secretsv2/data/crypto/ordererOrganizations/org1-net/users - #Provide the path configured in vault for orderers - #Eg. secrettls: secretsv2/data/crypto/Organizations/.../.../orderers - secretorderer: secretsv2/data/crypto/ordererOrganizations/org1-net/orderers - #Provide the path configured in vault for orderers - #Eg. secretpeerorderertls: secretsv2/data/crypto/Organizations/.../.../orderer/tls - secretpeerorderertls: secretsv2/data/crypto/peerOrganizations/org1-net/orderer/tls - #Provide the secretcert path configured in vault for CA server - #Eg. secretcert: secretsv2/data/crypto/Organizations/.../...-cert.pem - secretcert: secretsv2/data/crypto/ordererOrganizations/org1-net/ca?ca.org1-net-cert.pem - #Provide the secretkey path configured in vault for CA server - #Eg. secretkey: secretsv2/data/crypto/Organizations/.../...-CA.key - secretkey: secretsv2/data/crypto/ordererOrganizations/org1-net/ca?org1-net-CA.key - #Provide the path configured in vault for MSP config.yaml file - #Eg. secretconfigfile: secretsv2/data/crypto/Organizations/.../config - secretconfigfile: secretsv2/data/crypto/ordererOrganizations/org1-net/msp/config - #Provide the path configured in vault for couchdb credentials - #Eg. secretconfigfile: secretsv2/data/credentials/.../couchdb/org1 - secretcouchdb: secretsv2/data/credentials/org1-net/couchdb/org1 - #Provide the serviceaccountname for vault - #Eg. serviceaccountname: vault-auth - serviceaccountname: vault-auth - #Provide the type of vault - #Eg. type: hashicorp - type: hashicorp - #Provide the imagesecretname for vault - #Eg. imagesecretname: regcred - imagesecretname: "" - #Kuberenetes secret for vault ca.cert - -healthcheck: - # The amount of times to retry fetching from/writing to Vault before giving up. - # Eg. retries: 10 - retries: 10 - # The amount of time in seconds to wait after an error occurs when fetching from/writing to Vault. - # Eg. sleepTimeAfterError: 15 - sleepTimeAfterError: 15 + alpineUtils: ghcr.io/hyperledger/bevel-alpine:latest + #Provide the secret to use if private repository + #Eg. pullSecret: regcred + pullSecret: + # Provide image pull policy + pullPolicy: IfNotPresent -org_data: - #External URL of the organization - #Eg. external_url_suffix: org1.blockchaincloudpoc.com - external_url_suffix: org1proxy.blockchaincloudpoc.com - #Provide organization's subject - #Eg. 
"O=Orderer,L=51.50/-0.13/London,C=GB" - component_subject: +orgData: + #Provide the CA URL for the organization without https + #Eg. caAddress: ca.example.com + caAddress: "" + #Provide the CA Admin User for the organization + #Eg. caAdminUser: admin + caAdminUser: supplychain-admin + #Provide the CA Admin Password for the organization + #Eg. caAdminPassword: adminpw + caAdminPassword: supplychain-adminpw + #Provide organization's name in lowercases + #Eg. orgName: supplychain + orgName: supplychain + #Provide organization's type (orderer or peer) + #Eg. type: orderer + type: orderer #Provide organization's subject - #Eg. "O=Orderer,L=51.50/-0.13/London,C=GB" - cert_subject: - #Provide organization's country - #Eg. UK - component_country: UK - #Provide organization's state - #Eg. London - component_state: London - #Provide organization's location - #Eg. Lodon - component_location: Lodon - #Provide organization's ca_url - #Eg. "ca.supplychain-net.org1.blockchaincloudpoc.com" - ca_url: - -#Provide orderer's names -orderers: - name: orderer1 -#Provide orderer's names and ca certificates -orderers_info: {} -#Provide peer's names -peers: - name: peer1 -#Provide the total number of peers -peer_count: 4 + #Eg. componentSubject: "O=Orderer,L=51.50/-0.13/London,C=GB" + componentSubject: "O=Orderer,L=51.50/-0.13/London,C=GB" users: - #Base64 encoded list of users - #Eg. IC0gdXNlcjoKICAgICAgICAgIGlkZW50aXR5OiB1c2VyMQogICAgICAgICAgYXR0cmlidXRlczoKICAgICAgICAgICAgLSBrZXk6IGtleTEKICAgICAgIgICAgICAgIC0ga2V5OiBrZXkyCiAgICAgICAgICAgICAgdmFsdWU6IHZhbHVlMgogICAgICAgIC0gdXNlcjoKICAgICAgICAgIGlkZW50aXR5OiB1c2VyMgogICAgICAgICAgYXR0cmlidXRlczoKICAgICAgICAgICAgLSBrZXk6IGtleTEKICAgICAgICAgICAgICB2YWx1ZTogdmFsdWUxCiAgICAgICAgICAgIC0ga2V5OiBrZXkzCiAgICAgICAgICAgICAgdmFsdWU6IHZhbHVlMw== - users_list: - #Provides a list of user identities - #Eg. "user1-user2-user3" - users_identities: -checks: - #Provides the need to refresh user certificates - refresh_cert_value: false - refresh_user_cert_value: false - #Add a peer to an existing network - add_peer_value: False + # Generating User Certificates with custom attributes using Fabric CA in Bevel for Peer Organizations + # Eg. + # usersList: + # - user: + # identity: user1 + # attributes: + # - key: "hf.Revoker" + # value: "true" + # - user: + # identity: user2 + # attributes: + # - key: "hf.Revoker" + # value: "true" + usersList: + # - identity: user1 + # attributes: + # - key: "hf.Revoker" + # value: "true" + # - identity: user2 + # attributes: + # - key: "hf.Revoker" + # value: "true" + #Base64 encoded list of users + #Eg. usersListAnsible: IC0gdXNlcjoKICAgICAgICAgIGlkZW50aXR5OiB1c2VyMQogICAgICAgICAgYXR0cmlidXRlczoKICAgICAgICAgICAgLSBrZXk6IGtleTEKICAgICAgIgICAgICAgIC0ga2V5OiBrZXkyCiAgICAgICAgICAgICAgdmFsdWU6IHZhbHVlMgogICAgICAgIC0gdXNlcjoKICAgICAgICAgIGlkZW50aXR5OiB1c2VyMgogICAgICAgICAgYXR0cmlidXRlczoKICAgICAgICAgICAgLSBrZXk6IGtleTEKICAgICAgICAgICAgICB2YWx1ZTogdmFsdWUxCiAgICAgICAgICAgIC0ga2V5OiBrZXkzCiAgICAgICAgICAgICAgdmFsdWU6IHZhbHVlMw== + usersListAnsible: + +settings: + #Flag to create configmaps for the organization. This flag must be set to true when installing the first orderer/peer in organization and false for others. 
+ createConfigMaps: true + #Flag to refresh user certificates + refreshCertValue: false + #Flag to add a peer to an existing network + addPeerValue: false + #Flag to remove certificates on uninstall + removeCertsOnDelete: false + #Flag to remove orderer certificates on uninstall + removeOrdererTlsOnDelete: false + +labels: + service: [] + pvc: [] + deployment: [] diff --git a/platforms/hyperledger-fabric/charts/fabric-channel-create/Chart.yaml b/platforms/hyperledger-fabric/charts/fabric-channel-create/Chart.yaml index 228c9310b9b..46a9ce16dd2 100644 --- a/platforms/hyperledger-fabric/charts/fabric-channel-create/Chart.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-channel-create/Chart.yaml @@ -5,7 +5,22 @@ ############################################################################################## apiVersion: v1 -appVersion: "2.0" -description: "Hyperledger Fabric: Creates channel." name: fabric-channel-create -version: 1.0.0 +description: "Hyperledger Fabric: Creates channel" +version: 1.1.0 +appVersion: latest +keywords: + - bevel + - hlf + - fabric + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/hyperledger-fabric/charts/fabric-channel-create/README.md b/platforms/hyperledger-fabric/charts/fabric-channel-create/README.md index 9a5cdd7cf27..fa2153fa56c 100644 --- a/platforms/hyperledger-fabric/charts/fabric-channel-create/README.md +++ b/platforms/hyperledger-fabric/charts/fabric-channel-create/README.md @@ -3,189 +3,98 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) - -# Create Channel Hyperledger Fabric Deployment +# fabric-channel-create -- [Create Channel Hyperledger Fabric Deployment Helm Chart](#create-channel-hyperledger-fabric-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Verification](#verification) -- [Updating the Deployment](#updating-the-deployment) -- [Deletion](#deletion) -- [Contributing](#contributing) -- [License](#license) +This chart is a component of Hyperledger Bevel. The fabric-channel-create chart deploys a Kubernetes job to create a channel. This chart should be executed after the [fabric-genesis](../fabric-genesis/README.md) chart and the channeltx should be present in `files`. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details. +## TL;DR - -## Create Channel Hyperledger Fabric Deployment Helm Chart ---- -A [Helm chart](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-fabric/charts/fabric-channel-create) to create a channel. - +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install allchannel bevel/fabric-channel-create +``` - ## Prerequisites ---- -Before deploying the Helm chart, make sure to have the following prerequisites: -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm installed. 
+- Kubernetes 1.19+ +- Helm 3.2.0+ +If Hashicorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ - -## Chart Structure ---- -The structure of the Helm chart is as follows: +Also, [fabric-genesis](../fabric-genesis/README.md) chart should be installed. Then you can get the channeltx with following commands: +```bash +cd ./fabric-channel-create/files +kubectl --namespace supplychain-net get configmap allchannel-channeltx -o jsonpath='{.data.allchannel-channeltx_base64}' > channeltx.json ``` -fabric-channel-create/ - |- templates/ - |- _helpers.yaml - |- configmap.yaml - |- create_channel.yaml - |- Chart.yaml - |- README.md - |- values.yaml -``` - -- `templates/`: Contains the Kubernetes manifest templates that define the resources to be deployed. -- `helpers.tpl`: Contains custom label definitions used in other templates. -- `configmap.yaml`: Store configuration data that can be consumed by containers. The first ConfigMap stores various configuration data as key-value pairs and the second ConfigMap stores the base64-encoded content of the channel configuration file (channel.tx.base64). -- `create_channel.yaml`: The certificates-init fetches TLS certificates from a Vault server and stores them in a local directory. The createchannel fetches the channel configuration file from a local directory and checks to see if the channel already exists. If the channel does not exist, the createchannel creates the channel. -- `Chart.yaml`: Contains the metadata for the Helm chart, such as the name, version, and description. -- `README.md`: Provides information and instructions about the Helm chart. -- `values.yaml`: Contains the default configuration values for the Helm chart. - - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-fabric/charts/fabric-channel-create/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. 
Here are some important configuration options: - -### Metadata - -| Name | Description | Default Value | -| ----------------------| ----------------------------------------------------------------------|---------------------------------------------------| -| namespace | Provide the namespace for organization's peer | org1-net | -| images.fabrictools | Valid image name and version for fabric tools | ghcr.io/hyperledger/bevel-fabric-tools:2.2.2 | -| images.alpineutils | Valid image name and version to read certificates from vault server | ghcr.io/hyperledger/bevel-alpine:latest | -| labels | Custom labels (other than specified) | "" | - - -### Deployment - -| Name | Description | Default Value | -| ------------ | ------------------------------------------- | -------------- | -| annotations | Deployment annotations | "" | - -### Peer - -| Name | Description | Default Value | -| --------------| ----------------------------------------------| ------------------------------| -| name | Name of the peer as per deployment yaml | peer0 | -| address | Address of the peer and grpc cluster IP port | peer0.org1-net:7051 | -| localmspid | Local MSP ID for organization | Org1MSP | -| loglevel | Log level for organization's peer | debug | -| tlsstatus | True or False for organization's peer | true | - -### Vault - -| Name | Description | Default Value | -| ------------------- | --------------------------------------------------------------------| ------------------------------| -| role | Vault role for the organization | vault-role | -| address | Vault server address | "" | -| authpath | Kubernetes auth backend configured in vault for the organization | devorg1-net-auth | -| adminsecretprefix | Vault secret prefix for admin | secretsv2/data/crypto/peerOrganizations/org1-net/users/admin | -| orderersecretprefix | Vault secret prefix for orderer | secretsv2/data/crypto/peerOrganizations/org1-net/orderer | -| serviceaccountname | Service account name for vault | vault-auth | -| type | Provide the type of vault | hashicorp | -| imagesecretname | Image secret name for vault | "" | -| tls | Vault ca.cert Kubernetes secret | "" | - -### Channel -| Name | Description | Default Value | -| ------ | --------------------------------- | -------------- | -| name | Name of the channel | mychannel | +## Installing the Chart -### Orderer +To install the chart with the release name `allchannel`: -| Name | Description | Default Value | -| ------- | ----------------------------| --------------------------| -| address | Address for the orderer | orderer1.org1proxy.blockchaincloudpoc.com:443 | - -### Other - -| Name | Description | Default Value | -| ---------- | ---------------------------------------------| --------------- | -| channeltx | Base64 encoded file contents for channeltx | "" | - - - -## Deployment ---- - -To deploy the fabric-channel-create Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-channel-create/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install the chart: - ``` - $ helm repo add bevel https://hyperledger.github.io/bevel/ - $ helm install ./fabric-channel-create - ``` -Replace `` with the desired name for the release. - -This will deploy the fabric-channel-create node to the Kubernetes cluster based on the provided configurations. 
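For illustration, a minimal install sketch that sets the most commonly changed values from the command line instead of editing values.yaml. The release name, namespace, Vault address and peer/orderer addresses below are placeholders, and the value keys follow the chart's reworked `global` and `peer` values layout:

```bash
# Sketch only: install fabric-channel-create with a few --set overrides.
# Every address below is an example, not a chart default.
helm repo add bevel https://hyperledger.github.io/bevel
helm install allchannel bevel/fabric-channel-create \
  --namespace supplychain-net \
  --set global.vault.address=http://vault.example.com:8200 \
  --set global.vault.authPath=supplychain \
  --set global.vault.secretPrefix=data/supplychain \
  --set peer.address=peer0.supplychain-net:7051 \
  --set peer.ordererAddress=orderer1.supplychain-net:7050
```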
- - - -## Verification ---- - -To verify the deployment, we can use the following command: +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install allchannel bevel/fabric-channel-create ``` -$ kubectl get jobs -n -``` -Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. - - -## Updating the Deployment ---- +The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. -If we need to update the deployment with new configurations or changes, modify the same [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-channel-create/values.yaml) file with the desired changes and run the following Helm command: -``` -$ helm upgrade ./fabric-channel-create -``` -Replace `` with the name of the release. This command will apply the changes to the deployment, ensuring the fabric-channel-create node is up to date. +> **Tip**: List all releases using `helm list` +## Uninstalling the Chart - -## Deletion ---- +To uninstall/delete the `allchannel` deployment: -To delete the deployment and associated resources, run the following Helm command: +```bash +helm uninstall allchannel ``` -$ helm uninstall -``` -Replace `` with the name of the release. This command will remove all the resources created by the Helm chart. +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters +These parameters are refered to as same in each parent or child chart +| Name | Description | Default Value | +|--------|---------|-------------| +|`global.version` | Fabric Version. This chart is only used for `2.2.x` | `2.2.2` | +|`global.serviceAccountName` | The serviceaccount name that will be created for Vault Auth and k8S Secret management| `vault-auth` | +| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. Currently ony `aws`, `azure` and `minikube` are tested | `aws` | +| `global.cluster.cloudNativeServices` | only `false` is implemented, `true` to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) is for future | `false` | +| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` and `kubernetes` is supported. | `hashicorp` | +| `global.vault.role` | Role used for authentication with Vault | `vault-role` | +| `global.vault.network` | Network type that is being deployed | `fabric` | +| `global.vault.address`| URL of the Vault server. 
| `""` | +| `global.vault.authPath` | Authentication path for Vault | `carrier` | +| `global.vault.secretEngine` | Vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | Vault secret prefix which must start with `data/` | `data/carrier` | +| `global.vault.tls` | Name of the Kubernetes secret which has certs to connect to TLS enabled Vault | `false` | + +### Image + +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `image.fabricTools` | Fabric Tools image repository | `ghcr.io/hyperledger/bevel-fabric-tools` | +| `image.alpineUtils` | Alpine utils image repository and tag | `ghcr.io/hyperledger/bevel-alpine:latest` | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Create Channel Hyperledger Fabric Deployment Helm Chart](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-channel-create), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). +### Peer +| Name | Description | Default Value | +|--------|---------|-------------| +| `peer.name` | Name of the Peer that is creating the channel | `peer0` | +| `peer.address` | Peer Internal or External Address with port | `peer0.carrier-net:7051` | +| `peer.localMspId` | Peer MSP ID | `carrierMSP` | +| `peer.logLevel` | Peer Log Level | `debug` | +| `peer.tlsStatus` | TLS status of the peer | `true` | +| `peer.ordererAddress` | Orderer Internal or External Address with port for Peer to connect | `orderer1.supplychain-net:7050` | - ## License This chart is licensed under the Apache v2.0 license. -Copyright © 2023 Accenture +Copyright © 2024 Accenture ### Attribution diff --git a/platforms/hyperledger-fabric/charts/fabric-channel-create/files/readme.txt b/platforms/hyperledger-fabric/charts/fabric-channel-create/files/readme.txt new file mode 100644 index 00000000000..bf16a121ea7 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-channel-create/files/readme.txt @@ -0,0 +1 @@ +This is a dummy file. Place the channeltx_base64 file in this directory.. \ No newline at end of file diff --git a/platforms/hyperledger-fabric/charts/fabric-channel-create/templates/_helpers.tpl b/platforms/hyperledger-fabric/charts/fabric-channel-create/templates/_helpers.tpl index 7bf5f530a8e..4b4d123f9eb 100644 --- a/platforms/hyperledger-fabric/charts/fabric-channel-create/templates/_helpers.tpl +++ b/platforms/hyperledger-fabric/charts/fabric-channel-create/templates/_helpers.tpl @@ -1,5 +1,46 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} \ No newline at end of file +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "fabric-channel-create.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "fabric-channel-create.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "fabric-channel-create.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create orderer tls configmap name depending on Configmap existance +*/}} +{{- define "fabric-channel-create.orderercrt" -}} +{{- $secret := lookup "v1" "ConfigMap" .Release.Namespace "orderer-tls-cacert" -}} +{{- if $secret -}} +{{/* + Use this configmap +*/}} +{{- printf "orderer-tls-cacert" -}} +{{- else -}} +{{/* + Use the release configmap +*/}} +{{- printf "%s-orderer-tls-cacert" $.Values.peer.name -}} +{{- end -}} +{{- end -}} diff --git a/platforms/hyperledger-fabric/charts/fabric-channel-create/templates/configmap.yaml b/platforms/hyperledger-fabric/charts/fabric-channel-create/templates/configmap.yaml index d6d00bafeee..39a6c936fe9 100644 --- a/platforms/hyperledger-fabric/charts/fabric-channel-create/templates/configmap.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-channel-create/templates/configmap.yaml @@ -7,43 +7,42 @@ apiVersion: v1 kind: ConfigMap metadata: - name: {{ $.Values.channel.name }}-config - namespace: {{ $.Values.metadata.namespace }} - {{- if $.Values.deployment.annotations }} - annotations: -{{ toYaml $.Values.deployment.annotations | nindent 8 }} - {{- end }} + name: {{ .Release.Name }}-config + namespace: {{ .Release.Namespace }} labels: - app.kubernetes.io/name: {{ $.Values.channel.name }}-config - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} + app: {{ .Release.Name }} + app.kubernetes.io/name: {{ .Release.Name }}-config + app.kubernetes.io/component: configmap + app.kubernetes.io/part-of: {{ include "fabric-channel-create.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm data: - CHANNEL_NAME: {{ $.Values.channel.name }} - FABRIC_LOGGING_SPEC: {{ $.Values.peer.loglevel }} - CORE_PEER_ID: {{ $.Values.peer.name }}.{{ $.Values.metadata.namespace }} - CORE_PEER_ADDRESS: {{ $.Values.peer.address }} - CORE_PEER_LOCALMSPID: {{ $.Values.peer.localmspid }} - CORE_PEER_TLS_ENABLED: "{{ $.Values.peer.tlsstatus }}" + CHANNEL_NAME: {{ .Release.Name }} + FABRIC_LOGGING_SPEC: {{ .Values.peer.logLevel }} + CORE_PEER_ID: {{ .Values.peer.name }}.{{ .Release.Namespace }} + CORE_PEER_ADDRESS: {{ .Values.peer.address }} + CORE_PEER_LOCALMSPID: {{ .Values.peer.localMspId }} + CORE_PEER_TLS_ENABLED: "{{ $.Values.peer.tlsStatus }}" CORE_PEER_TLS_ROOTCERT_FILE: /opt/gopath/src/github.com/hyperledger/fabric/crypto/admin/msp/tlscacerts/tlsca.crt - ORDERER_CA: /opt/gopath/src/github.com/hyperledger/fabric/crypto/orderer/tls/ca.crt - ORDERER_URL: {{ $.Values.orderer.address }} + ORDERER_CA: /opt/gopath/src/github.com/hyperledger/fabric/orderer/tls/orderer.crt + ORDERER_URL: {{ .Values.peer.ordererAddress }} CORE_PEER_MSPCONFIGPATH: /opt/gopath/src/github.com/hyperledger/fabric/crypto/admin/msp - NETWORK_VERSION: {{ $.Values.metadata.network.version }} - + NETWORK_VERSION: {{ .Values.global.version }} --- apiVersion: v1 kind: ConfigMap metadata: - name: channel-artifacts-{{ $.Values.channel.name }} - namespace: {{ $.Values.metadata.namespace }} + name: {{ .Release.Name }}-channel-artifacts + namespace: {{ .Release.Namespace }} labels: - app.kubernetes.io/name: channel-artifacts-{{ $.Values.channel.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} + app: {{ .Release.Name }} + app.kubernetes.io/name: {{ .Release.Name }}-channel-artifacts + app.kubernetes.io/component: channel-artifacts + app.kubernetes.io/part-of: {{ include "fabric-channel-create.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm data: - channel.tx.base64: {{ .Values.channeltx | quote }} - \ No newline at end of file + channeltx_base64: |- + {{ .Files.Get "files/channeltx.json" | nindent 8 }} diff --git a/platforms/hyperledger-fabric/charts/fabric-channel-create/templates/create_channel.yaml b/platforms/hyperledger-fabric/charts/fabric-channel-create/templates/create_channel.yaml index 6c214670fb6..8c1f25d07bc 100644 --- a/platforms/hyperledger-fabric/charts/fabric-channel-create/templates/create_channel.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-channel-create/templates/create_channel.yaml @@ -7,98 +7,125 @@ apiVersion: batch/v1 kind: Job metadata: - name: createchannel-{{ $.Values.channel.name }} - namespace: {{ $.Values.metadata.namespace }} - {{- if $.Values.deployment.annotations }} - annotations: -{{ toYaml $.Values.deployment.annotations | nindent 8 }} - {{- end }} + name: createchannel-{{ .Release.Name }} + namespace: {{ .Release.Namespace }} labels: - app: createchannel-{{ $.Values.channel.name }} - app.kubernetes.io/name: createchannel-{{ $.Values.channel.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . 
| nindent 2 }} + app: {{ .Release.Name }} + app.kubernetes.io/name: createchannel-{{ .Release.Name }} + app.kubernetes.io/component: fabric-channel-create-job + app.kubernetes.io/part-of: {{ include "fabric-channel-create.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm spec: backoffLimit: 6 template: metadata: labels: - app: createchannel-{{ $.Values.channel.name }} - app.kubernetes.io/name: createchannel-{{ $.Values.channel.name }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} + app: {{ .Release.Name }} + app.kubernetes.io/name: createchannel-{{ .Release.Name }} + app.kubernetes.io/component: fabric-channel-create-job + app.kubernetes.io/part-of: {{ include "fabric-channel-create.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm spec: restartPolicy: "OnFailure" - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - {{- if .Values.vault.imagesecretname }} + serviceAccountName: {{ .Values.global.serviceAccountName }} + {{- if .Values.image.pullSecret }} imagePullSecrets: - - name: {{ $.Values.vault.imagesecretname }} + - name: {{ .Values.image.pullSecret }} {{- end }} volumes: - {{ if .Values.vault.tls }} + {{ if .Values.global.vault.tls }} - name: vaultca secret: - secretName: {{ $.Values.vault.tls }} + secretName: {{ .Values.global.vault.tls }} items: - key: ca.crt.pem path: ca-certificates.crt {{ end }} - - name: channel-artifacts-{{ $.Values.channel.name }} + - name: {{ .Release.Name }}-channel-artifacts configMap: - name: channel-artifacts-{{ $.Values.channel.name }} + name: {{ .Release.Name }}-channel-artifacts - name: certificates emptyDir: medium: Memory - name: scripts-volume configMap: name: bevel-vault-script + - name: orderer-tls-cacert + configMap: + name: {{ include "fabric-channel-create.orderercrt" . }} + defaultMode: 0775 + items: + - key: cacert + path: orderer.crt initContainers: - name: certificates-init - image: {{ $.Values.metadata.images.alpineutils }} + image: {{ .Values.image.alpineUtils }} imagePullPolicy: IfNotPresent env: - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} + value: {{ .Values.global.vault.address }} - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: VAULT_PEER_SECRET_PREFIX - value: "{{ $.Values.vault.adminsecretprefix }}" - - name: VAULT_ORDERER_SECRET_PREFIX - value: "{{ $.Values.vault.orderersecretprefix }}" + value: {{ .Values.global.vault.role }} + - name: KUBERNETES_AUTH_PATH + value: {{ .Values.global.vault.authPath }} + - name: VAULT_SECRET_ENGINE + value: "{{ .Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ .Values.global.vault.secretPrefix }}" + - name: VAULT_TYPE + value: "{{ .Values.global.vault.type }}" - name: MOUNT_PATH value: /secret - - name: VAULT_TYPE - value: "{{ $.Values.vault.type }}" command: ["sh", "-c"] args: - |- #!/usr/bin/env sh - . /scripts/bevel-vault.sh +{{- if eq .Values.global.vault.type "hashicorp" }} + . /scripts/bevel-vault.sh vaultBevelFunc "init" - echo "Getting Orderer TLS certificates from Vault." - vaultBevelFunc "readJson" "${VAULT_ORDERER_SECRET_PREFIX}/tls" - TLS_CA_CERT=$(echo ${VAULT_SECRET} | jq -r '.["ca.crt"]') + function getAdminMspSecret { + KEY=$1 + + echo "Getting MSP certificates from Vault." 
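+          # vaultBevelFunc "readJson" loads the JSON secret at the given Vault KV path into
+          # VAULT_SECRET; the individual certificate and key fields are then extracted with
+          # jq and written out below as the admin MSP directory tree.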
+ vaultBevelFunc "readJson" "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/users/${KEY}" - OUTPUT_PATH="${MOUNT_PATH}/orderer/tls" - mkdir -p ${OUTPUT_PATH} - echo "${TLS_CA_CERT}" >> ${OUTPUT_PATH}/ca.crt + ADMINCERT=$(echo ${VAULT_SECRET} | jq -r '.["admincerts"]') + CACERTS=$(echo ${VAULT_SECRET} | jq -r '.["cacerts"]') + KEYSTORE=$(echo ${VAULT_SECRET} | jq -r '.["keystore"]') + SIGNCERTS=$(echo ${VAULT_SECRET} | jq -r '.["signcerts"]') + TLSCACERTS=$(echo ${VAULT_SECRET} | jq -r '.["tlscacerts"]') - ############################################################################### + echo "${ADMINCERT}" > ${OUTPUT_PATH}/admincerts/admin.crt + echo "${CACERTS}" > ${OUTPUT_PATH}/cacerts/ca.crt + echo "${KEYSTORE}" > ${OUTPUT_PATH}/keystore/server.key + echo "${SIGNCERTS}" > ${OUTPUT_PATH}/signcerts/server.crt + echo "${TLSCACERTS}" > ${OUTPUT_PATH}/tlscacerts/tlsca.crt + } - echo "Getting MSP certificates from Vault." - vaultBevelFunc "readJson" "${VAULT_PEER_SECRET_PREFIX}/msp" +{{- else }} + function getAdminMspSecret { + KEY=$1 + KUBENETES_SECRET=$(kubectl get secret ${KEY} --namespace {{ .Release.Namespace }} -o json) - ADMINCERT=$(echo ${VAULT_SECRET} | jq -r '.["admincerts"]') - CACERTS=$(echo ${VAULT_SECRET} | jq -r '.["cacerts"]') - KEYSTORE=$(echo ${VAULT_SECRET} | jq -r '.["keystore"]') - SIGNCERTS=$(echo ${VAULT_SECRET} | jq -r '.["signcerts"]') - TLSCACERTS=$(echo ${VAULT_SECRET} | jq -r '.["tlscacerts"]') + ADMINCERT=$(echo ${KUBENETES_SECRET} | jq -r '.data.admincerts' | base64 -d) + CACERTS=$(echo ${KUBENETES_SECRET} | jq -r '.data.cacerts' | base64 -d) + KEYSTORE=$(echo ${KUBENETES_SECRET} | jq -r '.data.keystore' | base64 -d) + SIGNCERTS=$(echo ${KUBENETES_SECRET} | jq -r '.data.signcerts' | base64 -d) + TLSCACERTS=$(echo ${KUBENETES_SECRET} | jq -r '.data.tlscacerts' | base64 -d) + + echo "${ADMINCERT}" > ${OUTPUT_PATH}/admincerts/admin.crt + echo "${CACERTS}" > ${OUTPUT_PATH}/cacerts/ca.crt + echo "${KEYSTORE}" > ${OUTPUT_PATH}/keystore/server.key + echo "${SIGNCERTS}" > ${OUTPUT_PATH}/signcerts/server.crt + echo "${TLSCACERTS}" > ${OUTPUT_PATH}/tlscacerts/tlsca.crt + } +{{- end }} OUTPUT_PATH="${MOUNT_PATH}/admin/msp" mkdir -p ${OUTPUT_PATH}/admincerts @@ -106,14 +133,9 @@ spec: mkdir -p ${OUTPUT_PATH}/keystore mkdir -p ${OUTPUT_PATH}/signcerts mkdir -p ${OUTPUT_PATH}/tlscacerts - - echo "${ADMINCERT}" >> ${OUTPUT_PATH}/admincerts/admin.crt - echo "${CACERTS}" >> ${OUTPUT_PATH}/cacerts/ca.crt - echo "${KEYSTORE}" >> ${OUTPUT_PATH}/keystore/server.key - echo "${SIGNCERTS}" >> ${OUTPUT_PATH}/signcerts/server.crt - echo "${TLSCACERTS}" >> ${OUTPUT_PATH}/tlscacerts/tlsca.crt + getAdminMspSecret admin-msp volumeMounts: - {{ if .Values.vault.tls }} + {{ if .Values.global.vault.tls }} - name: vaultca mountPath: "/etc/ssl/certs/" readOnly: true @@ -125,7 +147,7 @@ spec: subPath: bevel-vault.sh containers: - name: createchannel - image: {{ $.Values.metadata.images.fabrictools }} + image: {{ .Values.image.fabricTools }}:{{ .Values.global.version }} imagePullPolicy: IfNotPresent stdin: true tty: true @@ -133,47 +155,39 @@ spec: args: - |- #!/usr/bin/env sh - cat ./channel-artifacts/channel.tx.base64 | base64 -d > channel.tx + cat ./channel-artifacts/channeltx_base64 | base64 -d > channel.tx + echo "Fetch block to see if channel has already been created..." 
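+          # `peer channel fetch 0` retrieves the channel's genesis block and fails when the
+          # channel does not exist yet, so the file check on /tmp/${CHANNEL_NAME}.block below
+          # acts as an idempotency guard: `peer channel create` runs only if no block was fetched.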
tls_status=${CORE_PEER_TLS_ENABLED} if [ "$tls_status" = "true" ] then peer channel fetch 0 -c ${CHANNEL_NAME} --tls --cafile ${ORDERER_CA} -o ${ORDERER_URL} /tmp/${CHANNEL_NAME}.block else - peer channel fetch 0 -c ${CHANNEL_NAME} -o ${ORDERER_URL} /tmp/${CHANNEL_NAME}.block + peer channel fetch 0 -c ${CHANNEL_NAME} -o ${ORDERER_URL} /tmp/${CHANNEL_NAME}.block fi - if [ -f /tmp/${CHANNEL_NAME}.block ] then echo "Channel ${CHANNEL_NAME} is already created." else echo "Creating Channel ${CHANNEL_NAME}" - version1_4=`echo $NETWORK_VERSION | grep -c 1.4` if [ "$tls_status" = "true" ] then - if [ $version1_4 = 1 ] - then - peer channel create -o ${ORDERER_URL} -c ${CHANNEL_NAME} -f channel.tx --tls ${CORE_PEER_TLS_ENABLED} --cafile ${ORDERER_CA} - else - peer channel create -o ${ORDERER_URL} -c ${CHANNEL_NAME} -f channel.tx --tls ${CORE_PEER_TLS_ENABLED} --cafile ${ORDERER_CA} --outputBlock /tmp/${CHANNEL_NAME}.block - fi + peer channel create -o ${ORDERER_URL} -c ${CHANNEL_NAME} -f channel.tx --tls ${CORE_PEER_TLS_ENABLED} --cafile ${ORDERER_CA} --outputBlock /tmp/${CHANNEL_NAME}.block else - if [ $version1_4 = 1 ] - then - peer channel create -o ${ORDERER_URL} -c ${CHANNEL_NAME} -f channel.tx - else - peer channel create -o ${ORDERER_URL} -c ${CHANNEL_NAME} -f channel.tx --outputBlock /tmp/${CHANNEL_NAME}.block - fi + peer channel create -o ${ORDERER_URL} -c ${CHANNEL_NAME} -f channel.tx --outputBlock /tmp/${CHANNEL_NAME}.block fi fi workingDir: /opt/gopath/src/github.com/hyperledger/fabric/peer envFrom: - configMapRef: - name: {{ $.Values.channel.name }}-config + name: {{ .Release.Name }}-config volumeMounts: - name: certificates mountPath: /opt/gopath/src/github.com/hyperledger/fabric/crypto readOnly: true - - name: channel-artifacts-{{ $.Values.channel.name }} + - name: {{ .Release.Name }}-channel-artifacts mountPath: /opt/gopath/src/github.com/hyperledger/fabric/peer/channel-artifacts readOnly: true + - name: orderer-tls-cacert + mountPath: /opt/gopath/src/github.com/hyperledger/fabric/orderer/tls/orderer.crt + subPath: orderer.crt diff --git a/platforms/hyperledger-fabric/charts/fabric-channel-create/values.yaml b/platforms/hyperledger-fabric/charts/fabric-channel-create/values.yaml index d52002dffd1..0a6da1ff8aa 100644 --- a/platforms/hyperledger-fabric/charts/fabric-channel-create/values.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-channel-create/values.yaml @@ -3,85 +3,69 @@ # # SPDX-License-Identifier: Apache-2.0 ############################################################################################## +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +--- +# The following are for overriding global values +global: + # HLF Network Version + #Eg. version: 2.2.2 + version: 2.2.2 + #Provide the service account name which will be created. + serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # only 'false' is implemented + vault: + #Provide the type of vault + #Eg. type: hashicorp + type: hashicorp + #Provide the vaultrole for an organization + #Eg. vaultrole: org1-vault-role + role: vault-role + #Provide the network type + network: fabric + #Provide the vault server address + #Eg. vaultaddress: http://Vault-884963190.eu-west-1.elb.amazonaws.com + address: + #Provide the kubernetes auth backed configured in vault for an organization + #Eg. authpath: carrier + authPath: carrier + #Provide the secret engine. 
+ secretEngine: secretsv2 + #Provide the vault path where the secrets will be stored + secretPrefix: "data/carrier" + #Enable or disable TLS for vault communication + #Eg. tls: true + tls: -metadata: - #Provide the namespace for organization's peer - #Eg. namespace: org1-net - namespace: org1-net - images: - #Provide the valid image name and version for fabric tools - #Eg. fabrictools: hyperledger/fabric-tools:1.4.0 - fabrictools: ghcr.io/hyperledger/bevel-fabric-tools:2.2.2 - #Provide the valid image name and version to read certificates from vault server - #Eg. alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - #Provide the custom labels - #NOTE: Provide labels other than name, release name , release service, chart version , chart name , app. - #Eg. labels: - # role: create_channel - labels: - -deployment: - annotations: - +image: + #Provide the valid image name and version for fabric tools + #Eg. fabricTools: hyperledger/fabrictools + fabricTools: ghcr.io/hyperledger/bevel-fabric-tools + #Provide the valid image name and version to read certificates from vault server + #Eg. alpineutils: ghcr.io/hyperledger/bevel-alpine:latest + alpineUtils: ghcr.io/hyperledger/bevel-alpine:latest + #Provide the secret to use if private repository + #Eg. pullSecret: regcred + pullSecret: peer: #Provide the name of the peer as per deployment yaml. #Eg. name: peer0 name: peer0 #Provide the address of the peer who creates the channel and port to be mentioned is grpc cluster IP port - #Eg. address: peer0.org1-net:7051 - address: peer0.org1-net:7051 - #Provide the localmspid for organization - #Eg. localmspid: Org1MSP - localmspid: Org1MSP - #Provide the loglevel for organization's peer - #Eg. loglevel: info - loglevel: debug - #Provide the value for tlsstatus to be true or false for organization's peer - #Eg. tlsstatus: true - tlsstatus: true - - -vault: - #Provide the vaultrole for an organization - #Eg. vaultrole: org1-vault-role - role: vault-role - #Provide the vault server address - #Eg. vaultaddress: http://Vault-884963190.eu-west-1.elb.amazonaws.com - address: - #Provide the kubernetes auth backed configured in vault for an organization - #Eg. authpath: devorg1-net-auth1 - authpath: devorg1-net-auth - #Provide the value for vault secretprefix - #Eg. adminsecretprefix: secretsv2/data/crypto/peerOrganizations/.../users/admin - adminsecretprefix: secretsv2/data/crypto/peerOrganizations/org1-net/users/admin - #Provide the value for vault secretprefix - #Eg. orderersecretprefix: secretsv2/data/crypto/peerOrganizations/.../orderer - orderersecretprefix: secretsv2/data/crypto/peerOrganizations/org1-net/orderer - #Provide the serviceaccountname for vault - #Eg. serviceaccountname: vault-auth - serviceaccountname: vault-auth - #Provide the type of vault - #Eg. type: hashicorp - type: hashicorp - #Provide the imagesecretname for vault - #Eg. imagesecretname: regcred - imagesecretname: "" - #Kuberenetes secret for vault ca.cert - #Enable or disable TLS for vault communication if value present or not - #Eg. tls: vaultca - tls: - -channel: - #Provide the name of the channel - #Eg. name: mychannel - name: mychannel - -orderer: + #Eg. address: peer0.carrier-net:7051 + address: peer0.carrier-net:7051 + #Provide the localMspId for organization + #Eg. localMspId: carrierMSP + localMspId: carrierMSP + #Provide the logLevel for organization's peer + #Eg. 
logLevel: info + logLevel: debug + #Provide the value for tlsStatus to be true or false for organization's peer + #Eg. tlsStatus: true + tlsStatus: true #Provide the address for orderer - #Eg. address: orderer.fratest-com:7050 - address: orderer1.org1proxy.blockchaincloudpoc.com:443 - -#Provide the base64 encoded file contents for channeltx -channeltx: + #Eg. ordererAddress: orderer1.test.yourdomain.com:443 + ordererAddress: orderer1.supplychain-net:7050 diff --git a/platforms/hyperledger-fabric/charts/fabric-channel-join/.helmignore b/platforms/hyperledger-fabric/charts/fabric-channel-join/.helmignore new file mode 100644 index 00000000000..014fa775608 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-channel-join/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +generated_config/ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/platforms/hyperledger-fabric/charts/fabric-channel-join/Chart.yaml b/platforms/hyperledger-fabric/charts/fabric-channel-join/Chart.yaml index f4fa27ef82b..4b659443cc8 100644 --- a/platforms/hyperledger-fabric/charts/fabric-channel-join/Chart.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-channel-join/Chart.yaml @@ -5,7 +5,22 @@ ############################################################################################## apiVersion: v1 -appVersion: "2.0" -description: "Hyperledger Fabric: Joins the peer to the channel." name: fabric-channel-join -version: 1.0.0 +description: "Hyperledger Fabric: Joins the peer to the channel." +version: 1.1.0 +appVersion: latest +keywords: + - bevel + - hlf + - fabric + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/hyperledger-fabric/charts/fabric-channel-join/README.md b/platforms/hyperledger-fabric/charts/fabric-channel-join/README.md index fb57f297204..562e5989108 100644 --- a/platforms/hyperledger-fabric/charts/fabric-channel-join/README.md +++ b/platforms/hyperledger-fabric/charts/fabric-channel-join/README.md @@ -3,175 +3,102 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) - -# Join Channel Hyperledger Fabric Deployment +# fabric-channel-join -- [Join Channel Hyperledger Fabric Deployment Helm Chart](#join-channel-hyperledger-fabric-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Verification](#verification) -- [Updating the Deployment](#updating-the-deployment) -- [Deletion](#deletion) -- [Contributing](#contributing) -- [License](#license) +This chart is a component of Hyperledger Bevel. The fabric-channel-join chart deploys a Kubernetes job to join a channel. This chart should be executed after the [fabric-channel-create](../fabric-channel-create/README.md) chart or the [fabric-osnadmin-channel-create](../fabric-osnadmin-channel-create/README.md) chart for 2.5.x and the anchortx should be present in `files`. 
See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details. +## TL;DR - -## Join Channel Hyperledger Fabric Deployment Helm Chart ---- -A [Helm chart](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-fabric/charts/fabric-channel-join) for joining the channel. - +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install peer0-allchannel bevel/fabric-channel-join +``` - ## Prerequisites ---- -Before deploying the Helm chart, make sure to have the following prerequisites: -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm installed. +- Kubernetes 1.19+ +- Helm 3.2.0+ +If Hashicorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ - -## Chart Structure ---- -The structure of the Helm chart is as follows: +Also, for Fabric 2.2.x, the [fabric-genesis](../fabric-genesis/README.md) and [fabric-channel-create](../fabric-channel-create/README.md) charts should be installed. +For Fabric 2.5.x, the [fabric-osnadmin-channel-create](../fabric-osnadmin-channel-create/README.md) chart should be installed before this chart. +Then you can get the channeltx with following commands: +```bash +cd ./fabric-channel-join/files +kubectl --namespace supplychain-net get configmap allchannel-supplychain-anchortx -o jsonpath='{.data.allchannel-supplychain-anchortx_base64}' > anchortx.json ``` -fabric-channel-join/ - |- templates/ - |- _helpers.yaml - |- configmap.yaml - |- join_channel.yaml - |- Chart.yaml - |- README.md - |- values.yaml -``` - -- `templates/`: Contains the Kubernetes manifest templates that define the resources to be deployed. -- `helpers.tpl`: Contains custom label definitions used in other templates. -- `configmap.yaml`: Stores the configuration for the joinchannel container. -- `join_channel.yaml`: The certificates-init retrieves TLS and MSP certificates from Vault and stores them in the local filesystem. The joinchannel joins the peer to the channel by fetching the channel configuration block from the orderer. Both containers are essential for the peer to join the channel and start participating in the network. -- `Chart.yaml`: Contains the metadata for the Helm chart, such as the name, version, and description. -- `README.md`: Provides information and instructions about the Helm chart. -- `values.yaml`: Contains the default configuration values for the Helm chart. - - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-fabric/charts/fabric-channel-join/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. 
Here are some important configuration options: - -### Metadata - -| Name | Description | Default Value | -| -----------------------| ---------------------------------------------------------------------------------| --------------------------------------------------| -| namespace | Namespace for organization's peer | org1-net | -| images.fabrictools | Valid image name and version for Fabric tools | ghcr.io/hyperledger/bevel-fabric-tools:2.2.2 | -| images.alpineutils | Valid image name and version to read certificates from the Vault server | ghcr.io/hyperledger/bevel-alpine:latest | -| labels | Custom labels for the organization | "" | - -### Peer - -| Name | Description | Default Value | -| ------------| -------------------------------------------------| ----------------------------| -| name | Name of the peer as per deployment YAML | peer0 | -| address | Address of the peer and its grpc cluster IP port | peer0.org1-net:7051 | -| localmspid | Local MSPID for the organization | Org1MSP | -| loglevel | Log level for the organization's peer | info | -| tlsstatus | TLS status for the organization's peer | true | - -### Vault - -| Name | Description | Default Value | -| ----------------------| ------------------------------------------------------------------| -----------------------------| -| role | Vault role for the organization | vault-role | -| address | Vault server address | "" | -| authpath | Kubernetes auth backend configured in Vault for the organization | devorg1-net-auth | -| adminsecretprefix | Vault secretprefix for admin | secretsv2/data/crypto/peerOrganizations/org1-net/users/admin | -| orderersecretprefix | Vault secretprefix for orderer | secretsv2/data/crypto/peerOrganizations/org1-net/orderer | -| serviceaccountname | Service account name for Vault | vault-auth | -| type | Provide the type of vault | hashicorp | -| imagesecretname | Imagesecret name for Vault | "" | - -### channel -| Name | Description | Default Value | -| ----------| ----------------------| -----------------| -| address | Name of the channel | mychannel | +## Installing the Chart -### Orderer +To install the chart with the release name `peer0-allchannel`: -| Name | Description | Default Value | -| ----------| --------------------------| -----------------------------| -| address | Address for the orderer | orderer1.org1proxy.blockchaincloudpoc.com:443 | - - - -## Deployment ---- - -To deploy the fabric-channel-join Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-channel-join/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install the chart: - ``` - $ helm repo add bevel https://hyperledger.github.io/bevel/ - $ helm install ./fabric-channel-join - ``` -Replace `` with the desired name for the release. - -This will deploy the fabric-channel-join node to the Kubernetes cluster based on the provided configurations. - - - -## Verification ---- - -To verify the deployment, we can use the following command: -``` -$ kubectl get jobs -n +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install peer0-allchannel bevel/fabric-channel-join ``` -Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. +The command deploys the chart on the Kubernetes cluster in the default configuration. 
The [Parameters](#parameters) section lists the parameters that can be configured during installation. - -## Updating the Deployment ---- +> **Tip**: List all releases using `helm list` -If we need to update the deployment with new configurations or changes, modify the same [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-channel-join/values.yaml) file with the desired changes and run the following Helm command: -``` -$ helm upgrade ./fabric-channel-join -``` -Replace `` with the name of the release. This command will apply the changes to the deployment, ensuring the fabric-channel-join node is up to date. +## Uninstalling the Chart +To uninstall/delete the `peer0-allchannel` deployment: - -## Deletion ---- - -To delete the deployment and associated resources, run the following Helm command: -``` -$ helm uninstall +```bash +helm uninstall peer0-allchannel ``` -Replace `` with the name of the release. This command will remove all the resources created by the Helm chart. +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters +These parameters are refered to as same in each parent or child chart +| Name | Description | Default Value | +|--------|---------|-------------| +|`global.version` | Fabric Version. | `2.5.4` | +|`global.serviceAccountName` | The serviceaccount name that will be created for Vault Auth and k8S Secret management| `vault-auth` | +| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. Currently ony `aws`, `azure` and `minikube` are tested | `aws` | +| `global.cluster.cloudNativeServices` | only `false` is implemented, `true` to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) is for future | `false` | +| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` and `kubernetes` is supported. | `hashicorp` | +| `global.vault.role` | Role used for authentication with Vault | `vault-role` | +| `global.vault.network` | Network type that is being deployed | `fabric` | +| `global.vault.address`| URL of the Vault server. | `""` | +| `global.vault.authPath` | Authentication path for Vault | `supplychain` | +| `global.vault.secretEngine` | Vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | Vault secret prefix which must start with `data/` | `data/supplychain` | +| `global.vault.tls` | Name of the Kubernetes secret which has certs to connect to TLS enabled Vault | `false` | + +### Image + +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `image.fabricTools` | Fabric Tools image repository | `ghcr.io/hyperledger/bevel-fabric-tools` | +| `image.alpineUtils` | Alpine utils image repository and tag | `ghcr.io/hyperledger/bevel-alpine:latest` | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Join Channel Hyperledger Fabric Deployment Helm Chart](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-channel-join), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). 
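After installing the chart as shown above, a quick way to confirm the join job ran to completion is sketched below; the release name `peer0-allchannel` and namespace `supplychain-net` follow the earlier examples, and the `app` label matches the one this chart sets on the job template:

```bash
# Sketch: confirm the channel-join job completed and inspect its logs.
kubectl --namespace supplychain-net get jobs
kubectl --namespace supplychain-net logs --selector app=peer0-allchannel --tail=50
```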
+### Peer +| Name | Description | Default Value | +|--------|---------|-------------| +| `peer.name` | Name of the Peer that is joining the channel | `peer0` | +| `peer.type` | Type of the Peer that is joining the channel, choose between `anchor` or `general` | `anchor` | +| `peer.address` | Peer Internal or External Address with port | `peer0.supplychain-net:7051` | +| `peer.localMspId` | Peer MSP ID | `supplychainMSP` | +| `peer.logLevel` | Peer Log Level | `info` | +| `peer.tlsStatus` | TLS status of the peer | `true` | +| `peer.channelName` | Name of the channel this peer wants to join | `AllChannel` | +| `peer.ordererAddress` | Orderer Internal or External Address with port for Peer to connect | `orderer1.supplychain-net:7050` | - ## License This chart is licensed under the Apache v2.0 license. -Copyright © 2023 Accenture +Copyright © 2024 Accenture ### Attribution diff --git a/platforms/hyperledger-fabric/charts/fabric-channel-join/files/readme.txt b/platforms/hyperledger-fabric/charts/fabric-channel-join/files/readme.txt new file mode 100644 index 00000000000..bf16a121ea7 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-channel-join/files/readme.txt @@ -0,0 +1 @@ +This is a dummy file. Place the channeltx_base64 file in this directory.. \ No newline at end of file diff --git a/platforms/hyperledger-fabric/charts/fabric-channel-join/templates/_helpers.tpl b/platforms/hyperledger-fabric/charts/fabric-channel-join/templates/_helpers.tpl index 7bf5f530a8e..869da311d62 100644 --- a/platforms/hyperledger-fabric/charts/fabric-channel-join/templates/_helpers.tpl +++ b/platforms/hyperledger-fabric/charts/fabric-channel-join/templates/_helpers.tpl @@ -1,5 +1,46 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} \ No newline at end of file +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "fabric-channel-join.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "fabric-channel-join.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "fabric-channel-join.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create orderer tls configmap name depending on Configmap existance +*/}} +{{- define "fabric-channel-join.orderercrt" -}} +{{- $secret := lookup "v1" "ConfigMap" .Release.Namespace "orderer-tls-cacert" -}} +{{- if $secret -}} +{{/* + Use this configmap +*/}} +{{- printf "orderer-tls-cacert" -}} +{{- else -}} +{{/* + Use the release configmap +*/}} +{{- printf "%s-orderer-tls-cacert" $.Values.peer.name -}} +{{- end -}} +{{- end -}} diff --git a/platforms/hyperledger-fabric/charts/fabric-channel-join/templates/anchorpeer.yaml b/platforms/hyperledger-fabric/charts/fabric-channel-join/templates/anchorpeer.yaml new file mode 100644 index 00000000000..8be70ed1a79 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-channel-join/templates/anchorpeer.yaml @@ -0,0 +1,230 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## +{{- if eq .Values.peer.type "anchor" }} +apiVersion: batch/v1 +kind: Job +metadata: + name: anchorpeer-{{ .Release.Name }} + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": post-install + "helm.sh/hook-weight": "1" + "helm.sh/hook-delete-policy": "before-hook-creation" + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: anchorpeer-{{ .Release.Name }} + app.kubernetes.io/component: fabric-anchorpeer-job + app.kubernetes.io/part-of: anchorpeer-{{ .Release.Name }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +spec: + backoffLimit: 3 + template: + metadata: + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: anchorpeer-{{ .Release.Name }} + app.kubernetes.io/component: fabric-anchorpeer-job + app.kubernetes.io/part-of: anchorpeer-{{ .Release.Name }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + spec: + restartPolicy: "OnFailure" + serviceAccountName: {{ .Values.global.serviceAccountName }} + {{- if .Values.image.pullSecret }} + imagePullSecrets: + - name: {{ .Values.image.pullSecret }} + {{- end }} + volumes: + {{ if .Values.global.vault.tls }} + - name: vaultca + secret: + secretName: {{ .Values.global.vault.tls }} + items: + - key: ca.crt.pem + path: ca-certificates.crt # curl expects certs to be in /etc/ssl/certs/ca-certificates.crt + {{ end }} + - name: certificates + emptyDir: + medium: Memory + - name: orderer-tls-cacert + configMap: + name: {{ include "fabric-channel-join.orderercrt" . 
}} + defaultMode: 0775 + items: + - key: cacert + path: orderer.crt + {{- if ne (.Values.global.version | trunc 3) "2.5" }} + - name: anchorpeer-artifacts + configMap: + name: {{ .Release.Name }}-anchor-artifacts + {{- end }} + - name: scripts-volume + configMap: + name: bevel-vault-script + initContainers: + - name: certificates-init + image: {{ .Values.image.alpineUtils }} + imagePullPolicy: IfNotPresent + env: + - name: VAULT_ADDR + value: {{ .Values.global.vault.address }} + - name: VAULT_APP_ROLE + value: {{ .Values.global.vault.role }} + - name: KUBERNETES_AUTH_PATH + value: {{ .Values.global.vault.authPath }} + - name: VAULT_SECRET_ENGINE + value: "{{ .Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ .Values.global.vault.secretPrefix }}" + - name: VAULT_TYPE + value: "{{ .Values.global.vault.type }}" + - name: MOUNT_PATH + value: /secret + command: ["sh", "-c"] + args: + - |- + #!/usr/bin/env sh +{{- if eq .Values.global.vault.type "hashicorp" }} + + . /scripts/bevel-vault.sh + # Calling a function to retrieve the vault token. + vaultBevelFunc "init" + + function getAdminMspSecret { + KEY=$1 + + echo "Getting MSP certificates from Vault." + vaultBevelFunc "readJson" "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/users/${KEY}" + + ADMINCERT=$(echo ${VAULT_SECRET} | jq -r '.["admincerts"]') + CACERTS=$(echo ${VAULT_SECRET} | jq -r '.["cacerts"]') + KEYSTORE=$(echo ${VAULT_SECRET} | jq -r '.["keystore"]') + SIGNCERTS=$(echo ${VAULT_SECRET} | jq -r '.["signcerts"]') + TLSCACERTS=$(echo ${VAULT_SECRET} | jq -r '.["tlscacerts"]') + + echo "${ADMINCERT}" > ${OUTPUT_PATH}/admincerts/admin.crt + echo "${CACERTS}" > ${OUTPUT_PATH}/cacerts/ca.crt + echo "${KEYSTORE}" > ${OUTPUT_PATH}/keystore/server.key + echo "${SIGNCERTS}" > ${OUTPUT_PATH}/signcerts/server.crt + echo "${TLSCACERTS}" > ${OUTPUT_PATH}/tlscacerts/tlsca.crt + } + +{{- else }} + + function getAdminMspSecret { + KEY=$1 + KUBENETES_SECRET=$(kubectl get secret ${KEY} --namespace {{ .Release.Namespace }} -o json) + + ADMINCERT=$(echo ${KUBENETES_SECRET} | jq -r '.data.admincerts' | base64 -d) + CACERTS=$(echo ${KUBENETES_SECRET} | jq -r '.data.cacerts' | base64 -d) + KEYSTORE=$(echo ${KUBENETES_SECRET} | jq -r '.data.keystore' | base64 -d) + SIGNCERTS=$(echo ${KUBENETES_SECRET} | jq -r '.data.signcerts' | base64 -d) + TLSCACERTS=$(echo ${KUBENETES_SECRET} | jq -r '.data.tlscacerts' | base64 -d) + + echo "${ADMINCERT}" > ${OUTPUT_PATH}/admincerts/admin.crt + echo "${CACERTS}" > ${OUTPUT_PATH}/cacerts/ca.crt + echo "${KEYSTORE}" > ${OUTPUT_PATH}/keystore/server.key + echo "${SIGNCERTS}" > ${OUTPUT_PATH}/signcerts/server.crt + echo "${TLSCACERTS}" > ${OUTPUT_PATH}/tlscacerts/tlsca.crt + } +{{- end }} + + OUTPUT_PATH="${MOUNT_PATH}/admin/msp" + mkdir -p ${OUTPUT_PATH}/admincerts + mkdir -p ${OUTPUT_PATH}/cacerts + mkdir -p ${OUTPUT_PATH}/keystore + mkdir -p ${OUTPUT_PATH}/signcerts + mkdir -p ${OUTPUT_PATH}/tlscacerts + getAdminMspSecret admin-msp + + volumeMounts: + {{ if .Values.global.vault.tls }} + - name: vaultca + mountPath: "/etc/ssl/certs/" + readOnly: true + {{ end }} + - name: certificates + mountPath: /secret + - name: scripts-volume + mountPath: /scripts/bevel-vault.sh + subPath: bevel-vault.sh + containers: + - name: anchorpeer + image: {{ .Values.image.fabricTools }}:{{ .Values.global.version }} + imagePullPolicy: IfNotPresent + stdin: true + tty: true + command: ["sh", "-c"] + args: + - |- + #!/usr/bin/env sh + + version2_5=`echo $NETWORK_VERSION | grep -c 2.5` + + if [ $version2_5 = 1 ] + 
then + echo "Fetching the most recent configuration block for the channel" + peer channel fetch config config_block.pb -o ${ORDERER_URL} -c ${CHANNEL_NAME} --tls --cafile ${ORDERER_CA} + + echo "Decoding config block to JSON and isolating config to ${CORE_PEER_LOCALMSPID}config.json" + configtxlator proto_decode --input config_block.pb --type common.Block --output config_block.json + jq .data.data[0].payload.data.config config_block.json >"${CORE_PEER_LOCALMSPID}config.json" + + CHECK_ANCHOR=$(jq '.channel_group.groups.Application.groups.'${CORE_PEER_LOCALMSPID}'.values.AnchorPeers.value.anchor_peers' ${CORE_PEER_LOCALMSPID}config.json) + + PORT="${CORE_PEER_ADDRESS##*:}" + HOST="${CORE_PEER_ADDRESS%%:*}" + if echo "$CHECK_ANCHOR" | grep -q "$HOST"; then + echo "The anchopeer has already been created" + else + jq '.channel_group.groups.Application.groups.'${CORE_PEER_LOCALMSPID}'.values += {"AnchorPeers":{"mod_policy": "Admins","value":{"anchor_peers": [{"host": "'$HOST'","port": '$PORT'}]},"version": "0"}}' ${CORE_PEER_LOCALMSPID}config.json > ${CORE_PEER_LOCALMSPID}modified_config.json + + configtxlator proto_encode --input "${CORE_PEER_LOCALMSPID}config.json" --type common.Config --output original_config.pb + configtxlator proto_encode --input "${CORE_PEER_LOCALMSPID}modified_config.json" --type common.Config --output modified_config.pb + configtxlator compute_update --channel_id "${CHANNEL_NAME}" --original original_config.pb --updated modified_config.pb --output config_update.pb + configtxlator proto_decode --input config_update.pb --type common.ConfigUpdate --output config_update.json + echo '{"payload":{"header":{"channel_header":{"channel_id":"'$CHANNEL_NAME'", "type":2}},"data":{"config_update":'$(cat config_update.json)'}}}' | jq . >config_update_in_envelope.json + configtxlator proto_encode --input config_update_in_envelope.json --type common.Envelope --output "${CORE_PEER_LOCALMSPID}anchors.tx" + + peer channel update -o ${ORDERER_URL} -c ${CHANNEL_NAME} -f ${CORE_PEER_LOCALMSPID}anchors.tx --tls --cafile ${ORDERER_CA} + fi + else + echo "Updating anchor peer for the channel ${CHANNEL_NAME}" + tls_status=${CORE_PEER_TLS_ENABLED} + if [ "$tls_status" = "true" ] + then + peer channel fetch 0 ${CHANNEL_NAME}.block -o ${ORDERER_URL} -c ${CHANNEL_NAME} --tls --cafile ${ORDERER_CA} + else + peer channel fetch 0 ${CHANNEL_NAME}.block -o ${ORDERER_URL} -c ${CHANNEL_NAME} + fi + cat ./channel-artifacts/anchors.tx.json | base64 -d > ${CORE_PEER_LOCALMSPID}anchors.tx + if [ "$tls_status" = "true" ] + then + peer channel update -o ${ORDERER_URL} -c ${CHANNEL_NAME} -f ${CORE_PEER_LOCALMSPID}anchors.tx --tls --cafile ${ORDERER_CA} + else + peer channel update -o ${ORDERER_URL} -c ${CHANNEL_NAME} -f ${CORE_PEER_LOCALMSPID}anchors.tx + fi + fi + workingDir: /opt/gopath/src/github.com/hyperledger/fabric/peer + envFrom: + - configMapRef: + name: {{ .Release.Name }}-config + volumeMounts: + - name: certificates + mountPath: /opt/gopath/src/github.com/hyperledger/fabric/crypto + readOnly: true + - name: orderer-tls-cacert + mountPath: /opt/gopath/src/github.com/hyperledger/fabric/orderer/tls/orderer.crt + subPath: orderer.crt + {{- if ne (.Values.global.version | trunc 3) "2.5" }} + - name: anchorpeer-artifacts + mountPath: /opt/gopath/src/github.com/hyperledger/fabric/peer/channel-artifacts + readOnly: true + {{- end }} +{{- end }} diff --git a/platforms/hyperledger-fabric/charts/fabric-channel-join/templates/configmap.yaml 
b/platforms/hyperledger-fabric/charts/fabric-channel-join/templates/configmap.yaml index 51609ec30ee..df2f3b2d68a 100644 --- a/platforms/hyperledger-fabric/charts/fabric-channel-join/templates/configmap.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-channel-join/templates/configmap.yaml @@ -7,22 +7,43 @@ apiVersion: v1 kind: ConfigMap metadata: - name: joinchannel-{{ $.Values.channel.name }}-{{ $.Values.peer.name }}-config - namespace: {{ $.Values.metadata.namespace }} + name: {{ .Release.Name }}-config + namespace: {{ .Release.Namespace }} labels: - app.kubernetes.io/name: joinchannel-{{ $.Values.channel.name }}-{{ $.Values.peer.name }}-config - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} + app.kubernetes.io/name: {{ .Release.Name }}-config + app.kubernetes.io/component: configmap + app.kubernetes.io/part-of: {{ include "fabric-channel-join.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm data: - CHANNEL_NAME: {{ $.Values.channel.name }} - FABRIC_LOGGING_SPEC: {{ $.Values.peer.loglevel }} - CORE_PEER_ID: {{ $.Values.peer.name }}.{{ $.Values.metadata.namespace }} - CORE_PEER_ADDRESS: {{ $.Values.peer.address }} - CORE_PEER_LOCALMSPID: {{ $.Values.peer.localmspid }} - CORE_PEER_TLS_ENABLED: "{{ $.Values.peer.tlsstatus }}" + CHANNEL_NAME: {{ .Values.peer.channelName | lower }} + FABRIC_LOGGING_SPEC: {{ .Values.peer.logLevel }} + CORE_PEER_ID: {{ .Values.peer.name }}.{{ .Release.Namespace }} + CORE_PEER_ADDRESS: {{ .Values.peer.address }} + CORE_PEER_LOCALMSPID: {{ .Values.peer.localMspId }} + CORE_PEER_TLS_ENABLED: "{{ .Values.peer.tlsStatus }}" CORE_PEER_TLS_ROOTCERT_FILE: /opt/gopath/src/github.com/hyperledger/fabric/crypto/admin/msp/tlscacerts/tlsca.crt - ORDERER_CA: /opt/gopath/src/github.com/hyperledger/fabric/crypto/orderer/tls/ca.crt - ORDERER_URL: {{ $.Values.orderer.address }} + ORDERER_CA: /opt/gopath/src/github.com/hyperledger/fabric/orderer/tls/orderer.crt + ORDERER_URL: {{ .Values.peer.ordererAddress }} CORE_PEER_MSPCONFIGPATH: /opt/gopath/src/github.com/hyperledger/fabric/crypto/admin/msp + NETWORK_VERSION: {{ .Values.global.version }} + +--- +{{- if ne (.Values.global.version | trunc 3) "2.5" }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-anchor-artifacts + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ .Release.Name }}-anchor-artifacts + app.kubernetes.io/component: anchorpeer-artifacts + app.kubernetes.io/part-of: {{ include "fabric-channel-join.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +data: + anchors.tx.json: |- + {{ .Files.Get "files/anchortx.json" | nindent 8 }} +{{- end }} diff --git a/platforms/hyperledger-fabric/charts/fabric-channel-join/templates/join_channel.yaml b/platforms/hyperledger-fabric/charts/fabric-channel-join/templates/join_channel.yaml index 85fd38380cf..a41ad4a719d 100644 --- a/platforms/hyperledger-fabric/charts/fabric-channel-join/templates/join_channel.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-channel-join/templates/join_channel.yaml @@ -7,37 +7,44 @@ apiVersion: batch/v1 kind: Job metadata: - name: joinchannel-{{ $.Values.peer.name }}-{{ $.Values.channel.name }} - namespace: {{ $.Values.metadata.namespace }} + name: joinchannel-{{ .Release.Name }} + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": post-install + "helm.sh/hook-weight": "0" + "helm.sh/hook-delete-policy": "before-hook-creation" labels: - app: joinchannel-{{ $.Values.peer.name }}-{{ $.Values.channel.name }} - app.kubernetes.io/name: joinchannel-{{ $.Values.peer.name }}-{{ $.Values.channel.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} + app: {{ .Release.Name }} + app.kubernetes.io/name: channel-join-{{ .Release.Name }} + app.kubernetes.io/component: fabric-channel-join-job + app.kubernetes.io/part-of: {{ include "fabric-channel-join.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm spec: backoffLimit: 6 template: metadata: labels: - app: joinchannel-{{ $.Values.peer.name }}-{{ $.Values.channel.name }} - app.kubernetes.io/name: joinchannel-{{ $.Values.peer.name }}-{{ $.Values.channel.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} + app: {{ .Release.Name }} + app.kubernetes.io/name: channel-join-{{ .Release.Name }} + app.kubernetes.io/component: fabric-channel-join-job + app.kubernetes.io/part-of: {{ include "fabric-channel-join.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm spec: restartPolicy: "OnFailure" - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - {{- if .Values.vault.imagesecretname }} + serviceAccountName: {{ .Values.global.serviceAccountName }} + {{- if .Values.image.pullSecret }} imagePullSecrets: - - name: {{ $.Values.vault.imagesecretname }} + - name: {{ .Values.image.pullSecret }} {{- end }} volumes: - {{ if .Values.vault.tls }} + {{ if .Values.global.vault.tls }} - name: vaultca secret: - secretName: {{ $.Values.vault.tls }} + secretName: {{ .Values.global.vault.tls }} items: - key: ca.crt.pem path: ca-certificates.crt # curl expects certs to be in /etc/ssl/certs/ca-certificates.crt @@ -48,50 +55,80 @@ spec: - name: scripts-volume configMap: name: bevel-vault-script + - name: orderer-tls-cacert + configMap: + name: {{ include "fabric-channel-join.orderercrt" . 
}} + defaultMode: 0775 + items: + - key: cacert + path: orderer.crt initContainers: - name: certificates-init - image: {{ $.Values.metadata.images.alpineutils }} + image: {{ .Values.image.alpineUtils }} imagePullPolicy: IfNotPresent env: - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} + value: {{ .Values.global.vault.address }} - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: VAULT_PEER_SECRET_PREFIX - value: "{{ $.Values.vault.adminsecretprefix }}" - - name: VAULT_ORDERER_SECRET_PREFIX - value: "{{ $.Values.vault.orderersecretprefix }}" + value: {{ .Values.global.vault.role }} + - name: KUBERNETES_AUTH_PATH + value: {{ .Values.global.vault.authPath }} + - name: VAULT_SECRET_ENGINE + value: "{{ .Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ .Values.global.vault.secretPrefix }}" + - name: VAULT_TYPE + value: "{{ .Values.global.vault.type }}" - name: MOUNT_PATH value: /secret - - name: VAULT_TYPE - value: "{{ $.Values.vault.type }}" command: ["sh", "-c"] args: - |- #!/usr/bin/env sh - . /scripts/bevel-vault.sh +{{- if eq .Values.global.vault.type "hashicorp" }} - # Calling a function to retrieve the vault token. + . /scripts/bevel-vault.sh vaultBevelFunc "init" - echo "Getting Orderer TLS certificates from Vault." - vaultBevelFunc "readJson" "${VAULT_ORDERER_SECRET_PREFIX}/tls" + function getAdminMspSecret { + KEY=$1 + + echo "Getting MSP certificates from Vault." + vaultBevelFunc "readJson" "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/users/${KEY}" + + ADMINCERT=$(echo ${VAULT_SECRET} | jq -r '.["admincerts"]') + CACERTS=$(echo ${VAULT_SECRET} | jq -r '.["cacerts"]') + KEYSTORE=$(echo ${VAULT_SECRET} | jq -r '.["keystore"]') + SIGNCERTS=$(echo ${VAULT_SECRET} | jq -r '.["signcerts"]') + TLSCACERTS=$(echo ${VAULT_SECRET} | jq -r '.["tlscacerts"]') + + echo "${ADMINCERT}" > ${OUTPUT_PATH}/admincerts/admin.crt + echo "${CACERTS}" > ${OUTPUT_PATH}/cacerts/ca.crt + echo "${KEYSTORE}" > ${OUTPUT_PATH}/keystore/server.key + echo "${SIGNCERTS}" > ${OUTPUT_PATH}/signcerts/server.crt + echo "${TLSCACERTS}" > ${OUTPUT_PATH}/tlscacerts/tlsca.crt + } - TLS_CA_CERT=$(echo ${VAULT_SECRET} | jq -r '.["ca.crt"]') - OUTPUT_PATH="${MOUNT_PATH}/orderer/tls" - mkdir -p ${OUTPUT_PATH} - echo "${TLS_CA_CERT}" >> ${OUTPUT_PATH}/ca.crt +{{- else }} - echo "Getting MSP certificates from Vault." 
- vaultBevelFunc "readJson" "${VAULT_PEER_SECRET_PREFIX}/msp" + function getAdminMspSecret { + KEY=$1 + KUBENETES_SECRET=$(kubectl get secret ${KEY} --namespace {{ .Release.Namespace }} -o json) - ADMINCERT=$(echo ${VAULT_SECRET} | jq -r '.["admincerts"]') - CACERTS=$(echo ${VAULT_SECRET} | jq -r '.["cacerts"]') - KEYSTORE=$(echo ${VAULT_SECRET} | jq -r '.["keystore"]') - SIGNCERTS=$(echo ${VAULT_SECRET} | jq -r '.["signcerts"]') - TLSCACERTS=$(echo ${VAULT_SECRET} | jq -r '.["tlscacerts"]') + ADMINCERT=$(echo ${KUBENETES_SECRET} | jq -r '.data.admincerts' | base64 -d) + CACERTS=$(echo ${KUBENETES_SECRET} | jq -r '.data.cacerts' | base64 -d) + KEYSTORE=$(echo ${KUBENETES_SECRET} | jq -r '.data.keystore' | base64 -d) + SIGNCERTS=$(echo ${KUBENETES_SECRET} | jq -r '.data.signcerts' | base64 -d) + TLSCACERTS=$(echo ${KUBENETES_SECRET} | jq -r '.data.tlscacerts' | base64 -d) + + echo "${ADMINCERT}" > ${OUTPUT_PATH}/admincerts/admin.crt + echo "${CACERTS}" > ${OUTPUT_PATH}/cacerts/ca.crt + echo "${KEYSTORE}" > ${OUTPUT_PATH}/keystore/server.key + echo "${SIGNCERTS}" > ${OUTPUT_PATH}/signcerts/server.crt + echo "${TLSCACERTS}" > ${OUTPUT_PATH}/tlscacerts/tlsca.crt + } + +{{- end }} OUTPUT_PATH="${MOUNT_PATH}/admin/msp" mkdir -p ${OUTPUT_PATH}/admincerts @@ -99,14 +136,9 @@ spec: mkdir -p ${OUTPUT_PATH}/keystore mkdir -p ${OUTPUT_PATH}/signcerts mkdir -p ${OUTPUT_PATH}/tlscacerts - - echo "${ADMINCERT}" >> ${OUTPUT_PATH}/admincerts/admin.crt - echo "${CACERTS}" >> ${OUTPUT_PATH}/cacerts/ca.crt - echo "${KEYSTORE}" >> ${OUTPUT_PATH}/keystore/server.key - echo "${SIGNCERTS}" >> ${OUTPUT_PATH}/signcerts/server.crt - echo "${TLSCACERTS}" >> ${OUTPUT_PATH}/tlscacerts/tlsca.crt + getAdminMspSecret admin-msp volumeMounts: - {{ if .Values.vault.tls }} + {{ if .Values.global.vault.tls }} - name: vaultca mountPath: "/etc/ssl/certs/" readOnly: true @@ -118,7 +150,7 @@ spec: subPath: bevel-vault.sh containers: - name: joinchannel - image: {{ $.Values.metadata.images.fabrictools }} + image: {{ .Values.image.fabricTools }}:{{ .Values.global.version }} imagePullPolicy: IfNotPresent stdin: true tty: true @@ -145,8 +177,11 @@ spec: workingDir: /opt/gopath/src/github.com/hyperledger/fabric/peer envFrom: - configMapRef: - name: joinchannel-{{ $.Values.channel.name }}-{{ $.Values.peer.name }}-config + name: {{ .Release.Name }}-config volumeMounts: - name: certificates mountPath: /opt/gopath/src/github.com/hyperledger/fabric/crypto readOnly: true + - name: orderer-tls-cacert + mountPath: /opt/gopath/src/github.com/hyperledger/fabric/orderer/tls/orderer.crt + subPath: orderer.crt diff --git a/platforms/hyperledger-fabric/charts/fabric-channel-join/values.yaml b/platforms/hyperledger-fabric/charts/fabric-channel-join/values.yaml index 6562c4161d5..ec0d8db02f4 100644 --- a/platforms/hyperledger-fabric/charts/fabric-channel-join/values.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-channel-join/values.yaml @@ -3,75 +3,75 @@ # # SPDX-License-Identifier: Apache-2.0 ############################################################################################## +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +--- +# The following are for overriding global values +global: + # HLF Network Version + #Eg. version: 2.5.4 + version: 2.5.4 + #Provide the service account name which will be created. 
+ serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # only 'false' is implemented + vault: + #Provide the type of vault + #Eg. type: hashicorp + type: hashicorp + #Provide the vaultrole for an organization + #Eg. vaultrole: org1-vault-role + role: vault-role + #Provide the network type + network: fabric + #Provide the vault server address + #Eg. vaultaddress: http://Vault-884963190.eu-west-1.elb.amazonaws.com + address: + #Provide the kubernetes auth backed configured in vault for an organization + #Eg. authpath: supplychain + authPath: supplychain + #Provide the secret engine. + secretEngine: secretsv2 + #Provide the vault path where the secrets will be stored + secretPrefix: "data/supplychain" + #Enable or disable TLS for vault communication + #Eg. tls: true + tls: -metadata: - #Provide the namespace for organization's peer - #Eg. namespace: org1-net - namespace: org1-net - images: - #Provide the valid image name and version for fabric tools - #Eg. fabric-tools: hyperledger/fabrictools:1.4.0 - fabrictools: ghcr.io/hyperledger/bevel-fabric-tools:2.2.2 - #Provide the valid image name and version to read certificates from vault server - #Eg.alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - #Provide the custom labels - #NOTE: Provide labels other than name, release name , release service, chart version , chart name , app. - #Eg. labels: - # role: join_channel - labels: +image: + #Provide the valid image name and version for fabric tools + #Eg. fabricTools: hyperledger/fabrictools + fabricTools: ghcr.io/hyperledger/bevel-fabric-tools + #Provide the valid image name and version to read certificates from vault server + #Eg.alpineutils: ghcr.io/hyperledger/bevel-alpine:latest + alpineUtils: ghcr.io/hyperledger/bevel-alpine:latest + #Provide the secret to use if private repository + #Eg. pullSecret: regcred + pullSecret: peer: #Provide the name of the peer as per deployment yaml. #Eg. name: peer0 name: peer0 + #Provide the type of peer + #Eg. type: anchor | general + type: anchor #Provide the address of the peer who wants to join channel and port to be mentioned is grpc cluster IP port - #Eg. address: peer0.org1-net:7051 - address: peer0.org1-net:7051 - #Provide the localmspid for organization - #Eg. localmspid: Org1MSP - localmspid: Org1MSP - #Provide the loglevel for organization's peer - #Eg. loglevel: info - loglevel: info + #Eg. address: peer0.supplychain-net:7051 + address: peer0.supplychain-net:7051 + #Provide the localMspId for organization + #Eg. localMspId: supplychainMSP + localMspId: supplychainMSP + #Provide the logLevel for organization's peer + #Eg. logLevel: info + logLevel: info #Provide the value for tlsstatus to be true or false for organization's peer #Eg. tlsstatus: true - tlsstatus: true - -vault: - #Provide the vaultrole for an organization - #Eg. vaultrole: org1-vault-role - role: vault-role - #Provide the vault server address - #Eg. vaultaddress: http://Vault-884963190.eu-west-1.elb.amazonaws.com - address: - #Provide the kubernetes auth backed configured in vault for an organization - #Eg. authpath: devorg1-net-auth - authpath: devorg1-net-auth - #Provide the value for vault secretprefix - #Eg. adminsecretprefix: secretsv2/data/crypto/peerOrganizations/.../users/admin - adminsecretprefix: secretsv2/data/crypto/peerOrganizations/org1-net/users/admin - #Provide the value for vault secretprefix - #Eg. 
orderersecretprefix: secretsv2/data/crypto/peerOrganizations/.../orderer - orderersecretprefix: secretsv2/data/crypto/peerOrganizations/org1-net/orderer - #Provide the serviceaccountname for vault - #Eg. serviceaccountname: vault-auth - serviceaccountname: vault-auth - #Provide the type of vault - #Eg. type: hashicorp - type: hashicorp - #Provide the imagesecretname for vault - #Eg. imagesecretname: regcred - imagesecretname: "" - #Kuberenetes secret for vault ca.cert - - -channel: - #Provide the name of the channel - #Eg. name: mychannel - name: mychannel - -orderer: + tlsStatus: true + #Provide the name of the channel which peer will join + #Eg. channelName: AllChannel + channelName: AllChannel #Provide the address for orderer - #Eg. address: orderer1.org1proxy.blockchaincloudpoc.com:443 - address: orderer1.org1proxy.blockchaincloudpoc.com:443 + #Eg. ordererAddress: orderer1.test.yourdomain.com:443 + ordererAddress: orderer1.supplychain-net:7050 diff --git a/platforms/hyperledger-fabric/charts/fabric-cli/.helmignore b/platforms/hyperledger-fabric/charts/fabric-cli/.helmignore new file mode 100644 index 00000000000..014fa775608 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-cli/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +generated_config/ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/platforms/hyperledger-fabric/charts/fabric-cli/Chart.yaml b/platforms/hyperledger-fabric/charts/fabric-cli/Chart.yaml index 67b27977c0a..0bc0fd2ec8b 100644 --- a/platforms/hyperledger-fabric/charts/fabric-cli/Chart.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-cli/Chart.yaml @@ -5,7 +5,23 @@ ############################################################################################## apiVersion: v1 -appVersion: "2.0" -description: "Hyperledger Fabric: Deploys Fabric Cli." 
name: fabric-cli -version: 1.0.0 +description: "Hyperledger Fabric: Deploys Fabric CLI" +version: 1.1.0 +appVersion: latest +keywords: + - bevel + - hlf + - fabric + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org + diff --git a/platforms/hyperledger-fabric/charts/fabric-cli/README.md b/platforms/hyperledger-fabric/charts/fabric-cli/README.md index 3741f56ee44..dac4d8572fd 100644 --- a/platforms/hyperledger-fabric/charts/fabric-cli/README.md +++ b/platforms/hyperledger-fabric/charts/fabric-cli/README.md @@ -3,175 +3,101 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) - -# Fabric Cli Hyperledger Fabric Deployment +# fabric-cli -- [Fabric Cli Hyperledger Fabric Deployment Helm Chart](#fabric-cli-hyperledger-fabric-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Verification](#verification) -- [Updating the Deployment](#updating-the-deployment) -- [Deletion](#deletion) -- [Contributing](#contributing) -- [License](#license) +This chart is a component of Hyperledger Bevel. The fabric-cli chart deploys a Fabric CLI attached to a Peer node to the Kubernetes cluster. This chart is a dependency and is deployed by the [fabric-peernode](../fabric-peernode/README.md) chart. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details. +## TL;DR - -## Fabric Cli Hyperledger Fabric Deployment Helm Chart ---- -A [Helm chart](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-fabric/charts/fabric-cli) for Fabric Cli. - - - -## Prerequisites ---- -Before deploying the Helm chart, make sure to have the following prerequisites: - -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm installed. - - - -## Chart Structure ---- -The structure of the Helm chart is as follows: - -``` -fabric-cli/ - |- templates/ - |- _helpers.yaml - |- deployment.yaml - |- volume.yaml - |- Chart.yaml - |- README.md - |- values.yaml +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install peer0-cli bevel/fabric-cli ``` -- `templates/`: Contains the Kubernetes manifest templates that define the resources to be deployed. -- `helpers.tpl`: Contains custom label definitions used in other templates. -- `deployment.yaml`: The certificates-init retrieves TLS certificates and cryptographic materials from HashiCorp Vault, ensuring secure communication. The cli runs Hyperledger Fabric CLI tools, using the fetched certificates for secure interaction with the network. -- `volume.yaml`: Requests storage resources for the cli container -- `Chart.yaml`: Contains the metadata for the Helm chart, such as the name, version, and description. -- `README.md`: Provides information and instructions about the Helm chart. -- `values.yaml`: Contains the default configuration values for the Helm chart. 
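Although fabric-cli is normally installed as a dependency of the fabric-peernode chart, a standalone install is possible by pointing it at an existing peer and storage class. A hedged sketch (the namespace, peer name and `gp2` storage class are placeholders):

```bash
# Hypothetical standalone CLI install against an already-running peer.
helm install peer0-cli bevel/fabric-cli \
  --namespace supplychain-net \
  --set peerName=peer0 \
  --set storageClass=gp2 \
  --set localMspId=supplychainMSP \
  --set ordererAddress=orderer1.supplychain-net:7050
```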
- - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-fabric/charts/fabric-cli/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: - -### Metadata - -| Name | Description | Default Value | -| ----------------------| ----------------------------------------------------------------------| --------------------------------------------------| -| namespace | Namespace for organization's peer deployment | org1-net | -| images.fabrictools | Valid image name and version for fabric tools |ghcr.io/hyperledger/bevel-fabric-tools:2.2.2 | -| images.alpineutils | Valid image name and version to read certificates from vault server | ghcr.io/hyperledger/bevel-alpine:latest | - -### Storage - -| Name | Description | Default Value | -| ------------| ----------------------| ---------------| -| class | Storage class name | aws-storageclass | -| size | Storage size | 256Mi | - -### Vault - -| Name | Description | Default Value | -| ----------------------| ------------------------------------------------------------------| -------------------------------| -| role | Vault role for the organization | vault-role | -| address | Vault server address | "" | -| authpath | Kubernetes auth backend configured in Vault for the organization | devorg1-net-auth | -| adminsecretprefix | Vault secret prefix for admin | secretsv2/data/crypto/peerOrganizations/org1-net/users/admin | -| orderersecretprefix | Vault secret prefix for orderer | secretsv2/data/crypto/peerOrganizations/org1-net/orderer | -| serviceaccountname | Service account name for Vault | vault-auth | -| type | Provide the type of vault | hashicorp | -| imagesecretname | Image secret name for Vault | "" | -| tls | TLS status for Vault communication | "" | - -### Peer Configuration - -| Name | Description | Default Value | -| --------------| --------------------------------------------| -----------------------------| -| name | Name of the peer as per deployment YAML | peer0 | -| localmspid | Local MSP ID for the organization's peer | Org1MSP | -| tlsstatus | TLS status for the organization's peer | true | -| address | Address for the peer | peer0.org1-net:7051 | - -### Orderer Configuration - -| Name | Description | Default Value | -| ------------| -------------------------| -----------------------------| -| address | Address for the orderer | orderer1.org1proxy.blockchaincloudpoc.com:443 | - - - -## Deployment ---- - -To deploy the fabric-cli Helm chart, follow these steps: +## Prerequisites -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-cli/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install the chart: - ``` - $ helm repo add bevel https://hyperledger.github.io/bevel/ - $ helm install ./fabric-cli - ``` -Replace `` with the desired name for the release. +- Kubernetes 1.19+ +- Helm 3.2.0+ -This will deploy the fabric-cli node to the Kubernetes cluster based on the provided configurations. 
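Once the CLI pod is running, it can be used to run `peer` commands against the peer it is attached to. A small sketch, relying on the `app: cli` label that this chart sets on the deployment and assuming the placeholder namespace below:

```bash
# Find the CLI pod via its label and list the channels the peer has joined.
CLI_POD=$(kubectl get pods -n supplychain-net -l app=cli \
  -o jsonpath='{.items[0].metadata.name}')
kubectl exec -it "$CLI_POD" -n supplychain-net -- peer channel list
```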
+If HashiCorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ +## Installing the Chart - -## Verification ---- +To install the chart with the release name `peer0-cli`: -To verify the deployment, we can use the following command: +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install peer0-cli bevel/fabric-cli ``` -$ kubectl get deployments -n -``` -Replace `` with the actual namespace where the deployment was created. The command will display information about the deployment, including the number of replicas and their current status. - - -## Updating the Deployment ---- +The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. -If we need to update the deployment with new configurations or changes, modify the same [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-cli/values.yaml) file with the desired changes and run the following Helm command: -``` -$ helm upgrade ./fabric-cli -``` -Replace `` with the name of the release. This command will apply the changes to the deployment, ensuring the fabric-cli node is up to date. +> **Tip**: List all releases using `helm list` +## Uninstalling the Chart - -## Deletion ---- +To uninstall/delete the `peer0-cli` deployment: -To delete the deployment and associated resources, run the following Helm command: +```bash +helm uninstall peer0-cli ``` -$ helm uninstall -``` -Replace `` with the name of the release. This command will remove all the resources created by the Helm chart. - - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Fabric Cli Hyperledger Fabric Deployment Helm Chart](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-cli), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters +These parameters are referred to in the same way in each parent and child chart +| Name | Description | Default Value | +|--------|---------|-------------| +|`global.version` | Fabric Version. | `2.5.4` | +|`global.serviceAccountName` | The service account name that will be created for Vault Auth and K8s Secret management| `vault-auth` | +| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. Currently only `aws`, `azure` and `minikube` are tested | `aws` | +| `global.cluster.cloudNativeServices` | only `false` is implemented, `true` to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) is for future use | `false` | +| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` and `kubernetes` are supported. | `hashicorp` | +| `global.vault.role` | Role used for authentication with Vault | `vault-role` | +| `global.vault.address`| URL of the Vault server.
| `""` | +| `global.vault.authPath` | Authentication path for Vault | `supplychain` | +| `global.vault.secretEngine` | Vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | Vault secret prefix which must start with `data/` | `data/supplychain` | +| `global.vault.tls` | Name of the Kubernetes secret which has certs to connect to TLS enabled Vault | `false` | + +### Image + +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `image.fabricTools` | Fabric Tools image repository | `ghcr.io/hyperledger/bevel-fabric-tools` | +| `image.alpineUtils` | Alpine utils image repository and tag | `ghcr.io/hyperledger/bevel-alpine:latest` | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | + +### Configuration + +| Name | Description | Default Value | +|--------|---------|-------------| +| `peerName` | Name of the Peer that this CLI will connect. Leave empty when created using `fabric-peernode` chart | `""` | +| `storageClass` | Storage Class for the cli, Storage Class should be already created by `fabric-peernode` chart. Pass existing storage class for independent CLI creation | `""` | +| `storageSize` | PVC Storage Size for the cli | `256Mi` | +| `localMspId` | Local MSP ID of the organization| `supplychainMSP` | +| `tlsStatus` | TLS status of the peer | `true` | +| `ports.grpc.clusterIpPort` | GRPC Internal Port for Peer | `7051` | +| `ordererAddress` | Orderer Internal or External Address with port for CLI to connect | `orderer1.supplychain-net:7050` | +| `healthCheck.retries` | Retry count to connect to the Peer | `20` | +| `healthCheck.sleepTimeAfterError` | Wait seconds after unsuccessful connection attempt | `15` | + +### Labels + +| Name | Description | Default Value | +| ----------------| ----------- | ------------- | +| `labels.service` | Array of Labels for service object | `[]` | +| `labels.pvc` | Array of Labels for PVC object | `[]` | +| `labels.deployment` | Array of Labels for deployment or statefulset object | `[]` | - ## License This chart is licensed under the Apache v2.0 license. -Copyright © 2023 Accenture +Copyright © 2024 Accenture ### Attribution diff --git a/platforms/hyperledger-fabric/charts/fabric-cli/templates/_helpers.tpl b/platforms/hyperledger-fabric/charts/fabric-cli/templates/_helpers.tpl index d43c09d8cef..6c628b76553 100644 --- a/platforms/hyperledger-fabric/charts/fabric-cli/templates/_helpers.tpl +++ b/platforms/hyperledger-fabric/charts/fabric-cli/templates/_helpers.tpl @@ -1,5 +1,75 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "fabric-cli.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "fabric-cli.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-cli" .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "fabric-cli.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create orderer tls configmap name depending on Configmap existance +*/}} +{{- define "fabric-cli.orderercrt" -}} +{{- $secret := lookup "v1" "ConfigMap" .Release.Namespace "orderer-tls-cacert" -}} +{{- if $secret -}} +{{/* + Use this configmap +*/}} +{{- printf "orderer-tls-cacert" -}} +{{- else -}} +{{/* + Use the release configmap +*/}} +{{- printf "%s-orderer-tls-cacert" .Release.Name -}} +{{- end -}} +{{- end -}} + +{{/* +Peer name can be passed by Values or by Parent chart release name +*/}} +{{- define "fabric-cli.peername" -}} +{{- if .Values.peerName -}} +{{- printf .Values.peerName -}} +{{- else -}} +{{- printf .Release.Name -}} +{{- end -}} +{{- end -}} + +{{- define "labels.deployment" -}} +{{- range $value := .Values.labels.deployment }} +{{ toYaml $value }} +{{- end }} +{{- end }} + +{{- define "labels.service" -}} +{{- range $value := .Values.labels.service }} +{{ toYaml $value }} +{{- end }} +{{- end }} + +{{- define "labels.pvc" -}} +{{- range $value := .Values.labels.pvc }} +{{ toYaml $value }} +{{- end }} {{- end }} diff --git a/platforms/hyperledger-fabric/charts/fabric-cli/templates/deployment.yaml b/platforms/hyperledger-fabric/charts/fabric-cli/templates/deployment.yaml index f302063ccb3..5b73303d31a 100644 --- a/platforms/hyperledger-fabric/charts/fabric-cli/templates/deployment.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-cli/templates/deployment.yaml @@ -7,31 +7,41 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: {{ .Values.peer.name }}-cli - namespace: {{ .Values.metadata.namespace }} + name: {{ template "fabric-cli.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: cli + app.kubernetes.io/name: {{ .Release.Name }} + app.kubernetes.io/component: fabric + {{ include "labels.deployment" . | nindent 4 }} spec: replicas: 1 selector: matchLabels: app: cli + app.kubernetes.io/name: {{ .Release.Name }} + app.kubernetes.io/component: fabric template: metadata: labels: app: cli + app.kubernetes.io/name: {{ .Release.Name }} + app.kubernetes.io/component: fabric + {{ include "labels.deployment" . | nindent 8 }} spec: - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - {{- if .Values.vault.imagesecretname }} + serviceAccountName: {{ .Values.global.serviceAccountName }} + {{- if .Values.image.pullSecret }} imagePullSecrets: - - name: {{ $.Values.vault.imagesecretname }} + - name: {{ .Values.image.pullSecret }} {{- end }} volumes: - - name: {{ .Values.peer.name }}-cli-pv + - name: {{ .Release.Name }}-cli-pv persistentVolumeClaim: - claimName: {{ .Values.peer.name }}-cli-pvc - {{ if .Values.vault.tls }} + claimName: {{ .Release.Name }}-cli-pvc + {{ if .Values.global.vault.tls }} - name: vaultca secret: - secretName: {{ $.Values.vault.tls }} + secretName: {{ .Values.global.vault.tls }} items: - key: ca.crt.pem path: ca-certificates.crt @@ -42,69 +52,129 @@ spec: - name: scripts-volume configMap: name: bevel-vault-script + - name: package-manager + configMap: + name: package-manager + - name: orderer-tls-cacert + configMap: + name: {{ include "fabric-cli.orderercrt" . 
}} + defaultMode: 0775 + items: + - key: cacert + path: orderer.crt initContainers: - name: certificates-init - image: {{ $.Values.metadata.images.alpineutils }} + image: {{ .Values.image.alpineUtils }} imagePullPolicy: IfNotPresent env: - name: VAULT_ADDR - value: {{ $.Values.vault.address }} + value: {{ .Values.global.vault.address }} + - name: VAULT_SECRET_ENGINE + value: "{{ .Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ .Values.global.vault.secretPrefix }}" - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} + value: {{ .Values.global.vault.authPath }} - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: VAULT_PEER_SECRET_PREFIX - value: "{{ $.Values.vault.adminsecretprefix }}" - - name: VAULT_ORDERER_SECRET_PREFIX - value: "{{ $.Values.vault.orderersecretprefix }}" + value: {{ .Values.global.vault.role }} - name: MOUNT_PATH value: "/secret" - name: VAULT_TYPE - value: "{{ $.Values.vault.type }}" + value: "{{ .Values.global.vault.type }}" + - name: CORE_PEER_ADDRESS + value: "{{ include "fabric-cli.peername" . }}.{{ .Release.Namespace }}:{{ .Values.ports.grpc.clusterIpPort }}" command: ["sh", "-c"] args: - |- #!/usr/bin/env sh . /scripts/bevel-vault.sh +{{- if eq .Values.global.vault.type "hashicorp" }} + # Calling a function to retrieve the vault token. vaultBevelFunc "init" + function getAdminMspSecret { + KEY=$1 + + echo "Getting MSP certificates from Vault." + vaultBevelFunc "readJson" "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/users/${KEY}" + if [ "$SECRETS_AVAILABLE" == "yes" ] + then + ADMINCERT=$(echo ${VAULT_SECRET} | jq -r '.["admincerts"]') + CACERTS=$(echo ${VAULT_SECRET} | jq -r '.["cacerts"]') + KEYSTORE=$(echo ${VAULT_SECRET} | jq -r '.["keystore"]') + SIGNCERTS=$(echo ${VAULT_SECRET} | jq -r '.["signcerts"]') + TLSCACERTS=$(echo ${VAULT_SECRET} | jq -r '.["tlscacerts"]') + + echo "${ADMINCERT}" > ${OUTPUT_PATH}/admincerts/admin.crt + echo "${CACERTS}" > ${OUTPUT_PATH}/cacerts/ca.crt + echo "${KEYSTORE}" > ${OUTPUT_PATH}/keystore/server.key + echo "${SIGNCERTS}" > ${OUTPUT_PATH}/signcerts/server.crt + echo "${TLSCACERTS}" > ${OUTPUT_PATH}/tlscacerts/tlsca.crt + ADMIN_MSP_SECRET=true + else + ADMIN_MSP_SECRET=false + fi + } + +{{- else }} + + function getAdminMspSecret { + KEY=$1 + KUBENETES_SECRET=$(kubectl get secret ${KEY} --namespace {{ .Release.Namespace }} -o json) + if [ "$KUBENETES_SECRET" = "" ]; then + ADMIN_MSP_SECRET=false + else + ADMINCERT=$(echo ${KUBENETES_SECRET} | jq -r '.data.admincerts' | base64 -d) + CACERTS=$(echo ${KUBENETES_SECRET} | jq -r '.data.cacerts' | base64 -d) + KEYSTORE=$(echo ${KUBENETES_SECRET} | jq -r '.data.keystore' | base64 -d) + SIGNCERTS=$(echo ${KUBENETES_SECRET} | jq -r '.data.signcerts' | base64 -d) + TLSCACERTS=$(echo ${KUBENETES_SECRET} | jq -r '.data.tlscacerts' | base64 -d) - echo "Getting Orderer TLS certificates from Vault using key $vault_secret_key" - vaultBevelFunc "readJson" "${VAULT_ORDERER_SECRET_PREFIX}/tls" + echo "${ADMINCERT}" > ${OUTPUT_PATH}/admincerts/admin.crt + echo "${CACERTS}" > ${OUTPUT_PATH}/cacerts/ca.crt + echo "${KEYSTORE}" > ${OUTPUT_PATH}/keystore/server.key + echo "${SIGNCERTS}" > ${OUTPUT_PATH}/signcerts/server.crt + echo "${TLSCACERTS}" > ${OUTPUT_PATH}/tlscacerts/tlsca.crt + ADMIN_MSP_SECRET=true + fi + } - TLS_CA_CERT=$(echo ${VAULT_SECRET} | jq -r '.["ca.crt"]') - OUTPUT_PATH="${MOUNT_PATH}/orderer/tls" - mkdir -p ${OUTPUT_PATH} - echo "${TLS_CA_CERT}" >> ${OUTPUT_PATH}/ca.crt +{{- end }} - 
############################################################################### + COUNTER=1 + while [ "$COUNTER" -le {{ .Values.healthCheck.retries }} ] + do - echo "Getting MSP certificates from Vault using key $vault_secret_key" - vaultBevelFunc "readJson" "${VAULT_PEER_SECRET_PREFIX}/msp" + OUTPUT_PATH="${MOUNT_PATH}/admin/msp" + mkdir -p ${OUTPUT_PATH}/admincerts + mkdir -p ${OUTPUT_PATH}/cacerts + mkdir -p ${OUTPUT_PATH}/keystore + mkdir -p ${OUTPUT_PATH}/signcerts + mkdir -p ${OUTPUT_PATH}/tlscacerts + getAdminMspSecret admin-msp - ADMINCERT=$(echo ${VAULT_SECRET} | jq -r '.["admincerts"]') - CACERTS=$(echo ${VAULT_SECRET} | jq -r '.["cacerts"]') - KEYSTORE=$(echo ${VAULT_SECRET} | jq -r '.["keystore"]') - SIGNCERTS=$(echo ${VAULT_SECRET} | jq -r '.["signcerts"]') - TLSCACERTS=$(echo ${VAULT_SECRET} | jq -r '.["tlscacerts"]') + if [ "$ADMIN_MSP_SECRET" = "true" ] + then + echo "Peer certificates have been obtained correctly" + break + else + echo "Peer certificates have not been obtained, sleeping for {{ .Values.healthCheck.sleepTimeAfterError }}" + sleep {{ .Values.healthCheck.sleepTimeAfterError }} + COUNTER=`expr "$COUNTER" + 1` + fi + done - OUTPUT_PATH="${MOUNT_PATH}/admin/msp" - mkdir -p ${OUTPUT_PATH}/admincerts - mkdir -p ${OUTPUT_PATH}/cacerts - mkdir -p ${OUTPUT_PATH}/keystore - mkdir -p ${OUTPUT_PATH}/signcerts - mkdir -p ${OUTPUT_PATH}/tlscacerts + if [ "$COUNTER" -gt {{ .Values.healthCheck.retries }} ] + then + echo "Retry attempted `expr $COUNTER - 1` times, The peer certificates have not been obtained." + exit 1 + fi - echo "${ADMINCERT}" >> ${OUTPUT_PATH}/admincerts/admin.crt - echo "${CACERTS}" >> ${OUTPUT_PATH}/cacerts/ca.crt - echo "${KEYSTORE}" >> ${OUTPUT_PATH}/keystore/server.key - echo "${SIGNCERTS}" >> ${OUTPUT_PATH}/signcerts/server.crt - echo "${TLSCACERTS}" >> ${OUTPUT_PATH}/tlscacerts/tlsca.crt volumeMounts: - name: certificates mountPath: /secret - {{ if .Values.vault.tls }} + {{ if .Values.global.vault.tls }} - name: vaultca mountPath: "/etc/ssl/certs/" readOnly: true @@ -112,9 +182,12 @@ spec: - name: scripts-volume mountPath: /scripts/bevel-vault.sh subPath: bevel-vault.sh + - name: package-manager + mountPath: /scripts/package-manager.sh + subPath: package-manager.sh containers: - name: cli - image: {{ $.Values.metadata.images.fabrictools }} + image: {{ .Values.image.fabricTools }}:{{ .Values.global.version }} imagePullPolicy: IfNotPresent stdin: true tty: true @@ -126,23 +199,29 @@ spec: - name: FABRIC_LOGGING_SPEC value: "debug" - name: CORE_PEER_ID - value: "{{ .Values.peer.name }}.{{ .Values.metadata.namespace }}" + value: "{{ include "fabric-cli.peername" . }}.{{ .Release.Namespace }}" - name: CORE_PEER_ADDRESS - value: "{{ .Values.peer.address }}" + value: "{{ include "fabric-cli.peername" . 
}}.{{ .Release.Namespace }}:{{ .Values.ports.grpc.clusterIpPort }}" - name: CORE_PEER_LOCALMSPID - value: "{{ .Values.peer.localmspid }}" + value: "{{ .Values.localMspId }}" - name: CORE_PEER_TLS_ENABLED - value: "{{ .Values.peer.tlsstatus }}" + value: "{{ .Values.tlsStatus }}" - name: CORE_PEER_TLS_ROOTCERT_FILE value: /opt/gopath/src/github.com/hyperledger/fabric/crypto/admin/msp/tlscacerts/tlsca.crt - name: ORDERER_CA - value: /opt/gopath/src/github.com/hyperledger/fabric/crypto/orderer/tls/ca.crt + value: /opt/gopath/src/github.com/hyperledger/fabric/orderer/tls/orderer.crt - name: ORDERER_URL - value: "{{ .Values.orderer.address }}" + value: "{{ .Values.ordererAddress }}" - name: CORE_PEER_MSPCONFIGPATH value: /opt/gopath/src/github.com/hyperledger/fabric/crypto/admin/msp volumeMounts: - name: certificates mountPath: /opt/gopath/src/github.com/hyperledger/fabric/crypto - - name: {{ .Values.peer.name }}-cli-pv + - name: {{ .Release.Name }}-cli-pv mountPath: /opt/gopath/src/github.com/chaincode + - name: package-manager + mountPath: /scripts/package-manager.sh + subPath: package-manager.sh + - name: orderer-tls-cacert + mountPath: /opt/gopath/src/github.com/hyperledger/fabric/orderer/tls/orderer.crt + subPath: orderer.crt diff --git a/platforms/hyperledger-fabric/charts/fabric-cli/templates/volume.yaml b/platforms/hyperledger-fabric/charts/fabric-cli/templates/volume.yaml index 8afd2d3e6c7..b22d5c5fa10 100644 --- a/platforms/hyperledger-fabric/charts/fabric-cli/templates/volume.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-cli/templates/volume.yaml @@ -8,12 +8,18 @@ kind: PersistentVolumeClaim apiVersion: v1 metadata: - name: {{ $.Values.peer.name }}-cli-pvc - namespace: {{ $.Values.metadata.namespace }} + name: {{ .Release.Name }}-cli-pvc + namespace: {{ .Release.Namespace }} + labels: + {{ include "labels.pvc" . | nindent 4 }} spec: - storageClassName: {{ $.Values.storage.class }} + {{- if .Values.storageClass }} + storageClassName: {{ .Values.storageClass }} + {{- else }} + storageClassName: storage-{{ .Release.Name }} + {{- end }} accessModes: - ReadWriteOnce resources: requests: - storage: {{ $.Values.storage.size }} + storage: {{ .Values.storageSize }} diff --git a/platforms/hyperledger-fabric/charts/fabric-cli/values.yaml b/platforms/hyperledger-fabric/charts/fabric-cli/values.yaml index 15a12e758f3..92a319a1ba7 100644 --- a/platforms/hyperledger-fabric/charts/fabric-cli/values.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-cli/values.yaml @@ -6,71 +6,74 @@ # This is a YAML-formatted file. # Declare variables to be passed into your templates. +global: + # HLF Network Version + #Eg. version: 2.5.4 + version: 2.5.4 + #Provide the service account name which will be created. + serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # only 'false' is implemented + vault: + #Provide the type of vault + #Eg. type: hashicorp + type: hashicorp + #Provide the vaultrole for an organization + #Eg. vaultrole: org1-vault-role + role: vault-role + #Provide the vault server address + #Eg. vaultaddress: http://Vault-884963190.eu-west-1.elb.amazonaws.com + address: + #Provide the kubernetes auth backed configured in vault for an organization + #Eg. authpath: supplychain + authPath: supplychain + #Provide the secret engine. 
+ secretEngine: secretsv2 + #Provide the vault path where the secrets will be stored + secretPrefix: "data/supplychain" + #Kuberenetes secret for vault ca.cert + #Enable or disable TLS for vault communication if value present or not + #Eg. tls: vaultca + tls: -metadata: - #Provide the namespace for organization's peer - #Eg. namespace: org1-net - namespace: org1-net - images: - #Provide the valid image name and version for fabric tools - #Eg. fabrictools: hyperledger/fabric-tools:1.4.0 - fabrictools: ghcr.io/hyperledger/bevel-fabric-tools:2.2.2 - #Provide the valid image name and version to read certificates from vault server - #Eg.alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - alpineutils: ghcr.io/hyperledger/bevel-alpine:latest +image: + #Provide the valid image repository for fabric tools + #Eg. fabricTools: hyperledger/fabric-tools + fabricTools: ghcr.io/hyperledger/bevel-fabric-tools + #Provide the valid image name and version to read certificates from vault server + #Eg.alpineUtils: ghcr.io/hyperledger/bevel-alpine:latest + alpineUtils: ghcr.io/hyperledger/bevel-alpine:latest + #Provide the secret to use if private repository + #Eg. pullSecret: regcred + pullSecret: -storage: - #Provide the storageclassname - class: aws-storageclass - #Provide the storagesize - size: 256Mi +#Provide the peer name this CLI will connect to. Keep empty for creation via peernode dependency +peerName: +#Provide the StorageClass for CLI PVC +storageClass: +#Provide the size for CLI PVC +storageSize: 256Mi +#Provide the localMspId for organization +#Eg. localMspId: supplychainMSP +localMspId: supplychainMSP +#Provide the value for tlsStatus to be true or false for organization's peer +#Eg. tlsStatus: true +tlsStatus: true +ports: + grpc: + #Provide a cluster IP port for grpc service to be exposed + #Eg. clusterIpPort: 7051 + clusterIpPort: 7051 +#Provide the address for orderer +#Eg. ordererAddress: orderer1.supplychain-net:7050 +ordererAddress: orderer1.supplychain-net:7050 -vault: - #Provide the vaultrole for an organization - #Eg. vaultrole: vault-role - role: vault-role - #Provide the vault server address - #Eg. vaultaddress: http://Vault-884963190.eu-west-1.elb.amazonaws.com - address: - #Provide the kubernetes auth backed configured in vault for an organization - #Eg. authpath: fra-demo-hlkube-cluster-org1 - authpath: devorg1-net-auth - #Provide the value for vault secretprefix - #Eg. adminsecretprefix: secretsv2/data/crypto/peerOrganizations/.../users/admin - adminsecretprefix: secretsv2/data/crypto/peerOrganizations/org1-net/users/admin - #Provide the value for vault secretprefix - #Eg. orderersecretprefix: secretsv2/data/crypto/peerOrganizations/.../orderer - orderersecretprefix: secretsv2/data/crypto/peerOrganizations/org1-net/orderer - #Provide the serviceaccountname for vault - #Eg. serviceaccountname: vault-auth - serviceaccountname: vault-auth - #Provide the type of vault - #Eg. type: hashicorp - type: hashicorp - #Provide the imagesecretname for vault - #Eg. imagesecretname: regcred - imagesecretname: "" - #Kuberenetes secret for vault ca.cert - #Enable or disable TLS for vault communication if value present or not - #Eg. tls: vaultca - tls: +healthCheck: + retries: 20 + sleepTimeAfterError: 15 - -peer: - #Provide the name of the peer as per deployment yaml. - #Eg. name: peer0 - name: peer0 - #Provide the localmspid for organization - #Eg. localmspid: Org1MSP - localmspid: Org1MSP - #Provide the value for tlsstatus to be true or false for organization's peer - #Eg. 
tlsstatus: true - tlsstatus: true - #Provide the address for the peer - #Eg: address: peer0.org1-net:7051 - address: peer0.org1-net:7051 - -orderer: - #Provide the address for orderer - #Eg. address: orderer1.org1proxy.blockchaincloudpoc.com:443 - address: orderer1.org1proxy.blockchaincloudpoc.com:443 +labels: + service: [] + pvc: [] + deployment: [] diff --git a/platforms/hyperledger-fabric/charts/fabric-external-chaincode/templates/_helpers.tpl b/platforms/hyperledger-fabric/charts/fabric-external-chaincode/templates/_helpers.tpl index 7bf5f530a8e..390a404650d 100644 --- a/platforms/hyperledger-fabric/charts/fabric-external-chaincode/templates/_helpers.tpl +++ b/platforms/hyperledger-fabric/charts/fabric-external-chaincode/templates/_helpers.tpl @@ -2,4 +2,34 @@ {{ range $key, $val := $.Values.metadata.labels }} {{ $key }}: {{ $val }} {{ end }} +{{- end }} + +{{- define "labels.deployment" -}} +{{- if $.Values.labels }} +{{- range $key, $value := $.Values.labels.deployment }} +{{- range $k, $v := $value }} + {{ $k }}: {{ $v | quote }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} + +{{- define "labels.service" -}} +{{- if $.Values.labels }} +{{- range $key, $value := $.Values.labels.service }} +{{- range $k, $v := $value }} + {{ $k }}: {{ $v | quote }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} + +{{- define "labels.pvc" -}} +{{- if $.Values.labels }} +{{- range $key, $value := $.Values.labels.pvc }} +{{- range $k, $v := $value }} + {{ $k }}: {{ $v | quote }} +{{- end }} +{{- end }} +{{- end }} {{- end }} \ No newline at end of file diff --git a/platforms/hyperledger-fabric/charts/fabric-external-chaincode/templates/deployment.yaml b/platforms/hyperledger-fabric/charts/fabric-external-chaincode/templates/deployment.yaml index ef7c0d9f9a1..bf44cac9996 100644 --- a/platforms/hyperledger-fabric/charts/fabric-external-chaincode/templates/deployment.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-external-chaincode/templates/deployment.yaml @@ -17,6 +17,7 @@ metadata: app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} {{- include "labels.custom" . | nindent 2 }} + {{- include "labels.deployment" . | nindent 2 }} spec: replicas: 1 selector: @@ -32,6 +33,7 @@ spec: app.kubernetes.io/name: cc-{{ $.Values.chaincode.name }} helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} app.kubernetes.io/instance: {{ .Release.Name }} + {{- include "labels.deployment" . | nindent 6 }} spec: serviceAccountName: {{ $.Values.vault.serviceaccountname }} {{- if .Values.vault.imagesecretname }} @@ -108,7 +110,7 @@ spec: - name: scripts-volume mountPath: /scripts/bevel-vault.sh subPath: bevel-vault.sh - {{ end }} + {{ end }} containers: - image: {{ $.Values.metadata.images.external_chaincode }} name: "{{ $.Values.chaincode.name }}-{{ $.Values.chaincode.version }}" diff --git a/platforms/hyperledger-fabric/charts/fabric-external-chaincode/templates/service.yaml b/platforms/hyperledger-fabric/charts/fabric-external-chaincode/templates/service.yaml index 631fe5e0548..763b5798988 100644 --- a/platforms/hyperledger-fabric/charts/fabric-external-chaincode/templates/service.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-external-chaincode/templates/service.yaml @@ -17,6 +17,7 @@ metadata: app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} {{- include "labels.custom" . | nindent 2 }} + {{- include "labels.service" . 
| nindent 2 }} spec: type: {{ $.Values.service.servicetype }} selector: diff --git a/platforms/hyperledger-fabric/charts/fabric-external-chaincode/values.yaml b/platforms/hyperledger-fabric/charts/fabric-external-chaincode/values.yaml index 14ebb45fbd2..ebc915097b3 100644 --- a/platforms/hyperledger-fabric/charts/fabric-external-chaincode/values.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-external-chaincode/values.yaml @@ -74,3 +74,8 @@ service: #Provide a cluster IP port for grpc service to be exposed #Eg. clusteripport: 7051 clusteripport: 7052 + +labels: + service: [] + pvc: [] + deployment: [] diff --git a/platforms/hyperledger-fabric/charts/fabric-genesis/.helmignore b/platforms/hyperledger-fabric/charts/fabric-genesis/.helmignore new file mode 100644 index 00000000000..014fa775608 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-genesis/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +generated_config/ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/platforms/hyperledger-fabric/charts/fabric-genesis/Chart.yaml b/platforms/hyperledger-fabric/charts/fabric-genesis/Chart.yaml new file mode 100644 index 00000000000..74d721a4785 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-genesis/Chart.yaml @@ -0,0 +1,26 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +apiVersion: v1 +name: fabric-genesis +description: "Hyperledger Fabric: Generates configtx and genesis files." +version: 1.1.0 +appVersion: latest +keywords: + - bevel + - hlf + - fabric + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/hyperledger-fabric/charts/fabric-genesis/README.md b/platforms/hyperledger-fabric/charts/fabric-genesis/README.md new file mode 100644 index 00000000000..86803bb2066 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-genesis/README.md @@ -0,0 +1,150 @@ +[//]: # (##############################################################################################) +[//]: # (Copyright Accenture. All Rights Reserved.) +[//]: # (SPDX-License-Identifier: Apache-2.0) +[//]: # (##############################################################################################) + +# fabric-genesis + +This chart is a component of Hyperledger Bevel. The fabric-genesis chart creates the genesis file and other channel artifacts for a Hyperfabric network. If enabled, the keys are then stored on the configured vault and stored as Kubernetes secrets. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details. 
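+
+Once the chart is installed (see the steps below), each generated artifact is stored as a base64-encoded ConfigMap in the release namespace and, when the Hashicorp Vault type is used, also written to Vault under `channel-artifacts/`. A minimal sketch of how to inspect the output, assuming Fabric `2.5.x` (the default), the default `allchannel` channel and an install into the `supplychain-net` namespace:
+
+```bash
+# List the ConfigMap created by the genesis job for the channel
+kubectl get configmap --namespace supplychain-net allchannel-genesis
+
+# Decode the stored genesis block locally for inspection
+kubectl get configmap --namespace supplychain-net allchannel-genesis \
+  -o jsonpath='{.data.allchannel-genesis_base64}' | base64 -d > allchannel.genesis.block
+```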
+ +## TL;DR + +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install genesis bevel/fabric-genesis +``` + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +If Hashicorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ + +The [Orderers](../fabric-orderernode/README.md) and [Peers](../fabric-peernode/README.md) should already be installed and this chart should generally be installed from the Orderer namespace as it has most of the admin permissions. + +After the peers have been installed, get certificates and the configuration file of each peer organization, place in `fabric-genesis/files` +```bash +cd ./fabric-genesis/files +kubectl --namespace carrier-net get secret admin-msp -o json > carrier.json +kubectl --namespace carrier-net get configmap peer0-msp-config -o json > carrier-config-file.json +``` + +If additional orderer(s) from a different organization is needed in genesis, then get that TLS cert and place in `fabric-genesis/files` +```bash +cd ./fabric-genesis/files +kubectl --namespace carrier-net get secret orderer5-tls -o json > orderer5-orderer-tls.json +``` + +## Installing the Chart + +To install the chart with the release name `genesis`: + +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install genesis bevel/fabric-genesis +``` + +The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `genesis` deployment: + +```bash +helm uninstall genesis +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters + +These parameters are refered to as same in each parent or child chart +| Name | Description | Default Value | +|--------|---------|-------------| +| `global.version` | Fabric Version.| `2.5.4` | +| `global.serviceAccountName` | The serviceaccount name that will be created for Vault Auth and k8S Secret management| `vault-auth` | +| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. Currently ony `aws` and `minikube` is tested | `aws` | +| `global.cluster.cloudNativeServices` | only `false` is implemented, `true` to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) is for future | `false` | +| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` and `kubernetes` is supported. | `hashicorp` | +| `global.vault.role` | Role used for authentication with Vault | `vault-role` | +| `global.vault.network` | Network type that is being deployed | `fabric` | +| `global.vault.address`| URL of the Vault server. | `""` | +| `global.vault.authPath` | Authentication path for Vault | `supplychain` | +| `global.vault.secretEngine` | Vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | Vault secret prefix which must start with `data/` | `data/supplychain` | +| `global.proxy.provider` | The proxy or Ingress provider. 
Can be `none` or `haproxy` | `haproxy` | +| `global.proxy.externalUrlSuffix` | The External URL suffix at which the Fabric services will be available | `test.blockchaincloudpoc.com` | + +### Image + +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `image.fabricTools` | Fabric Tools image repository | `ghcr.io/hyperledger/bevel-fabric-tools` | +| `image.alpineUtils` | Alpine utils image repository and tag | `ghcr.io/hyperledger/bevel-alpine:latest` | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | + +### Organizations + +List of Organizations participating in the Network with their Peer and Orderer Addresses. + +| Name | Description | Default Value | +|--------|---------|-------------| +| `organizations.name` | Organization Name | `supplychain` | +| `organizations.orderers` | List of organization's orderer nodes and their addresses. This list presents two fields `orderer.name` and `orderer.ordererAddress` | `- name: orderer1`
`ordererAddress: orderer1.supplychain-net:7050`
`- name: orderer2`
`ordererAddress: orderer2.supplychain-net:7050`
`- name: orderer3`
`ordererAddress: orderer3.supplychain-net:7050` | +| `organizations.peers` | List of the organization's peer nodes and their addresses. This list presents two fields `peer.name` and `peer.peerAddress` | `- name: peer0`
`peerAddress: peer0.supplychain-net:7051`
`- name: peer1`
`peerAddress: peer1.supplychain-net:7051` | + +### Consensus + +| Name | Description | Default Value | +| ---------| ----------------------------| ----------------| +| `consensus` | Name of the consensus | `raft` | +| `kafka.brokers` | Array of Kafka broker Addresses, only valid for `kafka` consensus | `""` | + + +### Channels +List of Channels you want to create the artifacts for. + +| Name | Description | Default Value | +|--------|---------|-------------| +| `channels.name` | Name of the channel | `allchannel` | +| `channels.consortium`| Consortium Name | `SupplyChainConsortium` | +| `channels.orderers` | List of orderer type organizations (from the list above) on the network | `- supplychain` | +| `channels.participants` | List of participating channel organizations (from the list above) on the network | `- supplychain`
`- carrier` | + + +### Settings + +| Name | Description | Default Value | +|--------|---------|-------------| +| `settings.generateGenesis` | Flag to generate the syschannel genesis for Fabric 2.2.x | `true` | +| `settings.removeConfigMapOnDelete` | Flag to delete the genesis ConfigMap when uninstalling the release | `true` | + +## License + +This chart is licensed under the Apache v2.0 license. + +Copyright © 2024 Accenture + +### Attribution + +This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: + +``` +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +``` diff --git a/platforms/hyperledger-fabric/charts/fabric-genesis/files/configtx_2_2.tpl b/platforms/hyperledger-fabric/charts/fabric-genesis/files/configtx_2_2.tpl new file mode 100644 index 00000000000..f83c950fd5c --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-genesis/files/configtx_2_2.tpl @@ -0,0 +1,154 @@ +# Configtx template for Fabric 2.2.x +Organizations: +{{- range $org := $.Values.organizations }} + - &{{ $org.name }}Org + Name: {{ $org.name }}MSP + ID: {{ $org.name }}MSP + MSPDir: ./crypto-config/organizations/{{ $org.name }}/msp + Policies: + Readers: + Type: Signature + Rule: "OR('{{ $org.name }}MSP.member')" + Writers: + Type: Signature + Rule: "OR('{{ $org.name }}MSP.member')" + Admins: + Type: Signature + Rule: "OR('{{ $org.name }}MSP.admin')" + Endorsement: + Type: Signature + Rule: "OR('{{ $org.name }}MSP.member')" + {{- if $org.orderers }} + OrdererEndpoints: + {{- range $orderer := $org.orderers }} + - {{ $orderer.ordererAddress }} + {{- end }} + {{- end }} + AnchorPeers: + {{- range $peer := $org.peers }} + {{- $split := split ":" $peer.peerAddress }} + - Host: {{ $split._0 }} + Port: {{ $split._1 }} + {{- end }} + {{- printf "\n" }} +{{- end }} + +Capabilities: + Channel: &ChannelCapabilities + V2_0: true + Orderer: &OrdererCapabilities + V2_0: true + Application: &ApplicationCapabilities + V2_0: true + +Application: &ApplicationDefaults + Organizations: + Policies: + LifecycleEndorsement: + Type: ImplicitMeta + Rule: "MAJORITY Endorsement" + Endorsement: + Type: ImplicitMeta + Rule: "MAJORITY Endorsement" + Readers: + Type: ImplicitMeta + Rule: "ANY Readers" + Writers: + Type: ImplicitMeta + Rule: "ANY Writers" + Admins: + Type: ImplicitMeta + Rule: "MAJORITY Admins" + Capabilities: + <<: *ApplicationCapabilities + +Channel: &ChannelDefaults + Policies: + Readers: + Type: ImplicitMeta + Rule: "ANY Readers" + Writers: + Type: ImplicitMeta + Rule: "ANY Writers" + Admins: + Type: ImplicitMeta + Rule: "MAJORITY Admins" + Capabilities: + <<: *ChannelCapabilities + +Orderer: &OrdererDefaults +{{- if eq $.Values.consensus "raft" }} + OrdererType: etcdraft + EtcdRaft: + Consenters: + {{- range $org := $.Values.organizations }} + {{- range $orderer := $org.orderers }} + {{- $split := split ":" $orderer.ordererAddress }} + - Host: {{ $split._0 }} + Port: {{ $split._1 }} + ClientTLSCert: ./crypto-config/organizations/{{ $org.name 
}}/orderers/{{ $orderer.name }}/tls/server.crt + ServerTLSCert: ./crypto-config/organizations/{{ $org.name }}/orderers/{{ $orderer.name }}/tls/server.crt + {{- end }} + {{- end }} +{{- end }} + BatchTimeout: 2s + BatchSize: + MaxMessageCount: 10 + AbsoluteMaxBytes: 98 MB + PreferredMaxBytes: 1024 KB +{{- if eq $.Values.consensus "kafka" }} + OrdererType: {{ $.Values.consensus }} + Kafka: + Brokers: + {{- range $.Values.kafka.brokers }} + - {{ . }} + {{- end }} +{{- end }} + Organizations: + Policies: + Readers: + Type: ImplicitMeta + Rule: "ANY Readers" + Writers: + Type: ImplicitMeta + Rule: "ANY Writers" + Admins: + Type: ImplicitMeta + Rule: "MAJORITY Admins" + BlockValidation: + Type: ImplicitMeta + Rule: "ANY Writers" + +Profiles: + OrdererGenesis: + <<: *ChannelDefaults + {{- with (first $.Values.channels) }} + Orderer: + <<: *OrdererDefaults + Organizations: + {{- range $org := .orderers }} + - *{{ $org }}Org + {{- end }} + Capabilities: + <<: *OrdererCapabilities + Consortiums: + {{ .consortium }}: + Organizations: + {{- range $org := .participants }} + - *{{ $org }}Org + {{- end }} + {{- end }} +{{- range $channel := $.Values.channels }} + {{ $channel.name }}: + Consortium: {{ $channel.consortium }} + <<: *ChannelDefaults + Application: + <<: *ApplicationDefaults + Organizations: + {{- range $org := $channel.participants }} + - *{{ $org }}Org + {{- end }} + Capabilities: + <<: *ApplicationCapabilities + {{- printf "\n" }} +{{- end }} diff --git a/platforms/hyperledger-fabric/charts/fabric-genesis/files/configtx_2_5.tpl b/platforms/hyperledger-fabric/charts/fabric-genesis/files/configtx_2_5.tpl new file mode 100644 index 00000000000..c312a4d1c45 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-genesis/files/configtx_2_5.tpl @@ -0,0 +1,127 @@ +# Configtx template for Fabric 2.5.x +Organizations: +{{- range $org := $.Values.organizations }} + - &{{ $org.name }}Org + Name: {{ $org.name }}MSP + ID: {{ $org.name }}MSP + MSPDir: ./crypto-config/organizations/{{ $org.name }}/msp + Policies: + Readers: + Type: Signature + Rule: "OR('{{ $org.name }}MSP.member')" + Writers: + Type: Signature + Rule: "OR('{{ $org.name }}MSP.member')" + Admins: + Type: Signature + Rule: "OR('{{ $org.name }}MSP.admin')" + Endorsement: + Type: Signature + Rule: "OR('{{ $org.name }}MSP.member')" + {{- if $org.orderers }} + OrdererEndpoints: + {{- range $orderer := $org.orderers }} + - {{ $orderer.ordererAddress }} + {{- end }} + {{- end }} + {{- printf "\n" }} +{{- end }} + +Capabilities: + Channel: &ChannelCapabilities + V2_0: true + Orderer: &OrdererCapabilities + V2_0: true + Application: &ApplicationCapabilities + V2_5: true + +Application: &ApplicationDefaults + Organizations: + Policies: + LifecycleEndorsement: + Type: ImplicitMeta + Rule: "MAJORITY Endorsement" + Endorsement: + Type: ImplicitMeta + Rule: "MAJORITY Endorsement" + Readers: + Type: ImplicitMeta + Rule: "ANY Readers" + Writers: + Type: ImplicitMeta + Rule: "ANY Writers" + Admins: + Type: ImplicitMeta + Rule: "MAJORITY Admins" + Capabilities: + <<: *ApplicationCapabilities + +Channel: &ChannelDefaults + Policies: + Readers: + Type: ImplicitMeta + Rule: "ANY Readers" + Writers: + Type: ImplicitMeta + Rule: "ANY Writers" + Admins: + Type: ImplicitMeta + Rule: "MAJORITY Admins" + Capabilities: + <<: *ChannelCapabilities + +Orderer: &OrdererDefaults + BatchTimeout: 2s + BatchSize: + MaxMessageCount: 10 + AbsoluteMaxBytes: 98 MB + PreferredMaxBytes: 1024 KB + Organizations: + Policies: + Readers: + Type: ImplicitMeta + Rule: "ANY 
Readers" + Writers: + Type: ImplicitMeta + Rule: "ANY Writers" + Admins: + Type: ImplicitMeta + Rule: "MAJORITY Admins" + BlockValidation: + Type: ImplicitMeta + Rule: "ANY Writers" + +Profiles: +{{- range $channel := $.Values.channels }} + {{ $channel.name }}: + <<: *ChannelDefaults + {{- if eq $.Values.consensus "raft" }} + Orderer: + <<: *OrdererDefaults + OrdererType: etcdraft + EtcdRaft: + Consenters: + {{- range $org := $.Values.organizations }} + {{- range $orderer := $org.orderers }} + {{- $split := split ":" $orderer.ordererAddress }} + - Host: {{ $split._0 }} + Port: {{ $split._1 }} + ClientTLSCert: ./crypto-config/organizations/{{ $org.name }}/orderers/{{ $orderer.name }}/tls/server.crt + ServerTLSCert: ./crypto-config/organizations/{{ $org.name }}/orderers/{{ $orderer.name }}/tls/server.crt + {{- end }} + {{- end }} + Organizations: + {{- range $orderer := $channel.orderers }} + - *{{ $orderer }}Org + {{- end }} + Capabilities: *OrdererCapabilities + {{- end }} + Application: + <<: *ApplicationDefaults + Organizations: + {{- range $org := $channel.participants }} + - *{{ $org }}Org + {{- end }} + Capabilities: *ApplicationCapabilities + {{- printf "\n" }} +{{- end }} diff --git a/platforms/hyperledger-fabric/charts/fabric-genesis/templates/_helpers.tpl b/platforms/hyperledger-fabric/charts/fabric-genesis/templates/_helpers.tpl new file mode 100644 index 00000000000..94a126b4377 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-genesis/templates/_helpers.tpl @@ -0,0 +1,28 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "fabric-genesis.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "fabric-genesis.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "fabric-genesis.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/platforms/hyperledger-fabric/charts/fabric-genesis/templates/configmap.yaml b/platforms/hyperledger-fabric/charts/fabric-genesis/templates/configmap.yaml new file mode 100644 index 00000000000..9975157e676 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-genesis/templates/configmap.yaml @@ -0,0 +1,59 @@ + ############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-configtx-yaml + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: configtx-yaml + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/part-of: {{ include "fabric-genesis.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +data: + configtx.yaml: |- +{{- if eq ($.Values.global.version | trunc 3) "2.5" }} + {{ tpl (.Files.Get "files/configtx_2_5.tpl") . | nindent 8 }} +{{- else }} + {{ tpl (.Files.Get "files/configtx_2_2.tpl") . | nindent 8 }} +{{- end }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-admin-msp-certs + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: admin-msp-certs + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/part-of: {{ include "fabric-genesis.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +data: +{{- range $org := .Values.organizations }} +{{- $file := $.Files.Get (printf "files/%s.json" $org.name) }} +{{- if $file }} + {{ $org.name }}.json: |- + {{ $.Files.Get (printf "files/%s.json" $org.name) | nindent 4 }} +{{- end }} +{{- $file := $.Files.Get (printf "files/%s-config-file.json" $org.name) }} +{{- if $file }} + {{ $org.name }}-config-file.json: |- + {{ $.Files.Get (printf "files/%s-config-file.json" $org.name) | nindent 4 }} +{{- end }} +{{- range $orderer := $org.orderers }} +{{- $ordfile := $.Files.Get (printf "files/%s-orderer-tls.json" $orderer.name) }} +{{- if $ordfile }} + {{ $orderer.name }}-orderer-tls.json: |- + {{ $.Files.Get (printf "files/%s-orderer-tls.json" $orderer.name) | nindent 4 }} +{{- end }} +{{- end }} +{{- end }} diff --git a/platforms/hyperledger-fabric/charts/fabric-genesis/templates/genesis-job-cleanup.yaml b/platforms/hyperledger-fabric/charts/fabric-genesis/templates/genesis-job-cleanup.yaml new file mode 100644 index 00000000000..aa02bbd789c --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-genesis/templates/genesis-job-cleanup.yaml @@ -0,0 +1,68 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ .Release.Name }}-cleanup + labels: + app.kubernetes.io/name: {{ .Release.Name }}-cleanup + app.kubernetes.io/component: genesis-job-cleanup + app.kubernetes.io/part-of: {{ include "fabric-genesis.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/managed-by: helm + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/hook-weight: "0" + helm.sh/hook: "pre-delete" + helm.sh/hook-delete-policy: "hook-succeeded" +spec: + backoffLimit: 3 + completions: 1 + template: + metadata: + labels: + app.kubernetes.io/name: {{ .Release.Name }}-cleanup + app.kubernetes.io/component: genesis-job-cleanup + app.kubernetes.io/part-of: {{ include "fabric-genesis.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/managed-by: helm + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + restartPolicy: "Never" + containers: + - name: delete-secrets + image: "{{ .Values.image.alpineUtils }}" + securityContext: + runAsUser: 0 + imagePullPolicy: IfNotPresent + env: + command: ["sh", "-c"] + args: + - |- +{{- if .Values.settings.removeConfigMapOnDelete }} + {{- range $channel := $.Values.channels }} + if kubectl get configmap --namespace {{ $.Release.Namespace }} {{ $channel.name }}-genesis &> /dev/null; then + echo "Deleting genesis-file configmap in k8s ..." 
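+            # remove the channel genesis ConfigMap that was created by the fabric-genesis job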
+ kubectl delete configmap --namespace {{ $.Release.Namespace }} {{ $channel.name }}-genesis + fi + if kubectl get configmap --namespace {{ $.Release.Namespace }} syschannel-genesis &> /dev/null; then + echo "Deleting genesis-file configmap in k8s ..." + kubectl delete configmap --namespace {{ $.Release.Namespace }} syschannel-genesis + fi + if kubectl get configmap --namespace {{ $.Release.Namespace }} {{ $channel.name }}-channeltx &> /dev/null; then + echo "Deleting channeltx configmap in k8s ..." + kubectl delete configmap --namespace {{ $.Release.Namespace }} {{ $channel.name }}-channeltx + fi + {{- range $participant := $channel.participants }} + if kubectl get configmap --namespace {{ $.Release.Namespace }} {{ $channel.name }}-{{ $participant }}-anchortx &> /dev/null; then + echo "Deleting anchortx configmap in k8s ..." + kubectl delete configmap --namespace {{ $.Release.Namespace }} {{ $channel.name }}-{{ $participant }}-anchortx + fi + {{- end }} + {{- end }} +{{- end}} + + + + + + diff --git a/platforms/hyperledger-fabric/charts/fabric-genesis/templates/job.yaml b/platforms/hyperledger-fabric/charts/fabric-genesis/templates/job.yaml new file mode 100644 index 00000000000..29e5e4e47aa --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-genesis/templates/job.yaml @@ -0,0 +1,346 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ .Release.Name }}-job + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/hook-delete-policy: "before-hook-creation" + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: {{ .Release.Name }}-job + app.kubernetes.io/component: fabric-genesis-job + app.kubernetes.io/part-of: {{ include "fabric-genesis.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +spec: + backoffLimit: 6 + template: + metadata: + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: {{ .Release.Name }}-job + app.kubernetes.io/component: fabric-genesis-job + app.kubernetes.io/part-of: {{ include "fabric-genesis.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + restartPolicy: OnFailure + imagePullSecrets: + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} + volumes: + - name: scripts-volume + configMap: + name: bevel-vault-script + - name: package-manager + configMap: + name: package-manager + - name: configtx-file + configMap: + name: {{ .Release.Name }}-configtx-yaml + defaultMode: 0775 + {{- range $org := $.Values.organizations }} + {{- $file := $.Files.Get (printf "files/%s.json" $org.name) }} + {{- if $file }} + - name: {{ $org.name }}-admin-msp + configMap: + name: {{ $.Release.Name }}-admin-msp-certs + items: + - key: {{ $org.name }}.json + path: {{ $org.name }}.json + {{- end }} + {{- $file := $.Files.Get (printf "files/%s-config-file.json" $org.name) }} + {{- if $file }} + - name: {{ $org.name }}-config-file + configMap: + name: {{ $.Release.Name }}-admin-msp-certs + items: + - key: {{ $org.name }}-config-file.json + path: {{ $org.name }}-config-file.json + {{- end }} + {{- range $orderer := $org.orderers }} + {{- $ordfile := $.Files.Get (printf "files/%s-orderer-tls.json" $orderer.name) }} + {{- if $ordfile }} + - name: {{ $orderer.name }}-tls-config + configMap: + name: {{ $.Release.Name }}-admin-msp-certs + items: + - key: {{ $orderer.name }}-orderer-tls.json + path: {{ $orderer.name }}-orderer-tls.json + {{- end }} + {{- end }} + {{- end }} + - name: certificates + emptyDir: + medium: Memory + containers: + - name: "generate-artifacts" + image: {{ .Values.image.fabricTools }}:{{ .Values.global.version }} + imagePullPolicy: IfNotPresent + env: + - name: VAULT_ADDR + value: {{ $.Values.global.vault.address }} + - name: VAULT_APP_ROLE + value: {{ $.Values.global.vault.role }} + - name: KUBERNETES_AUTH_PATH + value: {{ $.Values.global.vault.authPath }} + - name: VAULT_SECRET_ENGINE + value: "{{ .Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ .Values.global.vault.secretPrefix }}" + - name: VAULT_TYPE + value: "{{ $.Values.global.vault.type }}" + - name: NETWORK_VERSION + value: "{{ $.Values.global.version }}" + - name: PROXY + value: {{ .Values.global.proxy.provider }} + - name: EXTERNAL_URL_SUFFIX + value: {{ .Values.global.proxy.externalUrlSuffix }} + - name: COMPONENT_NAME + value: {{ .Release.Namespace }} + command: ["bash", "-c"] + args: + - |- + #!/usr/bin/env sh + safeConfigmap () { + FOLDER_PATH=$1 + NAME=$2 + KEY=$3 + kubectl get configmap --namespace ${COMPONENT_NAME} ${NAME} + if [ $? -ne 0 ]; then + kubectl create configmap --namespace ${COMPONENT_NAME} ${NAME} --from-file=${KEY}=$FOLDER_PATH + fi + } + . /scripts/package-manager.sh + # Define the packages to install + apt-get update --allow-releaseinfo-change + packages_to_install="jq curl wget" + install_packages "$packages_to_install" + # Download kubectl binary + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.27.0/bin/linux/amd64/kubectl; + chmod u+x kubectl && mv kubectl /usr/local/bin/kubectl; + +{{- if eq .Values.global.vault.type "hashicorp" }} + . /scripts/bevel-vault.sh + # Calling a function to retrieve the vault token. + vaultBevelFunc "init" + + function getMSPCerts { + KEY=$1 + path=$2 + mkdir -p ${path}/msp/admincerts + mkdir -p ${path}/msp/cacerts + mkdir -p ${path}/msp/tlscacerts + + echo "Getting TLS certificates from Vault." 
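+            # vaultBevelFunc "readJson" (defined in bevel-vault.sh) populates VAULT_SECRET with the JSON read from the given Vault path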
+ vaultBevelFunc "readJson" "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/users/${KEY}" + + ADMINCERT=$(echo ${VAULT_SECRET} | jq -r '.["admincerts"]') + CACERTS=$(echo ${VAULT_SECRET} | jq -r '.["cacerts"]') + TLSCACERTS=$(echo ${VAULT_SECRET} | jq -r '.["tlscacerts"]') + + echo "${ADMINCERT}" > ${path}/msp/admincerts/Admin@${COMPONENT_NAME}-cert.pem + echo "${TLSCACERTS}" > ${path}/msp/tlscacerts/ca.crt + echo "${CACERTS}" > ${path}/msp/cacerts/ca.crt + } + + function getOrdererTLSCerts { + KEY=$1 + path=$2 + mkdir -p ${path}/orderers/${KEY}/tls + echo "Getting TLS certificates from Vault." + vaultBevelFunc "readJson" "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/orderers/${KEY}-tls" + + TLS_SERVER_CERT=$(echo ${VAULT_SECRET} | jq -r '.["server_crt"]') + + echo "${TLS_SERVER_CERT}" > ${path}/orderers/${KEY}/tls/server.crt + } + function writeSafeSecret { + key=$1 + file=$2 + cat $file | base64 > ${key}.base64 + + vaultBevelFunc "init" + FILE_B64=$(cat ${key}.base64) + + echo " + { + \"data\": + { + \"${key}_base64\": \"${FILE_B64}\" + } + }" > payload.json + + vaultBevelFunc 'write' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/channel-artifacts/${key}" 'payload.json' + rm payload.json + + kubectl get configmap --namespace ${COMPONENT_NAME} ${key} + if [ $? -ne 0 ]; then + kubectl create configmap --namespace ${COMPONENT_NAME} ${key} --from-file=${key}_base64=${key}.base64 + fi + } + {{- else }} + function getMSPCerts { + key=$1 + path=$2 + mkdir -p ${path}/msp/admincerts + mkdir -p ${path}/msp/cacerts + mkdir -p ${path}/msp/tlscacerts + KUBENETES_SECRET=$(kubectl get secret ${key} --namespace ${COMPONENT_NAME} -o json) + if [ "$KUBENETES_SECRET" = "" ]; then + echo "Certficates absent in kuberenetes secrets" + exit 1 + else + ADMINCERT=$(echo "$KUBENETES_SECRET" | jq -r '.data.admincerts' | base64 -d) + CACERTS=$(echo "$KUBENETES_SECRET" | jq -r '.data.cacerts' | base64 -d) + TLSCACERTS=$(echo "$KUBENETES_SECRET" | jq -r '.data.tlscacerts' | base64 -d) + + echo "${ADMINCERT}" > ${path}/msp/admincerts/Admin@${COMPONENT_NAME}-cert.pem + echo "${TLSCACERTS}" > ${path}/msp/tlscacerts/ca.crt + echo "${CACERTS}" > ${path}/msp/cacerts/ca.crt + + fi + } + + function getOrdererTLSCerts { + key=$1 + path=$2 + mkdir -p ${path}/orderers/${key}/tls + + KUBENETES_SECRET=$(kubectl get secret ${key}-tls --namespace ${COMPONENT_NAME} -o json) + if [ "$KUBENETES_SECRET" = "" ]; then + echo "Certficates absent in kuberenetes secrets" + exit 1 + else + TLS_SERVER_CERT=$(echo "$KUBENETES_SECRET" | jq -r '.data.servercrt' | base64 -d) + echo "${TLS_SERVER_CERT}" > ${path}/orderers/${key}/tls/server.crt + fi + } + + function writeSafeSecret { + key=$1 + file=$2 + cat $file | base64 > ${key}.base64 + + kubectl get configmap --namespace ${COMPONENT_NAME} ${key} + if [ $? 
-ne 0 ]; then + kubectl create configmap --namespace ${COMPONENT_NAME} ${key} --from-file=${key}_base64=${key}.base64 + fi + } + {{- end }} + + OUTPUT_PATH_PEER="/templates/crypto-config/organizations" + {{- range $org := $.Values.organizations }} + if [ -e /templates/{{ $org.name }}.json ]; then + # Read the admin-msp details from files for other orgs + ADMINCERTS=$(jq -r '.data.admincerts' /templates/{{ $org.name}}.json) + CACERTS=$(jq -r '.data.cacerts' /templates/{{ $org.name}}.json) + TLSCACERTS=$(jq -r '.data.tlscacerts' /templates/{{ $org.name}}.json) + + mkdir -p ${OUTPUT_PATH_PEER}/{{ $org.name }}/msp/admincerts + mkdir -p ${OUTPUT_PATH_PEER}/{{ $org.name }}/msp/cacerts + mkdir -p ${OUTPUT_PATH_PEER}/{{ $org.name }}/msp/tlscacerts + + echo "$ADMINCERTS" | base64 -d > ${OUTPUT_PATH_PEER}/{{ $org.name }}/msp/admincerts/Admin@{{ $org.name }}-net-cert.pem + echo "$TLSCACERTS" | base64 -d > ${OUTPUT_PATH_PEER}/{{ $org.name }}/msp/tlscacerts/ca.crt + echo "$CACERTS" | base64 -d > ${OUTPUT_PATH_PEER}/{{ $org.name }}/msp/cacerts/ca.crt + else + # Read the admin-msp details from k8s secrets for org that is executing the genesis + getMSPCerts admin-msp ${OUTPUT_PATH_PEER}/{{ $org.name }} + fi + {{- if $org.peers }} + if [ -e /templates/{{ $org.name }}-config-file.json ]; then + # Read the MSP Configfile from files for other orgs + CONFIG_FILE=$(jq -r '.data."mspConfig"' /templates/{{ $org.name }}-config-file.json) + echo "$CONFIG_FILE" > ${OUTPUT_PATH_PEER}/{{ $org.name }}/msp/config.yaml + else + # Read the MSP Configfile from k8s secrets for org that is executing the genesis + {{- with (first $org.peers) }} + KUBENETES_SECRET=$(kubectl get configmap {{ .name }}-msp-config --namespace ${COMPONENT_NAME} -o json) + if [ "$KUBENETES_SECRET" = "" ]; then + echo "MSP Config absent in Kuberenetes" + exit 1 + else + CONFIG_FILE=$(echo "$KUBENETES_SECRET" | jq -r '.data.mspConfig') + echo "${CONFIG_FILE}" > ${OUTPUT_PATH_PEER}/{{ $org.name }}/msp/config.yaml + fi + {{- end }} + fi + {{- end }} + {{- if $org.orderers }} + {{- range $orderer := $org.orderers }} + if [ -e /templates/{{ $orderer.name }}-orderer-tls.json ]; then + # Read the MSP Configfile from files for other orgs + CONFIG_FILE=$(jq -r '.data.servercrt' /templates/{{ $orderer.name }}-orderer-tls.json) + echo "$CONFIG_FILE" | base64 -d > ${OUTPUT_PATH_PEER}/{{ $org.name }}/orderers/{{ $orderer.name }}/tls/server.crt + else + getOrdererTLSCerts {{ $orderer.name }} ${OUTPUT_PATH_PEER}/{{ $org.name }} + fi + {{- end }} + {{- end }} + {{- end }} + cd /templates + version2_5=`echo $NETWORK_VERSION | grep -c 2.5` + {{- if .Values.settings.generateGenesis }} + if [ $version2_5 = 1 ]; then + echo "version 2.5.x does not need syschannel genesis" + else + echo "version 2.2.x syschannel genesis" + configtxgen -configPath "/templates" -profile OrdererGenesis -channelID syschannel -outputBlock genesis.block + writeSafeSecret syschannel-genesis genesis.block + fi + {{- end }} + {{- range $channel := $.Values.channels }} + if [ $version2_5 = 1 ]; then + echo "version 2.5.x" + configtxgen -configPath "/templates" -profile {{ $channel.name }} -channelID {{ $channel.name }} -outputBlock {{ $channel.name }}.genesis.block + writeSafeSecret {{ $channel.name }}-genesis {{ $channel.name }}.genesis.block + else + echo "version 2.2.x" + configtxgen -configPath "/templates" -profile {{ $channel.name }} -channelID {{ $channel.name }} -outputCreateChannelTx {{ $channel.name }}.tx + writeSafeSecret {{ $channel.name }}-channeltx {{ $channel.name }}.tx + {{- range 
$participant := $channel.participants }} + configtxgen -configPath "/templates" -profile {{ $channel.name }} -channelID {{ $channel.name }} -asOrg {{ $participant }}MSP -outputAnchorPeersUpdate {{ $channel.name }}{{ $participant }}MSPAnchor.tx + writeSafeSecret {{ $channel.name }}-{{ $participant }}-anchortx {{ $channel.name }}{{ $participant }}MSPAnchor.tx + {{- end }} + fi + {{- end }} + volumeMounts: + - name: scripts-volume + mountPath: /scripts/bevel-vault.sh + subPath: bevel-vault.sh + - name: package-manager + mountPath: /scripts/package-manager.sh + subPath: package-manager.sh + - name: configtx-file + mountPath: /templates/configtx.yaml + subPath: configtx.yaml + - name: certificates + mountPath: /templates/crypto-config + {{- range $org := $.Values.organizations }} + {{- $file := $.Files.Get (printf "files/%s.json" $org.name) }} + {{- if $file }} + - name: {{ $org.name }}-admin-msp + mountPath: /templates/{{ $org.name }}.json + subPath: {{ $org.name }}.json + {{- end }} + {{- $file := $.Files.Get (printf "files/%s-config-file.json" $org.name) }} + {{- if $file }} + - name: {{ $org.name }}-config-file + mountPath: /templates/{{ $org.name }}-config-file.json + subPath: {{ $org.name }}-config-file.json + {{- end }} + {{- range $orderer := $org.orderers }} + {{- $ordfile := $.Files.Get (printf "files/%s-orderer-tls.json" $orderer.name) }} + {{- if $ordfile }} + - name: {{ $orderer.name }}-tls-config + mountPath: /templates/{{ $orderer.name }}-orderer-tls.json + subPath: {{ $orderer.name }}-orderer-tls.json + {{- end }} + {{- end }} + {{- end }} diff --git a/platforms/hyperledger-fabric/charts/fabric-genesis/values.yaml b/platforms/hyperledger-fabric/charts/fabric-genesis/values.yaml new file mode 100644 index 00000000000..44b7f300420 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-genesis/values.yaml @@ -0,0 +1,109 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +--- +# The following are for overriding global values +global: + # HLF Network Version + #Eg. version: 2.5.4 + version: 2.5.4 + #Provide the service account name which will be created. + serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # only 'false' is implemented + + vault: + #Provide the type of vault + #Eg. type: hashicorp + type: hashicorp + #Provide the vaultrole for an organization + #Eg. vaultrole: org1-vault-role + role: vault-role + #Provide the network type + network: fabric + #Provide the vault server address + #Eg. vaultaddress: http://Vault-884963190.eu-west-1.elb.amazonaws.com + address: + #Provide the kubernetes auth backed configured in vault for an organization + #Eg. authpath: supplychain + authPath: supplychain + #Provide the secret engine. + secretEngine: secretsv2 + #Provide the vault path where the secrets will be stored + secretPrefix: "data/supplychain" + + proxy: + #This will be the proxy/ingress provider. Can have values "haproxy" or "none" + #Eg. provider: "haproxy" + provider: haproxy + #This field specifies the external url for the organization + #Eg. 
externalUrlSuffix: test.blockchaincloudpoc.com + externalUrlSuffix: test.blockchaincloudpoc.com + +image: + #Provide the valid image repository for fabric tools + #Eg. fabricTools: hyperledger/fabric-tools + fabricTools: ghcr.io/hyperledger/bevel-fabric-tools + #Provide the valid image name and version to read certificates from vault server + #Eg.alpineutils: ghcr.io/hyperledger/bevel-alpine:latest + alpineUtils: ghcr.io/hyperledger/bevel-alpine:latest + #Provide the secret to use if private repository + #Eg. pullSecret: regcred + pullSecret: + +# Allows specification of one or many organizations that will be connecting to a network. +organizations: + # Specification for the 1st organization. + - name: supplychain + orderers: + - name: orderer1 + ordererAddress: orderer1.supplychain-net:7050 # Internal/External URI of the orderer + - name: orderer2 + ordererAddress: orderer2.supplychain-net:7050 + - name: orderer3 + ordererAddress: orderer3.supplychain-net:7050 + peers: + - name: peer0 + peerAddress: peer0.supplychain-net:7051 # Internal/External URI of the peer + - name: peer1 + peerAddress: peer1.supplychain-net:7051 + + # Specification for the 2nd organization. + - name: carrier + peers: + - name: peer0 + peerAddress: peer0.carrier-net:7051 # Internal/External URI of the peer + +#Provide name of the consensus. Currently support raft and kafka +#Eg. consensus: raft +consensus: raft +# kafka is only valid for consensus: kafka +#Provide the kafka broker list +kafka: + #Eg. brokers: + # - kafka-0.broker.example-com.svc.cluster.local:9092 + # - kafka-1.broker.example-com.svc.cluster.local:9092 + # - kafka-2.broker.example-com.svc.cluster.local:9092 + # - kafka-3.broker.example-com.svc.cluster.local:9092 + brokers: + +# The channels defined for a network with participating peers in each channel +channels: + - name: allchannel + consortium: SupplyChainConsortium + orderers: + - supplychain + participants: + - supplychain + - carrier + +settings: + # Flag to generate the genesis file for Fabrix 2.2.x + generateGenesis: true + # Flag to ensure the genesis configmap is removed on helm uninstall + removeConfigMapOnDelete: true diff --git a/platforms/hyperledger-fabric/charts/fabric-operations-console/templates/_helpers.tpl b/platforms/hyperledger-fabric/charts/fabric-operations-console/templates/_helpers.tpl index 7bf5f530a8e..8823df47301 100644 --- a/platforms/hyperledger-fabric/charts/fabric-operations-console/templates/_helpers.tpl +++ b/platforms/hyperledger-fabric/charts/fabric-operations-console/templates/_helpers.tpl @@ -2,4 +2,34 @@ {{ range $key, $val := $.Values.metadata.labels }} {{ $key }}: {{ $val }} {{ end }} -{{- end }} \ No newline at end of file +{{- end }} + +{{- define "labels.deployment" -}} +{{- if $.Values.labels }} +{{- range $key, $value := $.Values.labels.deployment }} +{{- range $k, $v := $value }} + {{ $k }}: {{ $v | quote }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} + +{{- define "labels.service" -}} +{{- if $.Values.labels }} +{{- range $key, $value := $.Values.labels.service }} +{{- range $k, $v := $value }} + {{ $k }}: {{ $v | quote }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} + +{{- define "labels.pvc" -}} +{{- if $.Values.labels }} +{{- range $key, $value := $.Values.labels.pvc }} +{{- range $k, $v := $value }} + {{ $k }}: {{ $v | quote }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/platforms/hyperledger-fabric/charts/fabric-operations-console/templates/deployment.yaml 
b/platforms/hyperledger-fabric/charts/fabric-operations-console/templates/deployment.yaml index 4a1224d9ead..a23be73df84 100644 --- a/platforms/hyperledger-fabric/charts/fabric-operations-console/templates/deployment.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-operations-console/templates/deployment.yaml @@ -16,6 +16,7 @@ metadata: app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} {{- include "labels.custom" . | nindent 2 }} + {{ include "labels.deployment" . | nindent 2 }} annotations: {{- if $.Values.annotations }} {{- range $key, $value := $.Values.annotations.deployment }} @@ -44,6 +45,7 @@ spec: app.kubernetes.io/name: {{ $.Values.service.name }} helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} app.kubernetes.io/instance: {{ .Release.Name }} + {{ include "labels.deployment" . | nindent 6 }} spec: serviceAccountName: {{ $.Values.service.serviceaccountname }} {{- if .Values.service.imagesecretname }} diff --git a/platforms/hyperledger-fabric/charts/fabric-operations-console/templates/pvc.yaml b/platforms/hyperledger-fabric/charts/fabric-operations-console/templates/pvc.yaml index 94dbc75d7bd..b341d78e267 100644 --- a/platforms/hyperledger-fabric/charts/fabric-operations-console/templates/pvc.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-operations-console/templates/pvc.yaml @@ -15,6 +15,7 @@ metadata: helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} + {{ include "labels.pvc" . | nindent 2 }} annotations: {{- if $.Values.annotations }} {{- range $key, $value := $.Values.annotations.pvc }} diff --git a/platforms/hyperledger-fabric/charts/fabric-operations-console/templates/service.yaml b/platforms/hyperledger-fabric/charts/fabric-operations-console/templates/service.yaml index ce803a8d957..97d6e197379 100644 --- a/platforms/hyperledger-fabric/charts/fabric-operations-console/templates/service.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-operations-console/templates/service.yaml @@ -29,6 +29,7 @@ metadata: app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} {{- include "labels.custom" . | nindent 2 }} + {{ include "labels.service" . | nindent 2 }} spec: type: {{ $.Values.service.servicetype }} selector: diff --git a/platforms/hyperledger-fabric/charts/fabric-operations-console/values.yaml b/platforms/hyperledger-fabric/charts/fabric-operations-console/values.yaml index 0b1b1049069..3b4f69d49a9 100644 --- a/platforms/hyperledger-fabric/charts/fabric-operations-console/values.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-operations-console/values.yaml @@ -80,3 +80,8 @@ proxy: #This field contains the external URL of the organization #Eg. external_url_suffix: orderer1.org1proxy.blockchaincloudpoc.com:443 external_url_suffix: orderer1.org1proxy.blockchaincloudpoc.com:443 + +labels: + service: [] + pvc: [] + deployment: [] diff --git a/platforms/hyperledger-fabric/charts/fabric-orderernode/.helmignore b/platforms/hyperledger-fabric/charts/fabric-orderernode/.helmignore new file mode 100644 index 00000000000..014fa775608 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-orderernode/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +generated_config/ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/platforms/hyperledger-fabric/charts/fabric-orderernode/Chart.yaml b/platforms/hyperledger-fabric/charts/fabric-orderernode/Chart.yaml index dda2a22b87b..fcce9ef2506 100644 --- a/platforms/hyperledger-fabric/charts/fabric-orderernode/Chart.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-orderernode/Chart.yaml @@ -5,7 +5,23 @@ ############################################################################################## apiVersion: v1 -appVersion: "2.0" -description: "Hyperledger Fabric: Deploys orderer node." name: fabric-orderernode -version: 1.0.0 +description: "Hyperledger Fabric: Deploys orderer node." +version: 1.1.0 +appVersion: latest +keywords: + - bevel + - hlf + - fabric + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org + diff --git a/platforms/hyperledger-fabric/charts/fabric-orderernode/README.md b/platforms/hyperledger-fabric/charts/fabric-orderernode/README.md index 1c9f8f4fa48..6e45da36e52 100644 --- a/platforms/hyperledger-fabric/charts/fabric-orderernode/README.md +++ b/platforms/hyperledger-fabric/charts/fabric-orderernode/README.md @@ -3,225 +3,148 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) - -# Orderer Node Hyperledger Fabric Deployment +# fabric-orderernode -- [Orderer Node Hyperledger Fabric Deployment Helm Chart](#orderer-node-hyperledger-fabric-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Verification](#verification) -- [Updating the Deployment](#updating-the-deployment) -- [Deletion](#deletion) -- [Contributing](#contributing) -- [License](#license) +This chart is a component of Hyperledger Bevel. The fabric-orderernode chart deploys a Orderer Node for Hyperledger Fabric blockchain network. If enabled, the keys are stored on the configured vault and stored as Kubernetes secrets. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details. +## TL;DR - -## Orderer Node Hyperledger Fabric Deployment Helm Chart ---- -A [Helm chart](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-fabric/charts/fabric-orderernode) for orderer node. - - - -## Prerequisites ---- -Before deploying the Helm chart, make sure to have the following prerequisites: - -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- HAproxy is required as ingress controller. -- Helm installed. 
- - - -## Chart Structure ---- -The structure of the Helm chart is as follows: - -``` -fabric-orderernode/ - |- templates/ - |- _helpers.yaml - |- configmap.yaml - |- deployment.yaml - |- service.yaml - |- servicemonitor.yaml - |- Chart.yaml - |- README.md - |- values.yaml +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install orderer1 bevel/fabric-orderernode ``` -- `templates/`: Contains the Kubernetes manifest templates that define the resources to be deployed. -- `helpers.tpl`: Contains custom label definitions used in other templates. -- `configmap.yaml`: Defines two ConfigMaps, one for the orderer configuration and one for the genesis block. -- `deployment.yaml`: The kafka-healthcheck checks the health of the Kafka brokers before the main container is started. The certificates-init fetches the TLS and MSP certificates from Vault and stores them in a local directory. The {{ $.Values.orderer.name }} runs the Hyperledger Fabric orderer. The grpc-web exposes the orderer's gRPC API over HTTP/WebSockets. These containers are responsible for ensuring that the orderer is up and running, that it has the necessary certificates, and that it can be accessed by clients. -- `service.yaml`: Ensures internal and external access with exposed ports for gRPC (7050), gRPC-Web (7443), and operations (9443), and optionally uses HAProxy for external exposure and secure communication. -- `servicemonitor.yaml`: Define a ServiceMonitor resource that allows Prometheus to collect metrics from the orderer node's "operations" port. The configuration is conditionally applied based on the availability of the Prometheus Operator's API version and whether metrics are enabled for the orderer service. -- `Chart.yaml`: Contains the metadata for the Helm chart, such as the name, version, and description. -- `README.md`: Provides information and instructions about the Helm chart. -- `values.yaml`: Contains the default configuration values for the Helm chart. - - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-fabric/charts/fabric-orderernode/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. 
Here are some important configuration options: - -### Metadata - -| Name | Description | Default Value | -| ---------------------- | ----------------------------------------------------------------------| -------------------------------------------------| -| namespace | Namespace for orderer | org1-net | -| network.version | HyperLedger Fabric network version | 2.2.2 | -| images.orderer | Valid image name and version for fabric orderer | ghcr.io/hyperledger/bevel-fabric-orderer:2.2.2 | -| images.alpineutils | Valid image name and version to read certificates from vault server | ghcr.io/hyperledger/bevel-alpine:latest | -| images.healthcheck | Valid image name and version for health check of Kafka | busybox | -| labels | Custom labels | "" | - -### Orderer - -| Name | Description | Default Value | -| --------------------------- | ----------------------------------------------------------------------- | ----------------| -| name | Name for the orderer node | orderer | -| loglevel | Log level for orderer deployment | info | -| localmspid | Local MSP ID for orderer deployment | OrdererMSP | -| tlsstatus | Enable/disable TLS for orderer deployment | true | -| keepaliveserverinterval | Interval in which the orderer signals the connection has kept alive | 10s | -| address | Provide the address for orderer | orderer1.org1proxy.blockchaincloudpoc.com:443 | - -### Consensus - -| Name | Description | Default Value | -| ---------| ----------------------------| ----------------| -| name | Name of the consensus | raft | - -### Storage - -| Name | Description | Default Value | -| ----------------------| -----------------------------------| ----------------| -| storageclassname | Storage class name for orderer | aws-storageclassname | -| storagesize | Storage size for storage class | 512Mi | - -### Service - -| Name | Description | Default Value | -| ------------------------------| ------------------------------------------| ----------------| -| servicetype | Service type for orderer | ClusterIP | -| ports.grpc.nodeport | Cluster IP port for grpc service | "" | -| ports.grpc.clusteripport | Cluster IP port for grpc service | 7050 | -| ports.metrics.enabled | Enable/disable metrics service | false | -| ports.metrics.clusteripport | Cluster IP port for metrics service | 9443 | - -### Annotations - -| Name | Description | Default Value | -| ---------------| --------------------------------------- | --------------| -| service | Extra annotations for service | "" | -| deployment | Extra annotations for deployment | "" | - -### Vault - -| Name | Description | Default Value | -| --------------------------- | --------------------------------------------------------------------| --------------------------------- | -| address | Vault server address | "" | -| role | Vault role for orderer deployment | vault-role | -| authpath | Kubernetes auth backend configured in vault for orderer deployment | devorg1-net-auth | -| type | Provide the type of vault | hashicorp | -| secretprefix | Vault secretprefix | secretsv2/data/crypto/ordererOrganizations/org1-net/orderers/orderer.org1-net | -| imagesecretname | Image secret name for vault | "" | -| serviceaccountname | Service account name for vault | vault-auth | -| tls | Enable/disable TLS for vault communication | "" | - -### Kafka - -| Name | Description | Default Value | -| --------------------------- | ------------------------------------------------------------------------| ----------------| -| readinesscheckinterval | Interval in seconds to check readiness of Kafka 
services | 5 | -| readinessthreshold | Threshold for checking if specified Kafka brokers are up and running | 4 | -| brokers | List of Kafka broker addresses | "" | - -### Proxy - -| Name | Description | Default Value | -| --------------------------- | --------------------------------------- | ------------------------------ | -| provider | Proxy/ingress provider | none | -| external_url_suffix | External URL suffix of the organization | org1proxy.blockchaincloudpoc.com:443 | - -### Config - -| Name | Description | Default Value | -| --------------------------- | --------------------------------------- | ------------------------------ | -| pod.resources.limits.memory | Limit memory for node | 512M | -| pod.resources.limits.cpu | Limit CPU for node | 1 | -| pod.resources.requests.memory | Requested memory for node | 512M | -| pod.resources.requests.cpu | Requested CPU for node | 0.25 | +## Prerequisites +- Kubernetes 1.19+ +- Helm 3.2.0+ - -## Deployment ---- +If Hashicorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ -To deploy the fabric-orderernode Helm chart, follow these steps: +> **Important**: Also check the dependent charts. -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-orderernode/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install the chart: - ``` - $ helm repo add bevel https://hyperledger.github.io/bevel/ - $ helm install ./fabric-orderernode - ``` -Replace `` with the desired name for the release. +## Installing the Chart -This will deploy the fabric-orderernode node to the Kubernetes cluster based on the provided configurations. +To install the chart with the release name `orderer1`: +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install orderer1 bevel/fabric-orderernode +``` - -## Verification ---- +The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. -To verify the deployment, we can use the following command: -``` -$ kubectl get statefulsets -n -``` -Replace `` with the actual namespace where the StatefulSet was created. This command will display information about the StatefulSet, including the number of replicas and their current status. +> **Tip**: List all releases using `helm list` +## Uninstalling the Chart - -## Updating the Deployment ---- +To uninstall/delete the `orderer1` deployment: -If we need to update the deployment with new configurations or changes, modify the same [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-orderernode/values.yaml) file with the desired changes and run the following Helm command: +```bash +helm uninstall orderer1 ``` -$ helm upgrade ./fabric-orderernode -``` -Replace `` with the name of the release. This command will apply the changes to the deployment, ensuring the fabric-orderernode node is up to date. - - -## Deletion ---- +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global + +These parameters are refered to as same in each parent or child chart +| Name | Description | Default Value | +|--------|---------|-------------| +|`global.version` | Fabric Version. 
| `2.5.4` |
+| `global.serviceAccountName` | The service account name that will be created for Vault auth and Kubernetes secret management | `vault-auth` |
+| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. Currently only `aws`, `azure` and `minikube` are tested | `aws` |
+| `global.cluster.cloudNativeServices` | Only `false` is implemented; `true` (use Cloud Native Services: SecretsManager and IAM for AWS, KeyVault & Managed Identities for Azure) is reserved for future releases | `false` |
+| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` and `kubernetes` are supported. | `hashicorp` |
+| `global.vault.role` | Role used for authentication with Vault | `vault-role` |
+| `global.vault.address`| URL of the Vault server. | `""` |
+| `global.vault.authPath` | Authentication path for Vault | `supplychain` |
+| `global.vault.secretEngine` | Vault secret engine name | `secretsv2` |
+| `global.vault.secretPrefix` | Vault secret prefix which must start with `data/` | `data/supplychain` |
+| `global.vault.tls` | Name of the Kubernetes secret which has certs to connect to TLS-enabled Vault | `""` |
+| `global.proxy.provider` | The proxy or Ingress provider. Can be `none` or `haproxy` | `haproxy` |
+| `global.proxy.externalUrlSuffix` | The external URL suffix at which the Fabric gRPC services will be available | `test.blockchaincloudpoc.com` |
-To delete the deployment and associated resources, run the following Helm command:
-```
-$ helm uninstall 
-```
-Replace `` with the name of the release. This command will remove all the resources created by the Helm chart.
+### Storage
+| Name | Description | Default Value |
+|--------|---------|-------------|
+| `storage.size` | Size of the PVC needed for the Orderer Node | `512Mi` |
+| `storage.reclaimPolicy` | Reclaim policy for the PVC. Choose from: `Delete` or `Retain` | `Delete` |
+| `storage.volumeBindingMode` | Volume binding mode for the PVC. Choose from: `Immediate` or `WaitForFirstConsumer` | `Immediate` |
+| `storage.allowedTopologies.enabled` | Check [bevel-storageclass](../../../shared/charts/bevel-storageclass/README.md) for details | `false` |
+
+### Certs
+
+| Name | Description | Default Value |
+|--------|---------|-------------|
+| `certs.generateCertificates` | Flag to generate certificates for the Orderer Node | `true` |
+| `certs.orgData.caAddress` | Address of the CA server, without the protocol prefix | `ca.supplychain-net:7051` |
+| `certs.orgData.caAdminUser` | CA Admin Username | `supplychain-admin` |
+| `certs.orgData.caAdminPassword` | CA Admin Password | `supplychain-adminpw` |
+| `certs.orgData.orgName` | Organization Name | `supplychain` |
+| `certs.orgData.type` | Type of certificate to generate, chosen from `orderer` or `peer` | `orderer` |
+| `certs.orgData.componentSubject` | X.509 subject for the organization | `"O=Orderer,L=51.50/-0.13/London,C=GB"` |
+| `certs.settings.createConfigMaps` | Flag to create ConfigMaps. Must be set to `false` for additional orderers/peers in the same organization.
| `true` | +| `certs.settings.refreshCertValue` | Flag to refresh User certificates | `false` | +| `certs.settings.addPeerValue` | Flag to be used when adding a new peer to the organization | `false` | +| `certs.settings.removeCertsOnDelete` | Flag to delete the user and peer certificates on uninstall | `false` | +| `certs.settings.removeOrdererTlsOnDelete` | Flag to delete the orderer TLS certificates on uninstall | `false` | + +### Image + +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `image.orderer` |Fabric Orderer image repository | `ghcr.io/hyperledger/bevel-fabric-orderer` | +| `image.alpineUtils` | Alpine utils image repository and tag | `ghcr.io/hyperledger/bevel-alpine:latest` | +| `image.healthCheck` | Busybox image repository and tag | `busybox` | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Orderer Node Hyperledger Fabric Deployment Helm Chart](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-orderernode), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). +### Orderer +| Name | Description | Default Value | +|--------|---------|-------------| +| `orderer.consensus` | Consensus type for the Orderer Node | `raft` | +| `orderer.logLevel` | Log level for the Orderer Node | `info` | +| `orderer.localMspId` | Local MSP ID for the Orderer Organization | `supplychainMSP` | +| `orderer.tlsStatus` | TLS status of the Orderer Node | `true` | +| `orderer.keepAliveServerInterval` | Keep Alive Interval in Seconds | `10s` | +| `orderer.serviceType` | Service Type for the Ordering Service | `ClusterIP` | +| `orderer.ports.grpc.nodePort` | NodePort for the Orderer GRPC Service | `""` | +| `orderer.ports.grpc.clusterIpPort` | TCP Port for the Orderer GRPC Service | `7050` | +| `orderer.ports.metrics.enabled` | Flag to enable metrics port | `false` | +| `orderer.ports.metrics.clusterIpPort` | TCP Port for the Orderer metrics | `9443` | +| `orderer.resources.limits.memory` | Memory limit for the Orderer Node | `512M` | +| `orderer.resources.limits.cpu` | CPU limit for the Orderer Node | `1` | +| `orderer.resources.requests.memory` | Memory request for the Orderer Node | `512M` | +| `orderer.resources.requests.cpu` | CPU request for the Orderer Node | `0.25` | + +### Settings + +| Name | Description | Default Value | +| ----------------| ----------- | ------------- | +| `kafka.readinessCheckInterval` | Interval between readiness checks for the Brokers | `5` | +| `kafka.readinessThresHold` | Threshold for readiness checks for the Brokers | `1` | +| `kafka.brokers` | List of Kafka Broker Addresses | `""` | +| `healthCheck.retries` | Retry count to connect to Vault | `20` | +| `healthCheck.sleepTimeAfterError` | Wait seconds after unsuccessful connection attempt | `15` | + +### Labels + +| Name | Description | Default Value | +| ----------------| ----------- | ------------- | +| `labels.service` | Array of Labels for service object | `[]` | +| `labels.pvc` | Array of Labels for PVC object | `[]` | +| `labels.deployment` | Array of Labels for deployment or statefulset object | `[]` | - ## License This chart is licensed under the Apache v2.0 license. 
-Copyright © 2023 Accenture +Copyright © 2024 Accenture ### Attribution diff --git a/platforms/hyperledger-fabric/charts/fabric-orderernode/requirements.yaml b/platforms/hyperledger-fabric/charts/fabric-orderernode/requirements.yaml new file mode 100644 index 00000000000..77e9a174a8a --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-orderernode/requirements.yaml @@ -0,0 +1,14 @@ +dependencies: + - name: bevel-storageclass + alias: storage + repository: "file://../../../shared/charts/bevel-storageclass" + tags: + - storage + version: ~1.0.0 + - name: fabric-catools + alias: certs + repository: "file://../fabric-catools" + tags: + - catools + version: ~1.1.0 + condition: certs.generateCertificates diff --git a/platforms/hyperledger-fabric/charts/fabric-orderernode/templates/_helpers.tpl b/platforms/hyperledger-fabric/charts/fabric-orderernode/templates/_helpers.tpl index 7bf5f530a8e..c5697ed561c 100644 --- a/platforms/hyperledger-fabric/charts/fabric-orderernode/templates/_helpers.tpl +++ b/platforms/hyperledger-fabric/charts/fabric-orderernode/templates/_helpers.tpl @@ -1,5 +1,46 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} \ No newline at end of file +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "fabric-orderernode.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "fabric-orderernode.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "fabric-orderernode.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "labels.deployment" -}} +{{- range $value := $.Values.labels.deployment }} +{{ toYaml $value }} +{{- end }} +{{- end }} + +{{- define "labels.service" -}} +{{- range $value := $.Values.labels.service }} +{{ toYaml $value }} +{{- end }} +{{- end }} + +{{- define "labels.pvc" -}} +{{- range $value := $.Values.labels.pvc }} +{{ toYaml $value }} +{{- end }} +{{- end }} diff --git a/platforms/hyperledger-fabric/charts/fabric-orderernode/templates/configmap.yaml b/platforms/hyperledger-fabric/charts/fabric-orderernode/templates/configmap.yaml index 03e3239be3b..f7c63d12aec 100644 --- a/platforms/hyperledger-fabric/charts/fabric-orderernode/templates/configmap.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-orderernode/templates/configmap.yaml @@ -7,27 +7,29 @@ apiVersion: v1 kind: ConfigMap metadata: - name: {{ $.Values.orderer.name }}-config - namespace: {{ $.Values.metadata.namespace }} + name: {{ .Release.Name }}-config + namespace: {{ .Release.Namespace }} labels: - app.kubernetes.io/name: {{ $.Values.orderer.name }}-config - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + app: {{ .Release.Name }} + app.kubernetes.io/name: {{ .Release.Name }} + app.kubernetes.io/component: fabric + app.kubernetes.io/part-of: {{ include "fabric-orderernode.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} data: - FABRIC_LOGGING_SPEC: {{ $.Values.orderer.loglevel }} + FABRIC_LOGGING_SPEC: {{ .Values.orderer.logLevel }} ORDERER_GENERAL_LISTENADDRESS: 0.0.0.0 -{{ if contains "2.5" $.Values.metadata.network.version }} +{{ if contains "2.5" .Values.global.version }} ORDERER_GENERAL_BOOTSTRAPMETHOD: "none" {{ else }} ORDERER_GENERAL_GENESISMETHOD: file ORDERER_GENERAL_GENESISFILE: /var/hyperledger/orderer/orderer.genesis.block {{ end }} - ORDERER_GENERAL_LOCALMSPID: {{ $.Values.orderer.localmspid }} - ORDERER_GENERAL_KEEPALIVE_SERVERINTERVAL: {{ $.Values.orderer.keepaliveserverinterval }} + ORDERER_GENERAL_LOCALMSPID: {{ .Values.orderer.localMspId }} + ORDERER_GENERAL_KEEPALIVE_SERVERINTERVAL: {{ .Values.orderer.keepAliveServerInterval }} ORDERER_GENERAL_LOCALMSPDIR: /var/hyperledger/orderer/crypto/msp - ORDERER_GENERAL_TLS_ENABLED: "{{ $.Values.orderer.tlsstatus }}" + ORDERER_GENERAL_TLS_ENABLED: "{{ .Values.orderer.tlsStatus }}" ORDERER_GENERAL_TLS_PRIVATEKEY: /var/hyperledger/orderer/crypto/tls/server.key ORDERER_GENERAL_TLS_CERTIFICATE: /var/hyperledger/orderer/crypto/tls/server.crt ORDERER_GENERAL_TLS_ROOTCAS: '[/var/hyperledger/orderer/crypto/tls/ca.crt]' @@ -39,8 +41,8 @@ data: ORDERER_KAFKA_RETRY_SHORTTOTAL: "30s" ORDERER_KAFKA_VERBOSE: "true" GODEBUG: "netdns=go" - ORDERER_OPERATIONS_LISTENADDRESS: 0.0.0.0:10443 -{{ if contains "2.5" $.Values.metadata.network.version }} + ORDERER_OPERATIONS_LISTENADDRESS: 0.0.0.0:9443 +{{ if contains "2.5" .Values.global.version }} ORDERER_ADMIN_LISTENADDRESS: 0.0.0.0:7055 ORDERER_ADMIN_TLS_ENABLED: "true" ORDERER_ADMIN_TLS_PRIVATEKEY: /var/hyperledger/orderer/crypto/tls/server.key @@ -49,19 +51,3 @@ data: ORDERER_ADMIN_TLS_CLIENTROOTCAS: '[/var/hyperledger/orderer/crypto/tls/ca.crt]' {{ end }} ---- -{{- if ne $.Values.metadata.network.version "2.5.4" }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: genesis-block-{{ $.Values.orderer.name }} - namespace: {{ $.Values.metadata.namespace }} - labels: - app.kubernetes.io/name: genesis-block - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} -data: - genesis.block.base64: {{ .Values.genesis | quote }} -{{ end }} diff --git a/platforms/hyperledger-fabric/charts/fabric-orderernode/templates/deployment.yaml b/platforms/hyperledger-fabric/charts/fabric-orderernode/templates/deployment.yaml deleted file mode 100644 index 0946683fde2..00000000000 --- a/platforms/hyperledger-fabric/charts/fabric-orderernode/templates/deployment.yaml +++ /dev/null @@ -1,268 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ $.Values.orderer.name }} - namespace: {{ $.Values.metadata.namespace }} - labels: - app.kubernetes.io/name: {{ $.Values.orderer.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} - annotations: - {{- if $.Values.annotations }} - {{- range $key, $value := $.Values.annotations.deployment }} - {{- range $k, $v := $value }} - {{ $k }}: {{ $v | quote }} - {{- end }} - {{- end }} - {{- end }} -spec: - updateStrategy: - type: RollingUpdate - serviceName: "{{ $.Values.orderer.name }}" - replicas: 1 - selector: - matchLabels: - app: {{ $.Values.orderer.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/name: {{ $.Values.orderer.name }} - app.kubernetes.io/instance: {{ .Release.Name }} - template: - metadata: - annotations: - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} - labels: - app: {{ $.Values.orderer.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/name: {{ $.Values.orderer.name }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - {{- if .Values.vault.imagesecretname }} - imagePullSecrets: - - name: {{ $.Values.vault.imagesecretname }} - {{- end }} - volumes: - - name: certificates - emptyDir: - medium: Memory - {{ if .Values.vault.tls }} - - name: vaultca - secret: - secretName: {{ $.Values.vault.tls }} - items: - - key: ca.crt.pem - path: ca-certificates.crt # curl expects certs to be in /etc/ssl/certs/ca-certificates.crt - {{ end }} - {{- if ne $.Values.metadata.network.version "2.5.4" }} - - name: {{ $.Values.orderer.name }}-genesis-volume - configMap: - name: genesis-block-{{ $.Values.orderer.name }} - items: - - key: genesis.block.base64 - path: genesis.block.base64 - {{ end }} - - name: scripts-volume - configMap: - name: bevel-vault-script - initContainers: - - name: kafka-healthcheck - image: {{ $.Values.metadata.images.healthcheck }} - imagePullPolicy: IfNotPresent - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - if [ {{ $.Values.consensus.name }} == kafka ] - then - COUNTER=1 - FLAG=true - KAFKACOUNT=0 - COUNT=0 - {{ range $.Values.kafka.brokers}} - COUNT=`expr "$COUNT" + 1` - {{ end }} - while [ "$COUNTER" -le {{ $.Values.kafka.readinessthreshold }} ] - do - {{ range $.Values.kafka.brokers}} - KAFKA_BROKERS={{ . }} - STATUS=$(nc -vz $KAFKA_BROKERS 2>&1 | grep -c open ) - if [ "$STATUS" == 0 ] - then - FLAG=false - else - FLAG=true - KAFKACOUNT=`expr "$KAFKACOUNT" + 1` - echo "$KAFKACOUNT kafka brokers out of $COUNT are up and running" - fi - {{ end }} - if [ "$FLAG" == false ] - then - echo "$KAFKACOUNT kafka brokers out of $COUNT are up and running!" - echo "Retry attempted $COUNTER times, retrying after {{ $.Values.kafka.readinesscheckinterval }} seconds" - COUNTER=`expr "$COUNTER" + 1` - sleep {{ $.Values.kafka.readinesscheckinterval }} - else - echo "SUCCESS!" - echo "All $KAFKACOUNT kafka broker are up and running!" 
- exit 0 - break - fi - done - if [ "$COUNTER" -gt {{ $.Values.kafka.readinessthreshold }} ] || [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, no kafka brokers are up and running. Giving up!" - exit 1 - break - fi - fi - - name: certificates-init - image: {{ $.Values.metadata.images.alpineutils }} - imagePullPolicy: IfNotPresent - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: VAULT_SECRET_PREFIX - value: "{{ $.Values.vault.secretprefix }}" - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: MOUNT_PATH - value: /secret - - name: VAULT_TYPE - value: "{{ $.Values.vault.type }}" - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - . /scripts/bevel-vault.sh - - # Calling a function to retrieve the vault token. - vaultBevelFunc "init" - - echo "Getting TLS certificates from Vault." - vaultBevelFunc "readJson" "${VAULT_SECRET_PREFIX}/tls" - - TLS_CA_CERT=$(echo ${VAULT_SECRET} | jq -r '.["ca.crt"]') - TLS_SERVER_CERT=$(echo ${VAULT_SECRET} | jq -r '.["server.crt"]') - TLS_SERVER_KEY=$(echo ${VAULT_SECRET} | jq -r '.["server.key"]') - - OUTPUT_PATH="${MOUNT_PATH}/tls" - mkdir -p ${OUTPUT_PATH} - echo "${TLS_CA_CERT}" >> ${OUTPUT_PATH}/ca.crt - echo "${TLS_SERVER_CERT}" >> ${OUTPUT_PATH}/server.crt - echo "${TLS_SERVER_KEY}" >> ${OUTPUT_PATH}/server.key - - echo "Getting MSP certificates from Vault." - vaultBevelFunc "readJson" "${VAULT_SECRET_PREFIX}/msp" - - ADMINCERT=$(echo ${VAULT_SECRET} | jq -r '.["admincerts"]') - CACERTS=$(echo ${VAULT_SECRET} | jq -r '.["cacerts"]') - KEYSTORE=$(echo ${VAULT_SECRET} | jq -r '.["keystore"]') - SIGNCERTS=$(echo ${VAULT_SECRET} | jq -r '.["signcerts"]') - TLSCACERTS=$(echo ${VAULT_SECRET} | jq -r '.["tlscacerts"]') - - OUTPUT_PATH="${MOUNT_PATH}/msp" - mkdir -p ${OUTPUT_PATH}/admincerts - mkdir -p ${OUTPUT_PATH}/cacerts - mkdir -p ${OUTPUT_PATH}/keystore - mkdir -p ${OUTPUT_PATH}/signcerts - mkdir -p ${OUTPUT_PATH}/tlscacerts - - echo "${ADMINCERT}" >> ${OUTPUT_PATH}/admincerts/admin.crt - echo "${CACERTS}" >> ${OUTPUT_PATH}/cacerts/ca.crt - echo "${KEYSTORE}" >> ${OUTPUT_PATH}/keystore/server.key - echo "${SIGNCERTS}" >> ${OUTPUT_PATH}/signcerts/server.crt - echo "${TLSCACERTS}" >> ${OUTPUT_PATH}/tlscacerts/tlsca.crt - volumeMounts: - - name: certificates - mountPath: /secret - {{ if .Values.vault.tls }} - - name: vaultca - mountPath: "/etc/ssl/certs/" - readOnly: true - {{ end }} - - name: scripts-volume - mountPath: /scripts/bevel-vault.sh - subPath: bevel-vault.sh - containers: - - name: {{ $.Values.orderer.name }} - image: {{ $.Values.metadata.images.orderer }} - imagePullPolicy: IfNotPresent - workingDir: /opt/gopath/src/github.com/hyperledger/fabric - command: ["sh", "-c", "cat /var/hyperledger/orderer/genesis/genesis.block.base64 | base64 -d > /var/hyperledger/orderer/orderer.genesis.block && orderer"] - ports: - - containerPort: 7050 - - name: operations - containerPort: 10443 - envFrom: - - configMapRef: - name: {{ $.Values.orderer.name }}-config - volumeMounts: - - name: datadir - mountPath: /var/hyperledger/production/orderer - {{- if ne $.Values.metadata.network.version "2.5.4" }} - - name: {{ $.Values.orderer.name }}-genesis-volume - mountPath: /var/hyperledger/orderer/genesis - readOnly: true - {{- end }} - - name: certificates - mountPath: /var/hyperledger/orderer/crypto - readOnly: true - resources: - requests: - memory: {{ .Values.config.pod.resources.requests.memory }} - cpu: {{ 
.Values.config.pod.resources.requests.cpu }} - limits: - memory: {{ .Values.config.pod.resources.limits.memory }} - cpu: {{ .Values.config.pod.resources.limits.cpu }} - - name: grpc-web - image: "ghcr.io/hyperledger-labs/grpc-web:latest" - imagePullPolicy: IfNotPresent - ports: - - name: grpc-web - containerPort: 7443 - env: - - name: BACKEND_ADDRESS - value: "{{ $.Values.orderer.name }}.{{ $.Values.metadata.namespace }}:{{ $.Values.service.ports.grpc.clusteripport }}" - - name: SERVER_TLS_CERT_FILE - value: "/certs/tls/server.crt" - - name: SERVER_TLS_KEY_FILE - value: "/certs/tls/server.key" - - name: BACKEND_TLS_CA_FILES - value: "/certs/tls/ca.crt" - - name: SERVER_BIND_ADDRESS - value: "0.0.0.0" - - name: SERVER_HTTP_DEBUG_PORT - value: "8080" - - name: SERVER_HTTP_TLS_PORT - value: "7443" - - name: BACKEND_TLS - value: "true" - - name: SERVER_HTTP_MAX_WRITE_TIMEOUT - value: 5m - - name: SERVER_HTTP_MAX_READ_TIMEOUT - value: 5m - - name: USE_WEBSOCKETS - value: "true" - volumeMounts: - - name: certificates - mountPath: /certs - volumeClaimTemplates: - #Lables are not being taken by Kubernetes as it dynamically creates PVC - - metadata: - name: datadir - spec: - accessModes: [ "ReadWriteOnce" ] - storageClassName: {{ $.Values.storage.storageclassname }} - resources: - requests: - storage: {{ $.Values.storage.storagesize }} diff --git a/platforms/hyperledger-fabric/charts/fabric-orderernode/templates/node-statefulset.yaml b/platforms/hyperledger-fabric/charts/fabric-orderernode/templates/node-statefulset.yaml new file mode 100644 index 00000000000..cb5e927c8e8 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-orderernode/templates/node-statefulset.yaml @@ -0,0 +1,361 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "fabric-orderernode.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: {{ .Release.Name }} + app.kubernetes.io/component: fabric + app.kubernetes.io/part-of: {{ include "fabric-orderernode.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + {{- include "labels.deployment" . | nindent 4 }} +spec: + updateStrategy: + type: RollingUpdate + serviceName: "{{ .Release.Name }}" + replicas: 1 + selector: + matchLabels: + app: {{ .Release.Name }} + app.kubernetes.io/name: {{ .Release.Name }} + app.kubernetes.io/component: fabric + app.kubernetes.io/part-of: {{ include "fabric-orderernode.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: {{ .Release.Name }} + app.kubernetes.io/component: fabric + app.kubernetes.io/part-of: {{ include "fabric-orderernode.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + {{- include "labels.deployment" . 
| nindent 8 }} + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + {{- if .Values.image.pullSecret }} + imagePullSecrets: + - name: {{ .Values.image.pullSecret }} + {{- end }} + volumes: + - name: certificates + emptyDir: + medium: Memory + {{ if .Values.global.vault.tls }} + - name: vaultca + secret: + secretName: {{ .Values.global.vault.tls }} + items: + - key: ca.crt.pem + path: ca-certificates.crt # curl expects certs to be in /etc/ssl/certs/ca-certificates.crt + {{ end }} + {{- if ne ($.Values.global.version | trunc 3) "2.5" }} + - name: {{ .Release.Name }}-genesis-volume + configMap: + name: syschannel-genesis + items: + - key: syschannel-genesis_base64 + path: genesis.block.base64 + {{ end }} + - name: scripts-volume + configMap: + name: bevel-vault-script + initContainers: + - name: kafka-healthcheck + image: {{ .Values.image.healthCheck }} + imagePullPolicy: IfNotPresent + command: ["sh", "-c"] + args: + - |- + #!/usr/bin/env sh + if [ {{ .Values.orderer.consensus }} == kafka ] + then + COUNTER=1 + FLAG=true + KAFKACOUNT=0 + COUNT=0 + {{ range .Values.kafka.brokers}} + COUNT=`expr "$COUNT" + 1` + {{ end }} + while [ "$COUNTER" -le {{ .Values.kafka.readinessThresHold }} ] + do + {{ range .Values.kafka.brokers}} + KAFKA_BROKERS={{ . }} + STATUS=$(nc -vz $KAFKA_BROKERS 2>&1 | grep -c open ) + if [ "$STATUS" == 0 ] + then + FLAG=false + else + FLAG=true + KAFKACOUNT=`expr "$KAFKACOUNT" + 1` + echo "$KAFKACOUNT kafka brokers out of $COUNT are up and running" + fi + {{ end }} + if [ "$FLAG" == false ] + then + echo "$KAFKACOUNT kafka brokers out of $COUNT are up and running!" + echo "Retry attempted $COUNTER times, retrying after {{ .Values.kafka.readinessCheckInterval }} seconds" + COUNTER=`expr "$COUNTER" + 1` + sleep {{ .Values.kafka.readinessCheckInterval }} + else + echo "SUCCESS!" + echo "All $KAFKACOUNT kafka broker are up and running!" + exit 0 + break + fi + done + if [ "$COUNTER" -gt {{ .Values.kafka.readinessThresHold }} ] || [ "$FLAG" == false ] + then + echo "Retry attempted $COUNTER times, no kafka brokers are up and running. Giving up!" + exit 1 + break + fi + fi + - name: certificates-init + image: {{ .Values.image.alpineUtils }} + imagePullPolicy: IfNotPresent + env: + - name: VAULT_ADDR + value: {{ .Values.global.vault.address }} + - name: VAULT_SECRET_ENGINE + value: "{{ .Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ .Values.global.vault.secretPrefix }}" + - name: KUBERNETES_AUTH_PATH + value: {{ .Values.global.vault.authPath }} + - name: VAULT_APP_ROLE + value: {{ .Values.global.vault.role }} + - name: MOUNT_PATH + value: /secret + - name: VAULT_TYPE + value: "{{ .Values.global.vault.type }}" + - name: ORDERER_NAME + value: {{ .Release.Name }} + command: ["sh", "-c"] + args: + - |- + #!/usr/bin/env sh + +{{- if eq .Values.global.vault.type "hashicorp" }} + . /scripts/bevel-vault.sh + + # Calling a function to retrieve the vault token. + vaultBevelFunc "init" + + function getOrdererTlsSecret { + KEY=$1 + + echo "Getting TLS certificates from Vault." 
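+                  # NOTE: ${KEY} is the secret name passed by the caller (the release name suffixed with "-tls"),
+                  # so the TLS material is read from ${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/orderers/${KEY}.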
+ vaultBevelFunc "readJson" "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/orderers/${KEY}" + if [ "$SECRETS_AVAILABLE" == "yes" ] + then + TLS_CA_CERT=$(echo ${VAULT_SECRET} | jq -r '.["ca_crt"]') + TLS_SERVER_CERT=$(echo ${VAULT_SECRET} | jq -r '.["server_crt"]') + TLS_SERVER_KEY=$(echo ${VAULT_SECRET} | jq -r '.["server_key"]') + + echo "${TLS_CA_CERT}" > ${OUTPUT_PATH}/ca.crt + echo "${TLS_SERVER_CERT}" > ${OUTPUT_PATH}/server.crt + echo "${TLS_SERVER_KEY}" > ${OUTPUT_PATH}/server.key + ORDERER_TLS_SECRET=true + else + ORDERER_TLS_SECRET=false + fi + } + + function getOrdererMspSecret { + KEY=$1 + + echo "Getting MSP certificates from Vault." + vaultBevelFunc "readJson" "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/orderers/${KEY}" + if [ "$SECRETS_AVAILABLE" == "yes" ] + then + ADMINCERT=$(echo ${VAULT_SECRET} | jq -r '.["admincerts"]') + CACERTS=$(echo ${VAULT_SECRET} | jq -r '.["cacerts"]') + KEYSTORE=$(echo ${VAULT_SECRET} | jq -r '.["keystore"]') + SIGNCERTS=$(echo ${VAULT_SECRET} | jq -r '.["signcerts"]') + TLSCACERTS=$(echo ${VAULT_SECRET} | jq -r '.["tlscacerts"]') + + echo "${ADMINCERT}" > ${OUTPUT_PATH}/admincerts/admin.crt + echo "${CACERTS}" > ${OUTPUT_PATH}/cacerts/ca.crt + echo "${KEYSTORE}" > ${OUTPUT_PATH}/keystore/server.key + echo "${SIGNCERTS}" > ${OUTPUT_PATH}/signcerts/server.crt + echo "${TLSCACERTS}" > ${OUTPUT_PATH}/tlscacerts/tlsca.crt + ORDERER_MSP_SECRET=true + else + ORDERER_MSP_SECRET=false + fi + } + +{{- else }} + function getOrdererTlsSecret { + KEY=$1 + KUBENETES_SECRET=$(kubectl get secret ${KEY} --namespace {{ .Release.Namespace }} -o json) + if [ "$KUBENETES_SECRET" = "" ]; then + ORDERER_TLS_SECRET=false + else + TLS_CA_CERT=$(echo ${KUBENETES_SECRET} | jq -r '.data.cacrt' | base64 -d) + TLS_SERVER_CERT=$(echo ${KUBENETES_SECRET} | jq -r '.data.servercrt' | base64 -d) + TLS_SERVER_KEY=$(echo ${KUBENETES_SECRET} | jq -r '.data.serverkey' | base64 -d) + + echo "${TLS_CA_CERT}" > ${OUTPUT_PATH}/ca.crt + echo "${TLS_SERVER_CERT}" > ${OUTPUT_PATH}/server.crt + echo "${TLS_SERVER_KEY}" > ${OUTPUT_PATH}/server.key + ORDERER_TLS_SECRET=true + fi + } + + function getOrdererMspSecret { + KEY=$1 + KUBENETES_SECRET=$(kubectl get secret ${KEY} --namespace {{ .Release.Namespace }} -o json) + if [ "$KUBENETES_SECRET" = "" ]; then + ORDERER_MSP_SECRET=false + else + ADMINCERT=$(echo ${KUBENETES_SECRET} | jq -r '.data.admincerts' | base64 -d) + CACERTS=$(echo ${KUBENETES_SECRET} | jq -r '.data.cacerts' | base64 -d) + KEYSTORE=$(echo ${KUBENETES_SECRET} | jq -r '.data.keystore' | base64 -d) + SIGNCERTS=$(echo ${KUBENETES_SECRET} | jq -r '.data.signcerts' | base64 -d) + TLSCACERTS=$(echo ${KUBENETES_SECRET} | jq -r '.data.tlscacerts' | base64 -d) + + echo "${ADMINCERT}" > ${OUTPUT_PATH}/admincerts/admin.crt + echo "${CACERTS}" > ${OUTPUT_PATH}/cacerts/ca.crt + echo "${KEYSTORE}" > ${OUTPUT_PATH}/keystore/server.key + echo "${SIGNCERTS}" > ${OUTPUT_PATH}/signcerts/server.crt + echo "${TLSCACERTS}" > ${OUTPUT_PATH}/tlscacerts/tlsca.crt + ORDERER_MSP_SECRET=true + fi + } + +{{- end }} + COUNTER=1 + while [ "$COUNTER" -le {{ .Values.healthCheck.retries }} ] + do + OUTPUT_PATH="${MOUNT_PATH}/tls" + mkdir -p ${OUTPUT_PATH} + getOrdererTlsSecret ${ORDERER_NAME}-tls + + OUTPUT_PATH="${MOUNT_PATH}/msp" + mkdir -p ${OUTPUT_PATH}/admincerts + mkdir -p ${OUTPUT_PATH}/cacerts + mkdir -p ${OUTPUT_PATH}/keystore + mkdir -p ${OUTPUT_PATH}/signcerts + mkdir -p ${OUTPUT_PATH}/tlscacerts + getOrdererMspSecret ${ORDERER_NAME}-msp + + if [ "$ORDERER_TLS_SECRET" = "true" ] && [ 
"$ORDERER_MSP_SECRET" = "true" ] + then + echo "Orderer certificates have been obtained correctly" + break + else + echo "Orderer certificates have not been obtained, sleeping for {{ .Values.healthCheck.sleepTimeAfterError }}" + sleep {{ .Values.healthCheck.sleepTimeAfterError }} + COUNTER=`expr "$COUNTER" + 1` + fi + done + + if [ "$COUNTER" -gt {{ .Values.healthCheck.retries }} ] + then + echo "Retry attempted `expr $COUNTER - 1` times, Orderer certificates have not been obtained." + exit 1 + fi + volumeMounts: + - name: certificates + mountPath: /secret + {{ if .Values.global.vault.tls }} + - name: vaultca + mountPath: "/etc/ssl/certs/" + readOnly: true + {{ end }} + - name: scripts-volume + mountPath: /scripts/bevel-vault.sh + subPath: bevel-vault.sh + containers: + - name: fabric-orderer + image: {{ .Values.image.orderer }}:{{ .Values.global.version }} + imagePullPolicy: IfNotPresent + workingDir: /opt/gopath/src/github.com/hyperledger/fabric + command: ["sh", "-c", "cat /var/hyperledger/orderer/genesis/genesis.block.base64 | base64 -d > /var/hyperledger/orderer/orderer.genesis.block && orderer"] + ports: + - containerPort: 7050 + - name: operations + containerPort: 9443 + - name: onsadmin + containerPort: 7055 + envFrom: + - configMapRef: + name: {{ .Release.Name }}-config + volumeMounts: + - name: datadir + mountPath: /var/hyperledger/production/orderer + {{- if ne ($.Values.global.version | trunc 3) "2.5" }} + - name: {{ .Release.Name }}-genesis-volume + mountPath: /var/hyperledger/orderer/genesis + readOnly: true + {{- end }} + - name: certificates + mountPath: /var/hyperledger/orderer/crypto + readOnly: true + resources: + requests: + memory: {{ .Values.orderer.resources.requests.memory }} + cpu: {{ .Values.orderer.resources.requests.cpu }} + limits: + memory: {{ .Values.orderer.resources.limits.memory }} + cpu: {{ .Values.orderer.resources.limits.cpu }} + - name: grpc-web + image: "ghcr.io/hyperledger-labs/grpc-web:latest" + imagePullPolicy: IfNotPresent + ports: + - name: grpc-web + containerPort: 7443 + env: + - name: BACKEND_ADDRESS + value: "{{ .Release.Name }}.{{ .Release.Namespace }}:{{ .Values.orderer.ports.grpc.clusterIpPort }}" + - name: SERVER_TLS_CERT_FILE + value: "/certs/tls/server.crt" + - name: SERVER_TLS_KEY_FILE + value: "/certs/tls/server.key" + - name: BACKEND_TLS_CA_FILES + value: "/certs/tls/ca.crt" + - name: SERVER_BIND_ADDRESS + value: "0.0.0.0" + - name: SERVER_HTTP_DEBUG_PORT + value: "8080" + - name: SERVER_HTTP_TLS_PORT + value: "7443" + - name: BACKEND_TLS + value: "true" + - name: SERVER_HTTP_MAX_WRITE_TIMEOUT + value: 5m + - name: SERVER_HTTP_MAX_READ_TIMEOUT + value: 5m + - name: USE_WEBSOCKETS + value: "true" + volumeMounts: + - name: certificates + mountPath: /certs + volumeClaimTemplates: + #Lables are not being taken by Kubernetes as it dynamically creates PVC + - metadata: + name: datadir + labels: + {{- include "labels.pvc" . 
| nindent 8 }} + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: storage-{{ .Release.Name }} + resources: + requests: + storage: {{ .Values.storage.size }} diff --git a/platforms/hyperledger-fabric/charts/fabric-orderernode/templates/service.yaml b/platforms/hyperledger-fabric/charts/fabric-orderernode/templates/service.yaml index ee998da18d2..114b5d2d2db 100644 --- a/platforms/hyperledger-fabric/charts/fabric-orderernode/templates/service.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-orderernode/templates/service.yaml @@ -7,33 +7,28 @@ apiVersion: v1 kind: Service metadata: - name: {{ $.Values.orderer.name }} - namespace: {{ $.Values.metadata.namespace }} - annotations: - {{- if $.Values.annotations }} - {{- range $key, $value := $.Values.annotations.service }} - {{- range $k, $v := $value }} - {{ $k }}: {{ $v | quote }} - {{- end }} - {{- end }} - {{- end }} + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} labels: - run: {{ $.Values.orderer.name }} - app.kubernetes.io/name: {{ $.Values.orderer.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + run: {{ .Release.Name }} + app.kubernetes.io/name: {{ .Release.Name }} + app: {{ .Release.Name }} + app.kubernetes.io/component: fabric + app.kubernetes.io/part-of: {{ include "fabric-orderernode.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} + {{- include "labels.service" . | nindent 4 }} spec: - type: {{ $.Values.service.servicetype }} + type: {{ .Values.orderer.serviceType }} selector: - app: {{ $.Values.orderer.name }} + app: {{ .Release.Name }} ports: - protocol: TCP targetPort: 7050 - port: {{ $.Values.service.ports.grpc.clusteripport }} - {{- if (ne $.Values.service.servicetype "ClusterIP") }} - nodePort: {{ $.Values.service.ports.grpc.nodeport }} + port: {{ .Values.orderer.ports.grpc.clusterIpPort }} + {{- if (ne .Values.orderer.serviceType "ClusterIP") }} + nodePort: {{ .Values.orderer.ports.grpc.nodeport }} {{- end }} name: grpc - name: grpc-web @@ -43,61 +38,66 @@ spec: - name: operations protocol: TCP targetPort: 9443 - port: {{ $.Values.service.ports.metrics.clusteripport }} - {{- if (eq $.Values.service.servicetype "ClusterIP") }} + port: {{ .Values.orderer.ports.metrics.clusterIpPort }} +{{ if contains "2.5" .Values.global.version }} + - name: onsadmin + protocol: TCP + targetPort: 7055 + port: 7055 +{{- end }} + {{- if (eq .Values.orderer.serviceType "ClusterIP") }} clusterIP: None {{- end }} -{{ if eq $.Values.proxy.provider "haproxy" }} +{{ if eq .Values.global.proxy.provider "haproxy" }} --- apiVersion: networking.k8s.io/v1 kind: Ingress metadata: - name: {{ $.Values.orderer.name }} - namespace: {{ $.Values.metadata.namespace }} + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} annotations: - kubernetes.io/ingress.class: "haproxy" ingress.kubernetes.io/ssl-passthrough: "true" spec: + ingressClassName: "haproxy" rules: - - host: {{ $.Values.orderer.name }}.{{ $.Values.proxy.external_url_suffix }} + - host: {{ .Release.Name }}.{{ .Release.Namespace }}.{{ .Values.global.proxy.externalUrlSuffix }} http: paths: - path: / pathType: Prefix backend: service: - name: {{ $.Values.orderer.name }} + name: {{ .Release.Name }} port: - number: {{ $.Values.service.ports.grpc.clusteripport }} - - host: {{ $.Values.orderer.name }}-proxy.{{ 
$.Values.proxy.external_url_suffix }} + number: {{ .Values.orderer.ports.grpc.clusterIpPort }} + - host: {{ .Release.Name }}-proxy.{{ .Release.Namespace }}.{{ .Values.global.proxy.externalUrlSuffix }} http: paths: - path: / pathType: Prefix backend: service: - name: {{ $.Values.orderer.name }} + name: {{ .Release.Name }} port: number: 7443 --- apiVersion: networking.k8s.io/v1 kind: Ingress metadata: - name: {{ $.Values.orderer.name }}-ops - namespace: {{ $.Values.metadata.namespace }} - annotations: - kubernetes.io/ingress.class: "haproxy" + name: {{ .Release.Name }}-ops + namespace: {{ .Release.Namespace }} spec: + ingressClassName: "haproxy" rules: - - host: {{ $.Values.orderer.name }}-ops.{{ $.Values.proxy.external_url_suffix }} + - host: {{ .Release.Name }}-ops.{{ .Release.Namespace }}.{{ .Values.global.proxy.externalUrlSuffix }} http: paths: - path: / pathType: Prefix backend: service: - name: {{ $.Values.orderer.name }} + name: {{ .Release.Name }} port: - number: 10443 + number: 9443 {{ end }} diff --git a/platforms/hyperledger-fabric/charts/fabric-orderernode/templates/servicemonitor.yaml b/platforms/hyperledger-fabric/charts/fabric-orderernode/templates/servicemonitor.yaml index 62850b18082..92ad3e488fe 100644 --- a/platforms/hyperledger-fabric/charts/fabric-orderernode/templates/servicemonitor.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-orderernode/templates/servicemonitor.yaml @@ -1,14 +1,18 @@ -{{- if $.Values.service.ports.metrics.enabled }} +{{- if .Values.orderer.ports.metrics.enabled }} {{- if $.Capabilities.APIVersions.Has "monitoring.coreos.com/v1/ServiceMonitor" }} apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: labels: - app: {{ $.Values.orderer.name }} + app: {{ .Release.Name }} + app.kubernetes.io/name: {{ .Release.Name }} + app.kubernetes.io/component: fabric + app.kubernetes.io/part-of: {{ include "fabric-orderernode.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - name: {{ $.Values.orderer.name }} - namespace: {{ $.Values.metadata.namespace }} + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} spec: jobLabel: {{ .Release.Name }} endpoints: @@ -16,10 +20,10 @@ spec: port: operations namespaceSelector: matchNames: - - {{ $.Values.metadata.namespace }} + - {{ .Release.Namespace }} selector: matchLabels: app.kubernetes.io/instance: {{ .Release.Name }} - run: {{ $.Values.orderer.name }} + run: {{ .Release.Name }} {{- end }} {{- end }} diff --git a/platforms/hyperledger-fabric/charts/fabric-orderernode/values.yaml b/platforms/hyperledger-fabric/charts/fabric-orderernode/values.yaml index a08d5e3488d..270207e8376 100644 --- a/platforms/hyperledger-fabric/charts/fabric-orderernode/values.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-orderernode/values.yaml @@ -4,150 +4,164 @@ # SPDX-License-Identifier: Apache-2.0 ############################################################################################## -metadata: - #Provide the namespace for orderer - #Eg. namespace: org1-net - namespace: org1-net +global: # HLF Network Version - network: - version: 2.2.2 - images: - #Provide the valid image name and version for fabric orderer - #Eg. 
orderer: hyperledger/fabric-orderer:1.4.0 - orderer: ghcr.io/hyperledger/bevel-fabric-orderer:2.2.2 - #Provide the valid image name and version to read certificates from vault server - #Eg.alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - #Provide the valid image name and version for healthcheck of kafka - #Eg. healthcheck: busybox - healthcheck: busybox - #Provide the custom labels - #NOTE: Provide labels other than name, release name , release service, chart version , chart name , run - #These lables will not be applied to VolumeClaimTemplate of StatefulSet as labels are automatically picked up by Kubernetes - #Eg. labels: - # role: orderer - labels: - -orderer: - #Provide the name for the orderer node - #Eg. name: orderer - name: orderer - #Provide the loglevel for orderer deployment - #Eg. loglevel: info - loglevel: info - #Provide the localmspid for orderer deployment - #Eg. localmspid: OrdererMSP - localmspid: OrdererMSP - #Provide the value for tlsstatus to be true or false for orderer deployment - #Eg. tlsstatus: true - tlsstatus: true - #Provide the interval in which the orderer to signal the connection has kept alive - #Eg. keepaliveserverinterval: 10s - keepaliveserverinterval: 10s - #Provide the address for orderer - #Eg. address: orderer1.org1proxy.blockchaincloudpoc.com:443 - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + #Eg. version: 2.5.4 + version: 2.5.4 + #Provide the service account name which will be created. + serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # only 'false' is implemented + vault: + #Provide the type of vault + #Eg. type: hashicorp + type: hashicorp + #Provide the vaultrole for an organization + #Eg. vaultrole: org1-vault-role + role: vault-role + #Provide the vault server address + #Eg. vaultaddress: http://Vault-884963190.eu-west-1.elb.amazonaws.com + address: + #Provide the kubernetes auth backed configured in vault for an organization + #Eg. authpath: supplychain + authPath: supplychain + #Provide the secret engine. + secretEngine: secretsv2 + #Provide the vault path where the secrets will be stored + secretPrefix: "data/supplychain" + #Kuberenetes secret for vault ca.cert + #Enable or disable TLS for vault communication if value present or not + #Eg. tls: vaultca + tls: + + proxy: + #This will be the proxy/ingress provider. Can have values "none" or "haproxy" + #Eg. provider: "haproxy" + provider: "haproxy" + #This field contains the external URL of the organization + #Eg. externalUrlSuffix: test.blockchaincloudpoc.com + externalUrlSuffix: test.blockchaincloudpoc.com -consensus: - #Provide name of the consensus. Currently support raft and kafka - #Eg. name: raft - name: raft - storage: - #Provide the storageclassname for orderer - #Eg. storageclassname: aws-storage - storageclassname: aws-storageclass - #Provide the storagesize for storage class - #Eg. storagesize: 512Mi - storagesize: 512Mi + #Provide storage size for Orderer Volume + #Eg. size: 512Mi + size: 512Mi + # NOTE: when you set this to Retain, the volume WILL persist after the chart is delete and you need to manually delete it + reclaimPolicy: "Delete" # choose from: Delete | Retain + volumeBindingMode: Immediate # choose from: Immediate | WaitForFirstConsumer + allowedTopologies: + enabled: false + +certs: + # Flag indicating the creation of certificates. 
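+  # When true, the fabric-catools dependency chart (aliased as "certs" in requirements.yaml) is enabled via its condition and generates the Orderer Node certificates.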
+ generateCertificates: true + orgData: + caAddress: ca.supplychain-net:7051 + caAdminUser: supplychain-admin + caAdminPassword: supplychain-adminpw + #Provide organization's name in lowercases + #Eg. orgName: supplychain + orgName: supplychain + #Provide organization's type (orderer or peer) + #Eg. type: orderer + type: orderer + #Provide organization's subject + #Eg. componentSubject: "O=Orderer,L=51.50/-0.13/London,C=GB" + componentSubject: "O=Orderer,L=51.50/-0.13/London,C=GB" + settings: + #Flag to create configmaps for the organization. This flag must be set to true when installing the first orderer/peer in organization and false for others. + createConfigMaps: true + refreshCertValue: false + addPeerValue: false + removeCertsOnDelete: false + removeOrdererTlsOnDelete: false -service: - #Provide the servicetype a peer - #Eg. servicetype: NodePort - servicetype: ClusterIP +image: + #Provide the valid image repository for fabric orderer + #Eg. orderer: hyperledger/fabric-orderer + orderer: ghcr.io/hyperledger/bevel-fabric-orderer + #Provide the valid image name and version to read certificates from vault server + #Eg.alpineUtils: ghcr.io/hyperledger/bevel-alpine:latest + alpineUtils: ghcr.io/hyperledger/bevel-alpine:latest + #Provide the valid image name and version for healthCheck of kafka + #Eg. healthCheck: busybox + healthCheck: busybox + #Provide the secret to use if private repository + #Eg. pullSecret: regcred + pullSecret: + +orderer: + #Provide the name of the consensus. Currently support raft and kafka + #Eg. consensus: raft + consensus: raft + #Provide the logLevel for orderer deployment + #Eg. logLevel: info + logLevel: info + #Provide the localMspId for orderer deployment + #Eg. localMspId: supplychainMSP + localMspId: supplychainMSP + #Provide the value for tlsStatus to be true or false for orderer deployment + #Eg. tlsStatus: true + tlsStatus: true + #Provide the interval in which the orderer to signal the connection has kept alive + #Eg. keepAliveServerInterval: 10s + keepAliveServerInterval: 10s + #Provide the serviceType a peer + #Eg. serviceType: NodePort + serviceType: ClusterIP ports: grpc: #Provide a nodeport for orderer in the range of 30000-32767 (optional) #Eg. nodeport: 30001 nodeport: #Provide a cluster IP port for orderer to be exposed. - #Eg. clusteripport: 7050 - clusteripport: 7050 + #Eg. clusterIpPort: 7050 + clusterIpPort: 7050 metrics: enabled: false - clusteripport: 9443 -annotations: - #Extra annotations - service: {} - deployment: {} - -vault: - #Provide the vault server address - #Eg. vaultaddress: http://Vault-884963190.eu-west-1.elb.amazonaws.com - address: - #Provide the vaultrole for orderer deployment - #Eg. vaultrole: orderer-vault-role - role: vault-role - #Provide the kubernetes auth backed configured in vault for orderer deployment - #Eg. authpath: devorg1-net-auth - authpath: devorg1-net-auth - #Provide the type of vault - #Eg. type: hashicorp - type: hashicorp - #Provide the value for vault secretprefix - #Eg. secretprefix: secretsv2/data/crypto/ordererOrganizations/.../orderers/.... - secretprefix: secretsv2/data/crypto/ordererOrganizations/org1-net/orderers/orderer.org1-net - #Provide the imagesecretname for vault - #Eg. imagesecretname: regcred - imagesecretname: "" - #Provide the serviceaccountname for vault - #Eg. serviceaccountname: vault-auth - serviceaccountname: vault-auth - #Kuberenetes secret for vault ca.cert - #Enable or disable TLS for vault communication if value present or not - #Eg. 
tls: vaultca - tls: - - -kafka: - #Provide the interval in seconds you want to iterate till all kafka services to be ready - #Eg. readinesscheckinterval: 5 - readinesscheckinterval: 5 - #Provide the threshold till you want to check if all specified kafka brokers are up and running - #Eg. readinessthreshold: 4 - readinessthreshold: 4 - #Provide the kafka broker list - #Eg. brokers: - # - kafka-0.broker.org1-net.svc.cluster.local:9092 - # - kafka-1.broker.org1-net.svc.cluster.local:9092 - # - kafka-2.broker.org1-net.svc.cluster.local:9092 - # - kafka-3.broker.org1-net.svc.cluster.local:9092 - brokers: - -proxy: - #This will be the proxy/ingress provider. Can have values "none" or "haproxy" - #Eg. provider: "haproxy" - provider: "haproxy" - #This field contains the external URL of the organization - #Eg. external_url_suffix: org1.blockchaincloudpoc.com - external_url_suffix: org1proxy.blockchaincloudpoc.com:443 - -config: - # Set limits and requests of pod - pod: - resources: - limits: + clusterIpPort: 9443 + resources: + limits: # Provide the limit memory for node # Eg. memory: 512M - memory: 512M + memory: 512M # Provide the limit cpu for node # Eg. cpu: 1 - cpu: 1 - requests: + cpu: 1 + requests: # Provide the requests memory for node # Eg. memory: 512M - memory: 512M + memory: 512M # Provide the requests cpu for node # Eg. cpu: 0.25 - cpu: 0.25 + cpu: 0.25 + +kafka: + #Provide the interval in seconds you want to iterate till all kafka services to be ready + #Eg. readinessCheckInterval: 5 + readinessCheckInterval: 5 + #Provide the threshold till you want to check if all specified kafka brokers are up and running + #Eg. readinessThresHold: 4 + readinessThresHold: 4 + #Provide the kafka broker list + #Eg. brokers: + # - kafka-0.broker.org1-net.svc.cluster.local:9092 + # - kafka-1.broker.org1-net.svc.cluster.local:9092 + # - kafka-2.broker.org1-net.svc.cluster.local:9092 + # - kafka-3.broker.org1-net.svc.cluster.local:9092 + brokers: + +healthCheck: + # The amount of times to retry fetching from/writing to Vault before giving up. + # Eg. retries: 10 + retries: 10 + # The amount of time in seconds to wait after an error occurs when fetching from/writing to Vault. + # Eg. sleepTimeAfterError: 15 + sleepTimeAfterError: 15 + +labels: + service: [] + pvc: [] + deployment: [] diff --git a/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/.helmignore b/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/.helmignore new file mode 100644 index 00000000000..014fa775608 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +generated_config/ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/Chart.yaml b/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/Chart.yaml index d2a165f9379..adadd4aceca 100644 --- a/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/Chart.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/Chart.yaml @@ -5,7 +5,22 @@ ############################################################################################## apiVersion: v1 -appVersion: "2.0" -description: A Helm chart for create channel name: fabric-osnadmin-channel-create -version: 1.0.0 +description: "Hyperledger Fabric: Creates channel using OSNAdmin" +version: 1.1.0 +appVersion: latest +keywords: + - bevel + - hlf + - fabric + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/README.md b/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/README.md index e164d0b4082..58328cc9896 100644 --- a/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/README.md +++ b/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/README.md @@ -3,179 +3,92 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) - -# Create Channel Hyperledger Fabric Deployment +# fabric-osnadmin-channel-create -- [Osn Create Channel Hyperledger Fabric Deployment Helm Chart](#osn-create-channel-hyperledger-fabric-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Verification](#verification) -- [Updating the Deployment](#updating-the-deployment) -- [Deletion](#deletion) -- [Contributing](#contributing) -- [License](#license) +This chart is a component of Hyperledger Bevel. The fabric-osnadmin-channel-create chart deploys a Kubernetes job to create a channel. The channel name is same as the release name. This chart should be executed after the [fabric-genesis](../fabric-genesis/README.md) chart. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details. +## TL;DR - -## Osn Create Channel Hyperledger Fabric Deployment Helm Chart ---- -A [Helm chart](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create) to create a channel with fabric 2.5.4. - - - -## Prerequisites ---- -Before deploying the Helm chart, make sure to have the following prerequisites: - -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm installed. 
- - - -## Chart Structure ---- -The structure of the Helm chart is as follows: - +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install allchannel bevel/fabric-osnadmin-channel-create ``` -fabric-osnadmin-channel-create/ - |- templates/ - |- _helpers.yaml - |- configmap.yaml - |- osn_create_channel.yaml - |- Chart.yaml - |- README.md - |- values.yaml -``` - -- `templates/`: Contains the Kubernetes manifest templates that define the resources to be deployed. -- `helpers.tpl`: Contains custom label definitions used in other templates. -- `configmap.yaml`: Store configuration data that can be consumed by containers. The first ConfigMap stores various configuration data as key-value pairs and the second ConfigMap stores the base64-encoded content of the channel configuration file (channel.tx.base64). -- `osn_create_channel.yaml`: The certificates-init fetches TLS certificates from a Vault server and stores them in a local directory. The createchannel check the channel creation. If the channel does not exist, the createchannel creates the channel. -- `Chart.yaml`: Contains the metadata for the Helm chart, such as the name, version, and description. -- `README.md`: Provides information and instructions about the Helm chart. -- `values.yaml`: Contains the default configuration values for the Helm chart. - - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: - -### Metadata - -| Name | Description | Default Value | -| ----------------------| ----------------------------------------------------------------------|---------------------------------------------------| -| namespace | Provide the namespace for organization's peer | org1-net | -| network.version | Provide Fabric version | 2.5.4 | -| images.fabrictools | Valid image name and version for fabric tools | ghcr.io/hyperledger/bevel-fabric-tools:2.5.4 | -| images.alpineutils | Valid image name and version to read certificates from vault server | ghcr.io/hyperledger/bevel-alpine:latest | -| labels | Custom labels (other than specified) | "" | - - -### Deployment - -| Name | Description | Default Value | -| ------------ | ------------------------------------------- | -------------- | -| annotations | Deployment annotations | "" | - -### Vault -| Name | Description | Default Value | -| ------------------- | --------------------------------------------------------------------| ------------------------------| -| role | Vault role for the organization | vault-role | -| address | Vault server address | "" | -| authpath | Kubernetes auth backend configured in vault for the organization | devorg1-net-auth | -| orderersecretprefix | Vault secret prefix for orderer | secret/secretsv2/crypto/ordererOrganizations/org1-net/orderers | -| serviceaccountname | Service account name for vault | vault-auth | -| type | Provide the type of vault | hashicorp | -| imagesecretname | Image secret name for vault | "" | -| tls | Vault ca.cert Kubernetes secret | "" | - -### Channel - -| Name | Description | Default Value | -| ------ | --------------------------------- | -------------- | -| name | Name of the channel | mychannel | - -### Orderer - -| Name | Description | Default Value | -| ------- | ----------------------------| --------------------------| -| 
orderer_info | Provide orderer's names | orderer1 | - -### Other - -| Name | Description | Default Value | -| ---------- | ---------------------------------------------| --------------- | -| genesis | Provide the base64 encoded genesis file | "" | - - - -## Deployment ---- +## Prerequisites -To deploy the fabric-channel-create Helm chart, follow these steps: +- Kubernetes 1.19+ +- Helm 3.2.0+ -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install the chart: - ``` - $ helm repo add bevel https://hyperledger.github.io/bevel/ - $ helm install ./fabric-osnadmin-channel-create - ``` -Replace `` with the desired name for the release. +If Hashicorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ -This will deploy the fabric-channel-create node to the Kubernetes cluster based on the provided configurations. +Also, [fabric-genesis](../fabric-genesis/README.md) chart should be installed and this chart should be executed from the same namespace as the Orderer Organization. +## Installing the Chart - -## Verification ---- +To install the chart with the channel name `allchannel`: -To verify the deployment, we can use the following command: -``` -$ kubectl get jobs -n +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install allchannel bevel/fabric-osnadmin-channel-create ``` -Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. +The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. - -## Updating the Deployment ---- - -If we need to update the deployment with new configurations or changes, modify the same [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/values.yaml) file with the desired changes and run the following Helm command: -``` -$ helm upgrade ./fabric-channel-create -``` -Replace `` with the name of the release. This command will apply the changes to the deployment, ensuring the fabric-channel-create node is up to date. +> **Tip**: List all releases using `helm list` +## Uninstalling the Chart - -## Deletion ---- +To uninstall/delete the `allchannel` deployment: -To delete the deployment and associated resources, run the following Helm command: -``` -$ helm uninstall +```bash +helm uninstall allchannel ``` -Replace `` with the name of the release. This command will remove all the resources created by the Helm chart. - - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Osn Create Channel Hyperledger Fabric Deployment Helm Chart](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). +The command removes all the Kubernetes components associated with the chart and deletes the release. 
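As a rough sketch of how the values documented in the [Parameters](#parameters) section below are usually overridden, the snippet writes a minimal override file and passes it to `helm install`. The Vault address is a placeholder and the orderer entries simply repeat the chart defaults; adjust everything to your own network before use.

```bash
# Illustrative only: the Vault address and orderer admin addresses are placeholders.
cat > osnadmin-overrides.yaml <<'EOF'
global:
  vault:
    type: hashicorp
    address: http://vault.example.com:8200   # placeholder Vault server URL
    authPath: supplychain
    secretPrefix: data/supplychain
orderers:
  - name: orderer1
    adminAddress: orderer1.supplychain-net:7055
  - name: orderer2
    adminAddress: orderer2.supplychain-net:7055
  - name: orderer3
    adminAddress: orderer3.supplychain-net:7055
EOF

# The release name doubles as the channel name, so this creates a channel called "allchannel".
helm install allchannel bevel/fabric-osnadmin-channel-create -f osnadmin-overrides.yaml
```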
+ +## Parameters + +### Global parameters +These parameters are referred to identically in each parent and child chart. +| Name | Description | Default Value | +|--------|---------|-------------| +|`global.version` | Fabric Version. This chart is only used for `2.5.x` | `2.5.4` | +|`global.serviceAccountName` | The service account name that will be created for Vault auth and K8s secret management | `vault-auth` | +| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. Currently only `aws`, `azure` and `minikube` are tested | `aws` | +| `global.cluster.cloudNativeServices` | Only `false` is implemented; `true` to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) is reserved for the future | `false` | +| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` and `kubernetes` are supported. | `hashicorp` | +| `global.vault.role` | Role used for authentication with Vault | `vault-role` | +| `global.vault.network` | Network type that is being deployed | `fabric` | +| `global.vault.address`| URL of the Vault server. | `""` | +| `global.vault.authPath` | Authentication path for Vault | `supplychain` | +| `global.vault.secretEngine` | Vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | Vault secret prefix which must start with `data/` | `data/supplychain` | +| `global.vault.tls` | Name of the Kubernetes secret which has certs to connect to TLS enabled Vault | `false` | + +### Image + +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `image.fabricTools` | Fabric Tools image repository | `ghcr.io/hyperledger/bevel-fabric-tools` | +| `image.alpineUtils` | Alpine utils image repository and tag | `ghcr.io/hyperledger/bevel-alpine:latest` | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | + +### Settings + +| Name | Description | Default Value | +|--------|---------|-------------| +| `orderers` | List of Orderer nodes in the network and their OSN Admin addresses. Each entry has two fields: `name` and `adminAddress` | `- name: orderer1`
`adminAddress: orderer1.supplychain-net:7055`
`- name: orderer2`
`adminAddress: orderer2.supplychain-net:7055`
`- name: orderer3`
`adminAddress: orderer3.supplychain-net:7055` | +| `orderer.addOrderer` | Flag to add new Orderer node to the network | `false` | +| `orderer.name` | Name of the new Orderer node to be addded | `neworderer` | +| `orderer.localMspId` | New Orderer MSP ID | `newordererMSP` | +| `orderer.ordererAddress` | New Orderer Internal or External Address with port for Peer to connect | `neworderer.neworg-net:7050` | - ## License This chart is licensed under the Apache v2.0 license. -Copyright © 2023 Accenture +Copyright © 2024 Accenture ### Attribution diff --git a/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/templates/_helpers.tpl b/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/templates/_helpers.tpl index d43c09d8cef..6d9284abc4b 100644 --- a/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/templates/_helpers.tpl +++ b/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/templates/_helpers.tpl @@ -1,5 +1,28 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "fabric-osnadmin-channel-create.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "fabric-osnadmin-channel-create.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "fabric-osnadmin-channel-create.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/templates/configmap.yaml b/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/templates/configmap.yaml index 4c583456ac7..0e9eb9014e4 100644 --- a/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/templates/configmap.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/templates/configmap.yaml @@ -7,43 +7,24 @@ apiVersion: v1 kind: ConfigMap metadata: - name: {{ $.Values.channel.name }}-config - namespace: {{ $.Values.metadata.namespace }} - {{- if $.Values.deployment.annotations }} - annotations: -{{ toYaml $.Values.deployment.annotations | nindent 8 }} - {{- end }} + name: {{ .Release.Name }}-osnadmin-config + namespace: {{ .Release.Namespace }} labels: - app.kubernetes.io/name: {{ $.Values.channel.name }}-config - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} + app.kubernetes.io/name: {{ .Release.Name }}-osnadmin-config + app.kubernetes.io/component: fabric-osnadmin-channel-create-job + app.kubernetes.io/part-of: {{ include "fabric-osnadmin-channel-create.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm data: - CHANNEL_NAME: {{ $.Values.channel.name }} + CHANNEL_NAME: {{ .Release.Name }} ADMIN_TLS_CERTS: /opt/gopath/src/github.com/hyperledger/fabric/crypto GENESIS_FILE: /opt/gopath/src/github.com/hyperledger/fabric/orderer/genesis -{{- if $.Values.add_orderer }} - CORE_PEER_LOCALMSPID: {{ $.Values.orderer.localmspid }} +{{- if $.Values.orderer.addOrderer }} + CORE_PEER_LOCALMSPID: {{ $.Values.orderer.localMspId }} CORE_PEER_TLS_ROOTCERT_FILE: /opt/gopath/src/github.com/hyperledger/fabric/crypto/admin/msp/tlscacerts/tlsca.crt CORE_PEER_MSPCONFIGPATH: /opt/gopath/src/github.com/hyperledger/fabric/crypto/admin/msp - CORE_PEER_ADDRESS: {{ $.Values.orderer.address }} + CORE_PEER_ADDRESS: {{ $.Values.orderer.ordererAddress }} ORDERER_CA: /opt/gopath/src/github.com/hyperledger/fabric/crypto/{{ $.Values.orderer.name }}/tls/ca.crt {{ end }} ---- -{{- if not $.Values.add_orderer }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ $.Values.channel.name }}-genesis-block - namespace: {{ $.Values.metadata.namespace }} - labels: - app.kubernetes.io/name: genesis-block - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} -data: - genesis.block.base64: {{ $.Values.genesis | quote }} -{{ end }} diff --git a/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/templates/osn_create_channel.yaml b/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/templates/osn_create_channel.yaml index 36272c3b0e0..426ce6c520c 100644 --- a/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/templates/osn_create_channel.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/templates/osn_create_channel.yaml @@ -7,50 +7,50 @@ apiVersion: batch/v1 kind: Job metadata: - name: osn-createchannel-{{ $.Values.channel.name }} - namespace: {{ $.Values.metadata.namespace }} - {{- if $.Values.deployment.annotations }} - annotations: -{{ toYaml $.Values.deployment.annotations | nindent 8 }} - {{- end }} + name: channel-create-{{ .Release.Name }} + namespace: {{ .Release.Namespace }} labels: - app: osn-createchannel-{{ $.Values.channel.name }} - app.kubernetes.io/name: osn-createchannel-{{ $.Values.channel.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} + app: {{ .Release.Name }} + app.kubernetes.io/name: osn-createchannel-{{ .Release.Name }} + app.kubernetes.io/component: fabric-osnadmin-channel-create-job + app.kubernetes.io/part-of: {{ include "fabric-osnadmin-channel-create.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm spec: backoffLimit: 6 template: metadata: labels: - app: osn-createchannel-{{ $.Values.channel.name }} - app.kubernetes.io/name: osn-createchannel-{{ $.Values.channel.name }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} + app: {{ .Release.Name }} + app.kubernetes.io/name: osn-createchannel-{{ .Release.Name }} + app.kubernetes.io/component: fabric-osnadmin-channel-create-job + app.kubernetes.io/part-of: {{ include "fabric-osnadmin-channel-create.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm spec: restartPolicy: "OnFailure" - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - {{- if .Values.vault.imagesecretname }} + serviceAccountName: {{ .Values.global.serviceAccountName }} + {{- if .Values.image.pullSecret }} imagePullSecrets: - - name: {{ $.Values.vault.imagesecretname }} + - name: {{ .Values.image.pullSecret }} {{- end }} volumes: - {{ if .Values.vault.tls }} + {{ if .Values.global.vault.tls }} - name: vaultca secret: - secretName: {{ $.Values.vault.tls }} + secretName: {{ .Values.global.vault.tls }} items: - key: ca.crt.pem path: ca-certificates.crt {{ end }} - {{- if not $.Values.add_orderer }} + {{- if not $.Values.orderer.addOrderer }} - name: genesis configMap: - name: {{ $.Values.channel.name }}-genesis-block + name: {{ .Release.Name }}-genesis items: - - key: genesis.block.base64 + - key: {{ .Release.Name }}-genesis_base64 path: genesis.block.base64 {{ end }} - name: certificates @@ -61,41 +61,53 @@ spec: name: bevel-vault-script initContainers: - name: certificates-init - image: {{ $.Values.metadata.images.alpineutils }} + image: {{ $.Values.image.alpineUtils }} imagePullPolicy: IfNotPresent env: - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} + value: {{ $.Values.global.vault.address }} - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: VAULT_ADMIN_SECRET_PREFIX - value: "{{ $.Values.vault.adminsecretprefix }}" - - name: VAULT_ORDERER_SECRET_PREFIX - value: "{{ $.Values.vault.orderersecretprefix }}" - - name: ORDERERS_NAMES - value: "{{ $.Values.orderers.orderer_info }}" - - name: COMPONENT_NAME - value: {{ $.Values.metadata.namespace }} + value: {{ $.Values.global.vault.role }} + - name: KUBERNETES_AUTH_PATH + value: {{ $.Values.global.vault.authPath }} + - name: VAULT_SECRET_ENGINE + value: "{{ .Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ .Values.global.vault.secretPrefix }}" + - name: VAULT_TYPE + value: "{{ $.Values.global.vault.type }}" - name: MOUNT_PATH value: /secret - - name: VAULT_TYPE - value: "{{ $.Values.vault.type }}" command: ["sh", "-c"] args: - |- #!/usr/bin/env sh - source /scripts/bevel-vault.sh +{{- if eq .Values.global.vault.type "hashicorp" }} + + source /scripts/bevel-vault.sh vaultBevelFunc "init" - list=$(echo "$ORDERERS_NAMES" | tr "*" "\n") - for ORDERER in $list - do - ORDERER_NAME="${ORDERER%%,*}" - # Check if orderer msp already created - vaultBevelFunc "readJson" "${VAULT_ORDERER_SECRET_PREFIX}/${ORDERER_NAME}.${COMPONENT_NAME}/msp" + function getOrdererTlsSecret { + KEY=$1 + + echo "Getting TLS certificates from Vault." 
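+              # Note: vaultBevelFunc "readJson" below makes the JSON response available in VAULT_SECRET; the individual PEM fields are then extracted with jq and written under ${OUTPUT_PATH}.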
+ vaultBevelFunc "readJson" "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/orderers/${KEY}" + + TLS_CA_CERT=$(echo ${VAULT_SECRET} | jq -r '.["ca_crt"]') + TLS_SERVER_CERT=$(echo ${VAULT_SECRET} | jq -r '.["server_crt"]') + TLS_SERVER_KEY=$(echo ${VAULT_SECRET} | jq -r '.["server_key"]') + + echo "${TLS_CA_CERT}" > ${OUTPUT_PATH}/ca.crt + echo "${TLS_SERVER_CERT}" > ${OUTPUT_PATH}/server.crt + echo "${TLS_SERVER_KEY}" > ${OUTPUT_PATH}/server.key + } + + function getOrdererMspSecret { + type=$1 + KEY=$2 + echo "Getting MSP certificates from Vault." + vaultBevelFunc "readJson" "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/${type}/${KEY}" ADMINCERT=$(echo ${VAULT_SECRET} | jq -r '.["admincerts"]') CACERTS=$(echo ${VAULT_SECRET} | jq -r '.["cacerts"]') @@ -103,59 +115,71 @@ spec: SIGNCERTS=$(echo ${VAULT_SECRET} | jq -r '.["signcerts"]') TLSCACERTS=$(echo ${VAULT_SECRET} | jq -r '.["tlscacerts"]') + echo "${ADMINCERT}" > ${OUTPUT_PATH}/admincerts/admin.crt + echo "${CACERTS}" > ${OUTPUT_PATH}/cacerts/ca.crt + echo "${KEYSTORE}" > ${OUTPUT_PATH}/keystore/server.key + echo "${SIGNCERTS}" > ${OUTPUT_PATH}/signcerts/server.crt + echo "${TLSCACERTS}" > ${OUTPUT_PATH}/tlscacerts/tlsca.crt + } +{{- else }} + + function getOrdererTlsSecret { + KEY=$1 + KUBENETES_SECRET=$(kubectl get secret ${KEY} --namespace {{ .Release.Namespace }} -o json) + CACERT=$(echo ${KUBENETES_SECRET} | jq -r '.data.cacrt' | base64 -d) + CLIENTSEVER=$(echo ${KUBENETES_SECRET} | jq -r '.data.servercrt' | base64 -d) + CLIENTKEY=$(echo ${KUBENETES_SECRET} | jq -r '.data.serverkey' | base64 -d) + + echo "${CACERT}" > ${OUTPUT_PATH}/ca.crt + echo "${CLIENTSEVER}" > ${OUTPUT_PATH}/server.crt + echo "${CLIENTKEY}" > ${OUTPUT_PATH}/server.key + } + + function getOrdererMspSecret { + type=$1 + KEY=$2 + KUBENETES_SECRET=$(kubectl get secret ${KEY} --namespace {{ .Release.Namespace }} -o json) + + ADMINCERT=$(echo ${KUBENETES_SECRET} | jq -r '.data.admincerts' | base64 -d) + CACERTS=$(echo ${KUBENETES_SECRET} | jq -r '.data.cacerts' | base64 -d) + KEYSTORE=$(echo ${KUBENETES_SECRET} | jq -r '.data.keystore' | base64 -d) + SIGNCERTS=$(echo ${KUBENETES_SECRET} | jq -r '.data.signcerts' | base64 -d) + TLSCACERTS=$(echo ${KUBENETES_SECRET} | jq -r '.data.tlscacerts' | base64 -d) + + echo "${ADMINCERT}" > ${OUTPUT_PATH}/admincerts/admin.crt + echo "${CACERTS}" > ${OUTPUT_PATH}/cacerts/ca.crt + echo "${KEYSTORE}" > ${OUTPUT_PATH}/keystore/server.key + echo "${SIGNCERTS}" > ${OUTPUT_PATH}/signcerts/server.crt + echo "${TLSCACERTS}" > ${OUTPUT_PATH}/tlscacerts/tlsca.crt + + } +{{- end }} + + OUTPUT_PATH="${MOUNT_PATH}/admin/msp" + mkdir -p ${OUTPUT_PATH}/admincerts + mkdir -p ${OUTPUT_PATH}/cacerts + mkdir -p ${OUTPUT_PATH}/keystore + mkdir -p ${OUTPUT_PATH}/signcerts + mkdir -p ${OUTPUT_PATH}/tlscacerts + getOrdererMspSecret users admin-msp + + {{- range $orderer := .Values.orderers }} + ORDERER_NAME={{ .name }} OUTPUT_PATH="${MOUNT_PATH}/${ORDERER_NAME}/msp" mkdir -p ${OUTPUT_PATH}/admincerts mkdir -p ${OUTPUT_PATH}/cacerts mkdir -p ${OUTPUT_PATH}/keystore mkdir -p ${OUTPUT_PATH}/signcerts mkdir -p ${OUTPUT_PATH}/tlscacerts - - echo "${ADMINCERT}" >> ${OUTPUT_PATH}/admincerts/admin.crt - echo "${CACERTS}" >> ${OUTPUT_PATH}/cacerts/ca.crt - echo "${KEYSTORE}" >> ${OUTPUT_PATH}/keystore/server.key - echo "${SIGNCERTS}" >> ${OUTPUT_PATH}/signcerts/server.crt - echo "${TLSCACERTS}" >> ${OUTPUT_PATH}/tlscacerts/tlsca.crt - - # Check if orderers tls already created - vaultBevelFunc "readJson" 
"${VAULT_ORDERER_SECRET_PREFIX}/${ORDERER_NAME}.${COMPONENT_NAME}/tls" - - CACERT=$(echo ${VAULT_SECRET} | jq -r '.["ca.crt"]') - CLIENTSEVER=$(echo ${VAULT_SECRET} | jq -r '.["server.crt"]') - CLIENTKEY=$(echo ${VAULT_SECRET} | jq -r '.["server.key"]') + getOrdererMspSecret orderers ${ORDERER_NAME}-msp OUTPUT_PATH="${MOUNT_PATH}/${ORDERER_NAME}/tls" mkdir -p ${OUTPUT_PATH} - - echo "${CACERT}" >> ${OUTPUT_PATH}/ca.crt - echo "${CLIENTSEVER}" >> ${OUTPUT_PATH}/server.crt - echo "${CLIENTKEY}" >> ${OUTPUT_PATH}/server.key - done - - ############################################################################### - echo "Getting MSP certificates from Vault using key $vault_secret_key" - vaultBevelFunc "readJson" "${VAULT_ADMIN_SECRET_PREFIX}/msp" - - ADMINCERT=$(echo ${VAULT_SECRET} | jq -r '.["admincerts"]') - CACERTS=$(echo ${VAULT_SECRET} | jq -r '.["cacerts"]') - KEYSTORE=$(echo ${VAULT_SECRET} | jq -r '.["keystore"]') - SIGNCERTS=$(echo ${VAULT_SECRET} | jq -r '.["signcerts"]') - TLSCACERTS=$(echo ${VAULT_SECRET} | jq -r '.["tlscacerts"]') - - OUTPUT_PATH="${MOUNT_PATH}/admin/msp" - mkdir -p ${OUTPUT_PATH}/admincerts - mkdir -p ${OUTPUT_PATH}/cacerts - mkdir -p ${OUTPUT_PATH}/keystore - mkdir -p ${OUTPUT_PATH}/signcerts - mkdir -p ${OUTPUT_PATH}/tlscacerts - - echo "${ADMINCERT}" >> ${OUTPUT_PATH}/admincerts/admin.crt - echo "${CACERTS}" >> ${OUTPUT_PATH}/cacerts/ca.crt - echo "${KEYSTORE}" >> ${OUTPUT_PATH}/keystore/server.key - echo "${SIGNCERTS}" >> ${OUTPUT_PATH}/signcerts/server.crt - echo "${TLSCACERTS}" >> ${OUTPUT_PATH}/tlscacerts/tlsca.crt + getOrdererTlsSecret ${ORDERER_NAME}-tls + {{- end }} volumeMounts: - {{ if .Values.vault.tls }} + {{ if .Values.global.vault.tls }} - name: vaultca mountPath: "/etc/ssl/certs/" readOnly: true @@ -167,22 +191,22 @@ spec: subPath: bevel-vault.sh containers: - name: createchannel - image: {{ $.Values.metadata.images.fabrictools }} + image: {{ .Values.image.fabricTools }}:{{ .Values.global.version }} imagePullPolicy: IfNotPresent stdin: true tty: true + envFrom: + - configMapRef: + name: {{ .Release.Name }}-osnadmin-config env: - - name: ORDERERS_INFO - value: "{{ $.Values.orderers.orderer_info }}" - - name: NAMESPACE - value: "{{ $.Values.metadata.namespace }}" - name: ADD_ORDERER - value: "{{ $.Values.add_orderer }}" + value: "{{ .Values.orderer.addOrderer }}" command: ["sh", "-c"] args: - |- + #!/usr/bin/env sh - echo "Fetch genesis file..." + echo "Format or fetch genesis file..." 
if [ $ADD_ORDERER = false ] then cat ./genesis/genesis.block.base64 | base64 -d > orderer.genesis.block @@ -190,13 +214,13 @@ spec: peer channel fetch config ${CHANNEL_NAME}_config_block.pb -o ${CORE_PEER_ADDRESS} -c ${CHANNEL_NAME} --tls --cafile ${ORDERER_CA} fi - list=$(echo "$ORDERERS_INFO" | tr "*" "\n") - for ORDERER_NAME in $list - do + {{- range $orderer := .Values.orderers }} + ORDERER_NAME={{ .name }} ADMIN_TLS_PRIVATE_KEY="${ADMIN_TLS_CERTS}/${ORDERER_NAME}/tls/server.key" ADMIN_TLS_SIGN_CERT="${ADMIN_TLS_CERTS}/${ORDERER_NAME}/tls/server.crt" OSN_TLS_CA_ROOT_CERT="${ADMIN_TLS_CERTS}/${ORDERER_NAME}/tls/ca.crt" - ORDERER_URL="${ORDERER_NAME}.${NAMESPACE}:7055" + # The ORDERER_URL is hardcoded to use local orderer URL as of now + ORDERER_URL="{{ .adminAddress }}" CHANNEL_LIST_QUERY_RESPONSE=$(osnadmin channel list --channelID ${CHANNEL_NAME} -o "${ORDERER_URL}" --ca-file "${OSN_TLS_CA_ROOT_CERT}" --client-cert "${ADMIN_TLS_SIGN_CERT}" --client-key "${ADMIN_TLS_PRIVATE_KEY}") if echo "$CHANNEL_LIST_QUERY_RESPONSE" | grep '404'; then @@ -210,20 +234,17 @@ spec: osnadmin channel list -o "${ORDERER_URL}" --ca-file "${OSN_TLS_CA_ROOT_CERT}" --client-cert "${ADMIN_TLS_SIGN_CERT}" --client-key "${ADMIN_TLS_PRIVATE_KEY}" elif echo "$CHANNEL_LIST_QUERY_RESPONSE" | grep '200\|201'; then echo "Channel ${CHANNEL_NAME}, is already created for url: ${ORDERER_URL}" - osnadmin channel list --channelID ${CHANNEL_NAME} -o "${ORDERER_URL}" --ca-file "${OSN_TLS_CA_ROOT_CERT}" --client-cert "${ADMIN_TLS_SIGN_CERT}" --client-key "${ADMIN_TLS_PRIVATE_KEY}" + echo "$CHANNEL_LIST_QUERY_RESPONSE" else echo $CHANNEL_LIST_QUERY_RESPONSE fi - done + {{- end }} workingDir: /opt/gopath/src/github.com/hyperledger/fabric/orderer - envFrom: - - configMapRef: - name: {{ $.Values.channel.name }}-config volumeMounts: - name: certificates mountPath: /opt/gopath/src/github.com/hyperledger/fabric/crypto readOnly: true - {{- if not $.Values.add_orderer }} + {{- if not $.Values.orderer.addOrderer }} - name: genesis mountPath: /opt/gopath/src/github.com/hyperledger/fabric/orderer/genesis readOnly: true diff --git a/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/values.yaml b/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/values.yaml index 44059765ab8..e155f09470f 100644 --- a/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/values.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-osnadmin-channel-create/values.yaml @@ -3,64 +3,64 @@ # # SPDX-License-Identifier: Apache-2.0 ############################################################################################## - -metadata: - #Provide the namespace for organization's peer - #Eg. namespace: org1-net - namespace: org1-net - #Provide Fabric version +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +--- +# The following are for overriding global values +global: + # HLF Network Version #Eg. version: 2.5.4 - network: - version: 2.5.4 - images: - #Provide the valid image name and version for fabric tools - #Eg. fabrictools: hyperledger/fabric-tools:2.2.2 - fabrictools: ghcr.io/hyperledger/bevel-fabric-tools:2.5.4 - #Provide the valid image name and version to read certificates from vault server - #Eg. alpineutils: hyperledgerlabs/alpine-utils:1.0 - alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - #Provide the custom labels - #NOTE: Provide labels other than name, release name , release service, chart version , chart name , app. - #Eg. 
labels: - # role: create_channel - labels: - -deployment: - annotations: + version: 2.5.4 + #Provide the service account name which will be created. + serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # only 'false' is implemented + vault: + #Provide the type of vault + #Eg. type: hashicorp + type: hashicorp + #Provide the vaultrole for an organization + #Eg. vaultrole: org1-vault-role + role: vault-role + #Provide the network type + network: fabric + #Provide the vault server address + #Eg. vaultaddress: http://Vault-884963190.eu-west-1.elb.amazonaws.com + address: + #Provide the kubernetes auth backed configured in vault for an organization + #Eg. authpath: supplychain + authPath: supplychain + #Provide the secret engine. + secretEngine: secretsv2 + #Provide the vault path where the secrets will be stored + secretPrefix: "data/supplychain" + #Enable or disable TLS for vault communication + #Eg. tls: true + tls: -vault: - #Provide the vaultrole for an organization - #Eg. vaultrole: vault-role - role: vault-role - #Provide the vault server address - #Eg. vaultaddress: http://vault.internal.example.com:9000 - address: - #Provide the kubernetes auth backed configured in vault for an organization - #Eg. authpath: devorg1-net-auth - authpath: devorg1-net-auth - #Provide the value for vault secretprefix - #Provide the value for vault secretprefix where orderers certificates are stored. - #Eg. orderersecretprefix: secretsv2/crypto/ordererOrganizations/org1-net/orderers - orderersecretprefix: secretsv2/crypto/ordererOrganizations/org1-net/orderers - #Provide the serviceaccountname for vault - #Eg. serviceaccountname: vault-auth - serviceaccountname: vault-auth - #Provide the type of vault - #Eg. type: hashicorp - type: hashicorp - #Provide the imagesecretname for vault - #Eg. imagesecretname: regcred - imagesecretname: "" +image: + #Provide the valid image name and version for fabric tools + #Eg. fabricTools: hyperledger/fabric-tools + fabricTools: ghcr.io/hyperledger/bevel-fabric-tools + #Provide the valid image name and version to read certificates from vault server + #Eg. alpineUtils: hyperledgerlabs/alpine-utils:1.0 + alpineUtils: ghcr.io/hyperledger/bevel-alpine:latest + #Provide the secret to use if private repository + #Eg. pullSecret: regcred + pullSecret: -channel: - #Provide the name of the channel - #Eg. name: mychannel - name: mychannel +#Provide orderer details +orderers: + - name: orderer1 + adminAddress: orderer1.supplychain-net:7055 # Internal URI of the orderer ONS Admin service + - name: orderer2 + adminAddress: orderer2.supplychain-net:7055 + - name: orderer3 + adminAddress: orderer3.supplychain-net:7055 orderer: - #Provide orderer's names - #Eg. address: orderer1*orderer2*orderer3 - orderer_info: orderer1 - -#Provide the base64 encoded genesis file -genesis: + addOrderer: false + name: neworderer + localMspId: newordererMSP + ordererAddress: neworderer.neworg-net:7050 diff --git a/platforms/hyperledger-fabric/charts/fabric-peernode/.helmignore b/platforms/hyperledger-fabric/charts/fabric-peernode/.helmignore new file mode 100644 index 00000000000..014fa775608 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-peernode/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +generated_config/ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/platforms/hyperledger-fabric/charts/fabric-peernode/Chart.yaml b/platforms/hyperledger-fabric/charts/fabric-peernode/Chart.yaml index e4003f788ce..407c54689c0 100644 --- a/platforms/hyperledger-fabric/charts/fabric-peernode/Chart.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-peernode/Chart.yaml @@ -5,7 +5,22 @@ ############################################################################################## apiVersion: v1 -appVersion: "2.0" -description: "Hyperledger Fabric: Deploys peer node." name: fabric-peernode -version: 1.0.0 +description: "Hyperledger Fabric: Deploys peer node." +version: 1.1.0 +appVersion: latest +keywords: + - bevel + - hlf + - fabric + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/hyperledger-fabric/charts/fabric-peernode/README.md b/platforms/hyperledger-fabric/charts/fabric-peernode/README.md index 1ee770a923d..81a7d8b6f76 100644 --- a/platforms/hyperledger-fabric/charts/fabric-peernode/README.md +++ b/platforms/hyperledger-fabric/charts/fabric-peernode/README.md @@ -3,232 +3,159 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) - -# Peer Node Hyperledger Fabric Deployment +# fabric-peernode -- [Peer Node Hyperledger Fabric Deployment Helm Chart](#peer-node-hyperledger-fabric-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Verification](#verification) -- [Updating the Deployment](#updating-the-deployment) -- [Deletion](#deletion) -- [Contributing](#contributing) -- [License](#license) +This chart is a component of Hyperledger Bevel. The fabric-peernode chart deploys a Peer Node for Hyperledger Fabric blockchain network. If enabled, the keys are stored on the configured vault and stored as Kubernetes secrets. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details. +## TL;DR - -## Peer Node Hyperledger Fabric Deployment Helm Chart ---- -A [Helm chart](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-fabric/charts/fabric-peernode) for peer node. - +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install peer0 bevel/fabric-peernode +``` - ## Prerequisites ---- -Before deploying the Helm chart, make sure to have the following prerequisites: -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- HAproxy is required as ingress controller. -- Helm installed. 
+- Kubernetes 1.19+ +- Helm 3.2.0+ +If Hashicorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ - -## Chart Structure ---- -The structure of the Helm chart is as follows: - -``` -fabric-peernode/ - |- conf/ - |- default_core.yaml - |- templates/ - |- _helpers.yaml - |- configmap.yaml - |- deployment.yaml - |- service.yaml - |- servicemonitor.yaml - |- Chart.yaml - |- README.md - |- values.yaml -``` +> **Important**: Also check the dependent charts. -- `default_core.yaml`: Default configuration file for the peer node. -- `templates/`: Contains the Kubernetes manifest templates that define the resources to be deployed. -- `helpers.tpl`: Contains custom label definitions used in other templates. -- `configmap.yaml`: Provides a way to configure the Hyperledger Fabric peer and enable it to join the network, interact with other nodes. The environment variables that are defined in the peer-config ConfigMap are used to configure the peer's runtime behavior. The configuration for the MSP is defined in the msp-config ConfigMap. The core.yaml file is used to configure the chaincode builder -- `deployment.yaml`: The certificates-init container fetches TLS certificates and other secrets from Vault. The couchdb container runs a CouchDB database that is used to store the ledger state. The {{ $.Values.peer.name }} container runs a Hyperledger Fabric peer that manages the ledger and provides access to the blockchain network. The grpc-web container runs a gRPC-Web proxy that allows gRPC services to be accessed via a web browser. -- `service.yaml`: Ensures internal and external access with exposed ports for gRPC (7051), events (7053), CouchDB (5984), gRPC-Web (7443), and operations (9443), and optionally uses HAProxy for external exposure and secure communication. -- `servicemonitor.yaml`: Define a ServiceMonitor resource that allows Prometheus to collect metrics from the peer node's "operations" port. The configuration is conditionally applied based on the availability of the Prometheus Operator's API version and whether metrics are enabled for the peer service. -- `Chart.yaml`: Contains the metadata for the Helm chart, such as the name, version, and description. -- `README.md`: Provides information and instructions about the Helm chart. -- `values.yaml`: Contains the default configuration values for the Helm chart. - - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-fabric/charts/fabric-peernode/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. 
Here are some important configuration options: - -### Metadata - -| Name | Description | Default Value | -| ----------------------| ----------------------------------------------------------------------| --------------------------------------------------| -| namespace | Provide the namespace for organization's peer | org1-net | -| images.couchdb | valid image name and version for fabric couchdb | ghcr.io/hyperledger/bevel-fabric-couchdb:2.2.2 | -| images.peer | valid image name and version for fabric peer | ghcr.io/hyperledger/bevel-fabric-peer:2.2.2 | -| images.alpineutils | valid image name and version to read certificates from vault server | ghcr.io/hyperledger/bevel-alpine:latest | -| labels | Provide custom labels | "" | - -### Annotations - -| Name | Description | Default Value | -| ---------------| --------------------------------------- | --------------| -| service | Extra annotations for service | "" | -| pvc | Extra annotations for pvc | "" | -| deployment | Extra annotations for deployment | "" | +## Installing the Chart -### Peer +To install the chart with the release name `peer0`: -| Name | Description | Default Value | -| ------------------------------------------| ----------------------------------------------------------------------| ----------------------------------------------| -| name | Name of the peer as per deployment yaml | peer0 | -| gossippeeraddress | URL of gossipping peer and port for grpc | peer1.org1-net.svc.cluster.local:7051 | -| gossipexternalendpoint | URL of gossip external endpoint and port for haproxy https service | peer0.org1-net.org1proxy.blockchaincloudpoc.com:443 | -| localmspid | Local MSP ID for the organization | Org1MSP | -| loglevel | Log level for organization's peer | info | -| tlsstatus | Set to true or false for organization's peer | true | -| builder | Valid chaincode builder image for Fabric | hyperledger/fabric-ccenv:2.2.2 | -| couchdb.username | CouchDB username (mandatory if provided) | org1-user | -| configpath | Provide the configuration path | "" | -| core | Provide core configuration | "" | -| mspconfig.organizationalunitidentifiers | Provide the members of the MSP in organizational unit identifiers | "" | -| mspconfig.nodeOUs.clientOUidentifier.organizationalunitidentifier | Organizational unit identifier for client nodes | client | -| mspconfig.nodeOUs.peerOUidentifier.organizationalunitidentifier | Organizational unit identifier for peer nodes | peer | -| mspconfig.nodeOUs.adminOUidentifier.organizationalunitidentifier | Organizational unit identifier for admin nodes (2.2.x) | admin | -| mspconfig.nodeOUs.ordererOUidentifier.organizationalunitidentifier | Organizational unit identifier for orderer nodes (2.2.x) | orderer | +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install peer0 bevel/fabric-peernode +``` -### Storage +The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. 
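For illustration only, here is a minimal override sketch built from a few of the values documented in the [Parameters](#parameters) section below; the MSP ID, gossip address and CouchDB credentials shown are just the chart defaults and must be replaced for a real organization.

```bash
# Illustrative only: replace the default values below with your organization's own.
cat > peer0-overrides.yaml <<'EOF'
global:
  proxy:
    provider: haproxy
    externalUrlSuffix: test.blockchaincloudpoc.com   # placeholder external URL suffix
peer:
  localMspId: supplychainMSP
  gossipPeerAddress: peer1.supplychain-net:7051
  couchdb:
    username: supplychain-user
    password: supplychain-userpw
storage:
  peer: 512Mi
  couchdb: 512Mi
EOF

helm install peer0 bevel/fabric-peernode -f peer0-overrides.yaml
```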
-| Name | Description | Default Value | -| --------------------------| -------------------------------- | ------------------- | -| peer.storageclassname | Storage class name for peer | aws-storageclass | -| peer.storagesize | Storage size for peer | 512Mi | -| couchdb.storageclassname | Storage class name for CouchDB | aws-storageclass | -| couchdb.storagesize | Storage size for CouchDB | 512Mi | - -### Vault - -| Name | Description | Default Value | -| ----------------------| ----------------------------------------------------------------------| --------------------------------------------------| -| role | Vault role for the organization | vault-role | -| address | Vault server address | "" | -| authpath | Kubernetes auth backend configured in vault for the organization | devorg1-net-auth | -| secretprefix | Vault secret prefix | ssecretsv2/data/crypto/peerOrganizations/org1-net/peers/peer0.org1-net | -| serviceaccountname | Service account name for vault | vault-auth | -| type | Provide the type of vault | hashicorp | -| imagesecretname | Image secret name for vault | "" | -| secretcouchdbpass | Vault path for secret CouchDB password | secretsv2/data/credentials/org1-net/couchdb/org1?user | -| tls | Enable or disable TLS for vault communication | "" | - -### Service - -| Name | Description | Default Value | -| ----------------------------- | ------------------------------------------| ------------------- | -| servicetype | Service type for the peer | ClusterIP | -| loadBalancerType | Load balancer type for the peer | "" | -| ports.grpc.nodeport | Cluster IP port for grpc service | "" | -| ports.grpc.clusteripport | Cluster IP port for grpc service | 7051 | -| ports.events.nodeport | Cluster IP port for event service | "" | -| ports.events.clusteripport | Cluster IP port for event service | 7053 | -| ports.couchdb.nodeport | Cluster IP port for CouchDB service | "" | -| ports.couchdb.clusteripport | Cluster IP port for CouchDB service | 5984 | -| ports.metrics.enabled | Enable/disable metrics service | false | -| ports.metrics.clusteripport | Cluster IP port for metrics service | 9443 | - -### Proxy - -| Name | Description | Default Value | -| ----------------------| ----------------------------------------------------------| ------------------- | -| provider | Proxy/ingress provider ( haproxy or none) | none | -| external_url_suffix | External URL of the organization | org1proxy.blockchaincloudpoc.com | -| port | External port on proxy service | 443 | - -### Config - -| Name | Description | Default Value | -| ----------------------------- | --------------------------- | ------------------- | -| pod.resources.limits.memory | Limit memory for node | 512M | -| pod.resources.limits.cpu | Limit CPU for node | 1 | -| pod.resources.requests.memory | Requested memory for node | 512M | -| pod.resources.requests.cpu | Requested CPU for node | 0.25 | - - - -## Deployment ---- - -To deploy the fabric-peernode Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-peernode/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install the chart: - ``` - $ helm repo add bevel https://hyperledger.github.io/bevel/ - $ helm install ./fabric-peernode - ``` -Replace `` with the desired name for the release. - -This will deploy the fabric-peernode node to the Kubernetes cluster based on the provided configurations. 
- - - -## Verification ---- - -To verify the deployment, we can use the following command: -``` -$ kubectl get statefulsets -n -``` -Replace `` with the actual namespace where the StatefulSet was created. This command will display information about the StatefulSet, including the number of replicas and their current status. +> **Tip**: List all releases using `helm list` +## Uninstalling the Chart - -## Updating the Deployment ---- +To uninstall/delete the `peer0` deployment: -If we need to update the deployment with new configurations or changes, modify the same [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-peernode/values.yaml) file with the desired changes and run the following Helm command: +```bash +helm uninstall peer0 ``` -$ helm upgrade ./fabric-peernode -``` -Replace `` with the name of the release. This command will apply the changes to the deployment, ensuring the fabric-peernode node is up to date. - - -## Deletion ---- +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global + +These parameters are refered to as same in each parent or child chart +| Name | Description | Default Value | +|--------|---------|-------------| +|`global.version` | Fabric Version. | `2.5.4` | +|`global.serviceAccountName` | The serviceaccount name that will be created for Vault Auth and k8S Secret management| `vault-auth` | +| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. Currently ony `aws`, `azure` and `minikube` are tested | `aws` | +| `global.cluster.cloudNativeServices` | only `false` is implemented, `true` to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) is for future | `false` | +| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` and `kubernetes` is supported. | `hashicorp` | +| `global.vault.role` | Role used for authentication with Vault | `vault-role` | +| `global.vault.address`| URL of the Vault server. | `""` | +| `global.vault.authPath` | Authentication path for Vault | `supplychain` | +| `global.vault.secretEngine` | Vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | Vault secret prefix which must start with `data/` | `data/supplychain` | +| `global.vault.tls` | Name of the Kubernetes secret which has certs to connect to TLS enabled Vault | `""` | +| `global.proxy.provider` | The proxy or Ingress provider. Can be `none` or `haproxy` | `haproxy` | +| `global.proxy.externalUrlSuffix` | The External URL suffix at which the Fabric GRPC services will be available | `test.blockchaincloudpoc.com` | +| `global.proxy.port` | The External Port on the proxy | `443` | -To delete the deployment and associated resources, run the following Helm command: -``` -$ helm uninstall -``` -Replace `` with the name of the release. This command will remove all the resources created by the Helm chart. +### Storage +| Name | Description | Default Value | +|--------|---------|-------------| +| `storage.enabled` | Flag to enable Storage Class creation for the Peer, set to `false` when using same peer name in different organizations | `true` | +| `storage.peer` | Size of the PVC needed for Peer Node | `512Mi` | +| `storage.couchdb` | Size of the PVC needed for CouchDB Database | `512Mi` | +| `storage.reclaimPolicy` | Reclaim policy for the PVC. 
Choose from: `Delete` or `Retain` | `Delete` | +| `storage.volumeBindingMode` | Volume binding mode for the PVC. Choose from: `Immediate` or `WaitForFirstConsumer` | `Immediate` | +| `storage.allowedTopologies.enabled` | Check [bevel-storageclass](../../../shared/charts/bevel-storageclass/README.md) for details | `false` | + +### Certs + +| Name | Description | Default Value | +|--------|---------|-------------| +| `certs.generateCertificates` | Flag to generate certificates for the Peer Node | `true` | +| `certs.orgData.caAddress` | Address of the CA Server without https | `ca.supplychain-net:7051` | +| `certs.orgData.caAdminUser` | CA Admin Username | `supplychain-admin` | +| `certs.orgData.caAdminPassword` | CA Admin Password | `supplychain-adminpw` | +| `certs.orgData.orgName` | Organization Name | `supplychain` | +| `certs.orgData.type` | Type of certificate to generate, choosed from `orderer` or `peer` | `peer` | +| `certs.orgData.componentSubject` | X.509 subject for the organization | `"O=Peer,L=51.50/-0.13/London,C=GB"` | +| `certs.users.usersList` | Array of Users with their attributes | `""` | +| `certs.settings.createConfigMaps` | Flag to create configmaps. Must be set to `false` for additional orderers/peers in the same organization. | `false` | +| `certs.settings.refreshCertValue` | Flag to refresh User certificates | `false` | +| `certs.settings.addPeerValue` | Flag to be used when adding a new peer to the organization | `false` | +| `certs.settings.removeCertsOnDelete` | Flag to delete the user and peer certificates on uninstall | `false` | +| `certs.settings.removePeerTlsOnDelete` | Flag to delete the orderer TLS certificates on uninstall | `false` | + +### Image + +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `image.couchdb` | CouchDB image repository | `ghcr.io/hyperledger/bevel-fabric-couchdb` | +| `image.peer` | Fabric Peer image repository | `ghcr.io/hyperledger/bevel-fabric-peer` | +| `image.alpineUtils` | Alpine utils image repository and tag | `ghcr.io/hyperledger/bevel-alpine:latest` | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Peer Node Hyperledger Fabric Deployment Helm Chart](https://github.com/hyperledger/bevel/blob/main/platforms/hyperledger-fabric/charts/fabric-peernode), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). +### Peer +| Name | Description | Default Value | +|--------|---------|-------------| +| `peer.gossipPeerAddress` | Internal or External Address of the Gossip Peer Node, leave empty to use Peer's own address | `peer1.supplychain-net:7051` | +| `peer.logLevel` | Log level for the Peer Node | `info` | +| `peer.localMspId` | Local MSP ID for the Peer Organization | `supplychainMSP` | +| `peer.tlsStatus` | TLS status of the Peer Node | `true` | +| `peer.cliEnabled` | Flag to deploy the Peer CLI. 
Check [fabric-cli](../fabric-cli/README.md) for details | `false` | +| `peer.ordererAddress` | Orderer Internal or External Address with port for CLI to connect | `orderer1.supplychain-net:7050` | +| `peer.builder` | Chaincode Builder Image repository | `hyperledger/fabric-ccenv` | +| `peer.couchdb.username` | CouchDB User Name | `supplychain-user` | +| `peer.couchdb.password` | CouchDB User Password | ` supplychain-userpw` | +| `peer.mspConfig.organizationalUnitIdentifiers` | List of Organizational Unit Identifiers for Peer MSP Config | `""` | +| `peer.mspConfig.nodeOUs.clientOUIdentifier` | Organizational Unit Identifier to identify node as client | `client` | +| `peer.mspConfig.nodeOUs.peerOUIdentifier` | Organizational Unit Identifier to identify node as peer | `peer` | +| `peer.mspConfig.nodeOUs.adminOUIdentifier` | Organizational Unit Identifier to identify node as admin | `admin` | +| `peer.mspConfig.nodeOUs.ordererOUIdentifier` | Organizational Unit Identifier to identify node as orderer | `orderer` | +| `peer.serviceType` | Service Type for the GRPC Service | `ClusterIP` | +| `peer.loadBalancerType` | Load Balancer Type for the GRPC Service | `""` | +| `peer.ports.grpc.nodePort` | NodePort for the Peer GRPC Service | `""` | +| `peer.ports.grpc.clusterIpPort` | TCP Port for the Peer GRPC Service | `7051` | +| `peer.ports.events.nodePort` | NodePort for the Peer Events Service | `""` | +| `peer.ports.events.clusterIpPort` | TCP Port for the Peer Events Service | `7053` | +| `peer.ports.couchdb.nodePort` | NodePort for the CouchDB Service | `""` | +| `peer.ports.couchdb.clusterIpPort` | TCP Port for the CouchDB Service | `5984` | +| `peer.ports.metrics.enabled` | Flag to enable metrics port | `false` | +| `peer.ports.metrics.clusterIpPort` | TCP Port for the Peer metrics | `9443` | +| `peer.resources.limits.memory` | Memory limit for the Peer Node | `1Gi` | +| `peer.resources.limits.cpu` | CPU limit for the Peer Node | `1` | +| `peer.resources.requests.memory` | Memory request for the Peer Node | `512M` | +| `peer.resources.requests.cpu` | CPU request for the Peer Node | `0.25` | +| `peer.upgrade` | Flag to denote that Peer is being upgraded | `false` | +| `peer.healthCheck.retries` | Retry count to connect to Vault | `20` | +| `peer.healthCheck.sleepTimeAfterError` | Wait seconds after unsuccessful connection attempt | `15` | + +### Labels + +| Name | Description | Default Value | +| ----------------| ----------- | ------------- | +| `labels.service` | Array of Labels for service object | `[]` | +| `labels.pvc` | Array of Labels for PVC object | `[]` | +| `labels.deployment` | Array of Labels for deployment or statefulset object | `[]` | - ## License This chart is licensed under the Apache v2.0 license. -Copyright © 2023 Accenture +Copyright © 2024 Accenture ### Attribution diff --git a/platforms/hyperledger-fabric/charts/fabric-peernode/files/readme.txt b/platforms/hyperledger-fabric/charts/fabric-peernode/files/readme.txt new file mode 100644 index 00000000000..1a177b74f91 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-peernode/files/readme.txt @@ -0,0 +1 @@ +This is a dummy file. Place the orderer.crt file in this directory.. 
\ No newline at end of file diff --git a/platforms/hyperledger-fabric/charts/fabric-peernode/requirements.yaml b/platforms/hyperledger-fabric/charts/fabric-peernode/requirements.yaml new file mode 100644 index 00000000000..d00f80f0495 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-peernode/requirements.yaml @@ -0,0 +1,22 @@ +dependencies: + - name: bevel-storageclass + alias: storage + repository: "file://../../../shared/charts/bevel-storageclass" + tags: + - storage + version: ~1.0.0 + condition: storage.enabled + - name: fabric-catools + alias: certs + repository: "file://../fabric-catools" + tags: + - catools + version: ~1.1.0 + condition: certs.generateCertificates + - name: fabric-cli + alias: peer + repository: "file://../fabric-cli" + tags: + - cli + version: ~1.1.0 + condition: peer.cliEnabled diff --git a/platforms/hyperledger-fabric/charts/fabric-peernode/templates/_helpers.tpl b/platforms/hyperledger-fabric/charts/fabric-peernode/templates/_helpers.tpl index 7bf5f530a8e..3996d38ea7c 100644 --- a/platforms/hyperledger-fabric/charts/fabric-peernode/templates/_helpers.tpl +++ b/platforms/hyperledger-fabric/charts/fabric-peernode/templates/_helpers.tpl @@ -1,5 +1,46 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} \ No newline at end of file +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "fabric-peernode.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "fabric-peernode.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "fabric-peernode.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "labels.deployment" -}} +{{- range $value := .Values.labels.deployment }} +{{ toYaml $value }} +{{- end }} +{{- end }} + +{{- define "labels.service" -}} +{{- range $value := .Values.labels.service }} +{{ toYaml $value }} +{{- end }} +{{- end }} + +{{- define "labels.pvc" -}} +{{- range $value := .Values.labels.pvc }} +{{ toYaml $value }} +{{- end }} +{{- end }} diff --git a/platforms/hyperledger-fabric/charts/fabric-peernode/templates/configmap.yaml b/platforms/hyperledger-fabric/charts/fabric-peernode/templates/configmap.yaml index d582db7b489..380ab1500c3 100644 --- a/platforms/hyperledger-fabric/charts/fabric-peernode/templates/configmap.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-peernode/templates/configmap.yaml @@ -7,28 +7,35 @@ apiVersion: v1 kind: ConfigMap metadata: - name: {{ $.Values.peer.name }}-config - namespace: {{ $.Values.metadata.namespace }} + name: {{ .Release.Name }}-config + namespace: {{ .Release.Namespace }} labels: - app.kubernetes.io/name: {{ $.Values.peer.name }}-config - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + app.kubernetes.io/name: {{ .Release.Name }}-config + app.kubernetes.io/component: fabric + app.kubernetes.io/part-of: {{ include "fabric-peernode.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} data: CORE_VM_ENDPOINT: unix:///host/var/run/docker.sock - CORE_PEER_ID: {{ $.Values.peer.name }}.{{ $.Values.metadata.namespace }} - FABRIC_LOGGING_SPEC: "grpc=debug:{{ $.Values.peer.loglevel }}" + CORE_PEER_ID: {{ .Release.Name }}.{{ .Release.Namespace }} + FABRIC_LOGGING_SPEC: "grpc=debug:{{ .Values.peer.logLevel }}" CORE_LEDGER_STATE_STATEDATABASE: CouchDB CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS: localhost:5984 - CORE_LEDGER_STATE_COUCHDBCONFIG_USERNAME: "{{ $.Values.peer.couchdb.username }}" - CORE_PEER_ADDRESS: {{ $.Values.peer.name }}.{{ $.Values.metadata.namespace }}:{{ $.Values.service.ports.grpc.clusteripport }} - CORE_PEER_GOSSIP_BOOTSTRAP: {{ $.Values.peer.gossippeeraddress }} - {{ if $.Values.peer.gossipexternalendpoint }} - CORE_PEER_GOSSIP_EXTERNALENDPOINT: {{ $.Values.peer.gossipexternalendpoint }} - {{ end }} - CORE_PEER_LOCALMSPID: {{ $.Values.peer.localmspid }} - CORE_PEER_TLS_ENABLED: "{{ $.Values.peer.tlsstatus }}" + CORE_LEDGER_STATE_COUCHDBCONFIG_USERNAME: "{{ .Values.peer.couchdb.username }}" + CORE_PEER_ADDRESS: {{ .Release.Name }}.{{ .Release.Namespace }}:{{ .Values.peer.ports.grpc.clusterIpPort }} + {{- if .Values.peer.gossipPeerAddress }} + CORE_PEER_GOSSIP_BOOTSTRAP: {{ .Values.peer.gossipPeerAddress }} + {{- else }} + CORE_PEER_GOSSIP_BOOTSTRAP: {{ .Release.Name }}.{{ .Release.Namespace }}:{{ .Values.peer.ports.grpc.clusterIpPort }} + {{- end }} + {{- if eq .Values.global.proxy.provider "none" }} + CORE_PEER_GOSSIP_EXTERNALENDPOINT: {{ .Release.Name }}.{{ .Release.Namespace }}:{{ .Values.peer.ports.grpc.clusterIpPort }} + {{- else }} + CORE_PEER_GOSSIP_EXTERNALENDPOINT: {{ .Release.Name }}.{{ .Release.Namespace }}.{{ .Values.global.proxy.externalUrlSuffix }}:{{ .Values.global.proxy.port }} + {{- end }} + CORE_PEER_LOCALMSPID: {{ .Values.peer.localMspId }} + CORE_PEER_TLS_ENABLED: "{{ 
.Values.peer.tlsStatus }}" CORE_PEER_TLS_CERT_FILE: /etc/hyperledger/fabric/crypto/tls/server.crt CORE_PEER_TLS_KEY_FILE: /etc/hyperledger/fabric/crypto/tls/server.key CORE_PEER_TLS_ROOTCERT_FILE: /etc/hyperledger/fabric/crypto/msp/tlscacerts/tlsca.crt @@ -36,58 +43,82 @@ data: CORE_PEER_GOSSIP_ORGLEADER: "false" CORE_PEER_PROFILE_ENABLED: "true" CORE_PEER_ADDRESSAUTODETECT: "true" - CORE_PEER_NETWORKID: {{ $.Values.peer.name }}.{{ $.Values.metadata.namespace }} + CORE_PEER_NETWORKID: {{ .Release.Name }}.{{ .Release.Namespace }} CORE_PEER_MSPCONFIGPATH: /etc/hyperledger/fabric/crypto/msp GODEBUG: "netdns=go" CORE_PEER_GOSSIP_SKIPHANDSHAKE: "true" - CORE_CHAINCODE_BUILDER: "{{ $.Values.peer.builder }}" + CORE_CHAINCODE_BUILDER: "{{ .Values.peer.builder }}:{{ .Values.global.version }}" CORE_OPERATIONS_LISTENADDRESS: 0.0.0.0:9443 --- apiVersion: v1 kind: ConfigMap metadata: - name: {{ $.Values.peer.name }}-msp-config - namespace: {{ $.Values.metadata.namespace }} + name: {{ .Release.Name }}-msp-config + namespace: {{ .Release.Namespace }} labels: - app.kubernetes.io/name: {{ $.Values.peer.name }}-msp-config - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + app.kubernetes.io/name: {{ .Release.Name }}-msp-config + app.kubernetes.io/component: fabric + app.kubernetes.io/part-of: {{ include "fabric-peernode.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} data: - mspconfig: | - {{if ($.Values.peer.mspconfig.organizationalunitidentifiers) }} - OrganizationalUnitIdentifiers:{{ range $.Values.peer.mspconfig.organizationalunitidentifiers }} + mspConfig: |- + + {{- if (.Values.peer.mspConfig.organizationalUnitIdentifiers) }} + OrganizationalUnitIdentifiers: + {{- range .Values.peer.mspConfig.organizationalUnitIdentifiers }} - Certificate: cacerts/ca.crt - OrganizationalUnitIdentifier: {{ . }}{{ end }}{{end}} + OrganizationalUnitIdentifier: {{ . 
}} + {{- end }} + {{- end }} NodeOUs: Enable: true ClientOUIdentifier: Certificate: cacerts/ca.crt - OrganizationalUnitIdentifier: {{ $.Values.peer.mspconfig.nodeOUs.clientOUidentifier.organizationalunitidentifier }} + OrganizationalUnitIdentifier: {{ .Values.peer.mspConfig.nodeOUs.clientOUIdentifier }} PeerOUIdentifier: Certificate: cacerts/ca.crt - OrganizationalUnitIdentifier: {{ $.Values.peer.mspconfig.nodeOUs.peerOUidentifier.organizationalunitidentifier }} + OrganizationalUnitIdentifier: {{ .Values.peer.mspConfig.nodeOUs.peerOUIdentifier }} AdminOUIdentifier: Certificate: cacerts/ca.crt - OrganizationalUnitIdentifier: {{ $.Values.peer.mspconfig.nodeOUs.adminOUidentifier.organizationalunitidentifier }} + OrganizationalUnitIdentifier: {{ .Values.peer.mspConfig.nodeOUs.adminOUIdentifier }} OrdererOUIdentifier: Certificate: cacerts/ca.crt - OrganizationalUnitIdentifier: {{ $.Values.peer.mspconfig.nodeOUs.ordererOUidentifier.organizationalunitidentifier }} + OrganizationalUnitIdentifier: {{ .Values.peer.mspConfig.nodeOUs.ordererOUIdentifier }} -{{ if $.Values.peer.configpath }} --- apiVersion: v1 kind: ConfigMap metadata: - name: builders-config - namespace: {{ $.Values.metadata.namespace }} + name: {{ .Release.Name }}-builders-config + namespace: {{ .Release.Namespace }} labels: app.kubernetes.io/name: builders-config - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} + app.kubernetes.io/component: fabric + app.kubernetes.io/part-of: {{ include "fabric-peernode.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm data: - core.yaml.base64: {{ .Values.peer.core | quote }} -{{ end }} \ No newline at end of file + core.yaml: {{ .Files.Get "conf/default_core.yaml" | nindent 8 | quote }} + +{{- $orderercrt := .Files.Get "files/orderer.crt" }} +{{ if $orderercrt }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-orderer-tls-cacert + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: orderer-tls-cacert + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/part-of: {{ include "fabric-peernode.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +data: + cacert: |- + {{ .Files.Get "files/orderer.crt" | nindent 8 }} +{{- end }} \ No newline at end of file diff --git a/platforms/hyperledger-fabric/charts/fabric-peernode/templates/deployment.yaml b/platforms/hyperledger-fabric/charts/fabric-peernode/templates/deployment.yaml deleted file mode 100755 index f3123e7ddfd..00000000000 --- a/platforms/hyperledger-fabric/charts/fabric-peernode/templates/deployment.yaml +++ /dev/null @@ -1,326 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ $.Values.peer.name }} - namespace: {{ $.Values.metadata.namespace }} - labels: - name: {{ $.Values.peer.name }} - app.kubernetes.io/name: {{ $.Values.peer.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} - annotations: - {{- if $.Values.annotations }} - {{- range $key, $value := $.Values.annotations.deployment }} - {{- range $k, $v := $value }} - {{ $k }}: {{ $v | quote }} - {{- end }} - {{- end }} - {{- end }} -spec: - updateStrategy: - type: RollingUpdate - serviceName: "{{ $.Values.peer.name }}" - replicas: 1 - selector: - matchLabels: - app: {{ $.Values.peer.name }} - app.kubernetes.io/name: {{ $.Values.peer.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/instance: {{ .Release.Name }} - template: - metadata: - annotations: - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} - labels: - app: {{ $.Values.peer.name }} - app.kubernetes.io/name: {{ $.Values.peer.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - {{- if .Values.vault.imagesecretname }} - imagePullSecrets: - - name: {{ $.Values.vault.imagesecretname }} - {{- end }} - initContainers: - - name: certificates-init - image: {{ $.Values.metadata.images.alpineutils}} - imagePullPolicy: IfNotPresent - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: VAULT_SECRET_PREFIX - value: "{{ $.Values.vault.secretprefix }}" - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: MOUNT_PATH - value: /secret - - name: VAULT_TYPE - value: "{{ $.Values.vault.type }}" - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - . /scripts/bevel-vault.sh - - # Calling a function to retrieve the vault token. - vaultBevelFunc "init" - - echo "Getting TLS certificates from Vault." - vaultBevelFunc "readJson" "${VAULT_SECRET_PREFIX}/tls" - - TLS_CA_CERT=$(echo ${VAULT_SECRET} | jq -r '.["ca.crt"]') - TLS_SERVER_CERT=$(echo ${VAULT_SECRET} | jq -r '.["server.crt"]') - TLS_SERVER_KEY=$(echo ${VAULT_SECRET} | jq -r '.["server.key"]') - - OUTPUT_PATH="${MOUNT_PATH}/tls" - mkdir -p ${OUTPUT_PATH} - echo "${TLS_CA_CERT}" >> ${OUTPUT_PATH}/ca.crt - echo "${TLS_SERVER_CERT}" >> ${OUTPUT_PATH}/server.crt - echo "${TLS_SERVER_KEY}" >> ${OUTPUT_PATH}/server.key - - echo "Getting MSP certificates from Vault." 
- vaultBevelFunc "readJson" "${VAULT_SECRET_PREFIX}/msp" - - ADMINCERT=$(echo ${VAULT_SECRET} | jq -r '.["admincerts"]') - CACERTS=$(echo ${VAULT_SECRET} | jq -r '.["cacerts"]') - KEYSTORE=$(echo ${VAULT_SECRET} | jq -r '.["keystore"]') - SIGNCERTS=$(echo ${VAULT_SECRET} | jq -r '.["signcerts"]') - TLSCACERTS=$(echo ${VAULT_SECRET} | jq -r '.["tlscacerts"]') - - OUTPUT_PATH="${MOUNT_PATH}/msp" - mkdir -p ${OUTPUT_PATH}/admincerts - mkdir -p ${OUTPUT_PATH}/cacerts - mkdir -p ${OUTPUT_PATH}/keystore - mkdir -p ${OUTPUT_PATH}/signcerts - mkdir -p ${OUTPUT_PATH}/tlscacerts - - echo "${ADMINCERT}" >> ${OUTPUT_PATH}/admincerts/admin.crt - echo "${CACERTS}" >> ${OUTPUT_PATH}/cacerts/ca.crt - echo "${KEYSTORE}" >> ${OUTPUT_PATH}/keystore/server.key - echo "${SIGNCERTS}" >> ${OUTPUT_PATH}/signcerts/server.crt - echo "${TLSCACERTS}" >> ${OUTPUT_PATH}/tlscacerts/tlsca.crt - - # COUCH_DB CREDENTIALS - echo "Getting couch db credentials" - SECRET_COUCHDB_PASS={{ $.Values.vault.secretcouchdbpass }} - if [ ! -z $SECRET_COUCHDB_PASS ] - then - vault_secret_key=$(echo ${SECRET_COUCHDB_PASS} |awk -F "?" '{print $1}') - vault_data_key=$(echo ${SECRET_COUCHDB_PASS} |awk -F "?" '{print $2}') - - # Calling a function to retrieve secrets from Vault only if they exist. - vaultBevelFunc "readJson" "${vault_secret_key}" - - PASSWORD=$(echo ${VAULT_SECRET} | jq -r ".[\"${vault_data_key}\"]") - echo "${PASSWORD}" >> ${MOUNT_PATH}/user_cred - fi - volumeMounts: - {{ if .Values.vault.tls }} - - name: vaultca - mountPath: "/etc/ssl/certs/" - readOnly: true - {{ end }} - - name: certificates - mountPath: /secret - - name: scripts-volume - mountPath: /scripts/bevel-vault.sh - subPath: bevel-vault.sh - containers: - - name: couchdb - image: {{ $.Values.metadata.images.couchdb }} - imagePullPolicy: IfNotPresent - command: ["sh", "-c"] - args: - - |- - chown -R couchdb:couchdb /opt/couchdb - chmod -R 0770 /opt/couchdb/data - chmod 664 /opt/couchdb/etc/*.ini - chmod 664 /opt/couchdb/etc/local.d/*.ini - chmod 775 /opt/couchdb/etc/*.d - if [ -e /etc/hyperledger/fabric/crypto/user_cred ] && [ -z $COUCHDB_USER ] - then - echo " Error! Please provide username for the password " - exit 1 - break - elif [ -e /etc/hyperledger/fabric/crypto/user_cred ] && [ ! -z $COUCHDB_USER ] - then - export COUCHDB_PASSWORD=`cat /etc/hyperledger/fabric/crypto/user_cred` - break - elif [ ! -e /etc/hyperledger/fabric/crypto/user_cred ] && [ ! -z $COUCHDB_USER ] - then - echo " Error! Please provide password for username $COUCHDB_USER " - exit 1 - break - else - : - fi - tini -- /docker-entrypoint.sh /opt/couchdb/bin/couchdb - ports: - - containerPort: 5984 - env: - - name: COUCHDB_USER - value: "{{ $.Values.peer.couchdb.username }}" - volumeMounts: - - name: datadir-couchdb - mountPath: /opt/couchdb/data - - name: certificates - mountPath: /etc/hyperledger/fabric/crypto - - name: {{ $.Values.peer.name }} - image: {{ $.Values.metadata.images.peer }} - imagePullPolicy: IfNotPresent - command: ["sh", "-c"] - args: - - |- - if [ -e /builders/external/core.yaml.base64 ]; then - cat /builders/external/core.yaml.base64 | base64 -d > $FABRIC_CFG_PATH/core.yaml - fi - cp /etc/hyperledger/fabric/NodeOUconfig/mspconfig /etc/hyperledger/fabric/crypto/msp/config.yaml - export CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD=`cat /etc/hyperledger/fabric/crypto/user_cred` - version=$( echo ${PEER_IMAGE} | sed 's/.*://' | cut -d '.' 
-f -2 ) - if [ $version = "2.2" ] && [ ${IS_UPGRADE} = "true" ] - then - peer node upgrade-dbs - fi - peer node start - ports: - - name: grpc - containerPort: 7051 - - name: events - containerPort: 7053 - - name: operations - containerPort: 9443 - env: - - name: PEER_IMAGE - value: "{{ $.Values.metadata.images.peer }}" - - name: IS_UPGRADE - value: "{{ $.Values.upgrade }}" - envFrom: - - configMapRef: - name: {{ $.Values.peer.name }}-config - volumeMounts: - - name: datadir - mountPath: /var/hyperledger/production - - name: dockersocket - mountPath: /host/var/run/docker.sock - - name: certificates - mountPath: /etc/hyperledger/fabric/crypto - - name: {{ $.Values.peer.name }}-msp-config-volume - mountPath: /etc/hyperledger/fabric/NodeOUconfig - readOnly: true - {{ if $.Values.peer.configpath }} - - name: builders-config - mountPath: /builders/external - {{ end }} - resources: - requests: - memory: {{ .Values.config.pod.resources.requests.memory }} - cpu: {{ .Values.config.pod.resources.requests.cpu }} - limits: - memory: {{ .Values.config.pod.resources.limits.memory }} - cpu: {{ .Values.config.pod.resources.limits.cpu }} - - name: grpc-web - image: "ghcr.io/hyperledger-labs/grpc-web:latest" - imagePullPolicy: IfNotPresent - ports: - - name: grpc-web - containerPort: 7443 - env: - - name: BACKEND_ADDRESS - value: "{{ $.Values.peer.name }}.{{ $.Values.metadata.namespace }}:{{ $.Values.service.ports.grpc.clusteripport }}" - - name: SERVER_TLS_CERT_FILE - value: /certs/tls/server.crt - - name: SERVER_TLS_KEY_FILE - value: /certs/tls/server.key - - name: BACKEND_TLS_CA_FILES - value: /certs/tls/ca.crt - - name: SERVER_BIND_ADDRESS - value: "0.0.0.0" - - name: SERVER_HTTP_DEBUG_PORT - value: "8080" - - name: SERVER_HTTP_TLS_PORT - value: "7443" - - name: BACKEND_TLS - value: "true" - - name: SERVER_HTTP_MAX_WRITE_TIMEOUT - value: 5m - - name: SERVER_HTTP_MAX_READ_TIMEOUT - value: 5m - - name: USE_WEBSOCKETS - value: "true" - volumeMounts: - - name: certificates - mountPath: /certs - volumes: - {{ if .Values.vault.tls }} - - name: vaultca - secret: - secretName: {{ $.Values.vault.tls }} - items: - - key: ca.crt.pem - path: ca-certificates.crt - {{ end }} - {{ if $.Values.peer.configpath }} - - name: builders-config - configMap: - name: builders-config - {{ end }} - - name: certificates - emptyDir: - medium: Memory - - name: dockersocket - hostPath: - path: /var/run/docker.sock - - name: {{ $.Values.peer.name }}-msp-config-volume - configMap: - name: {{ $.Values.peer.name }}-msp-config - items: - - key: mspconfig - path: mspconfig - - name: scripts-volume - configMap: - name: bevel-vault-script - volumeClaimTemplates: - #Lables are not being taken by Kubernetes as it dynamically creates PVC - - metadata: - name: datadir - annotations: - {{- if $.Values.annotations }} - {{- range $key, $value := $.Values.annotations.pvc }} - {{- range $k, $v := $value }} - {{ $k }}: {{ $v | quote }} - {{- end }} - {{- end }} - {{- end }} - spec: - accessModes: [ "ReadWriteOnce" ] - storageClassName: {{ .Values.storage.peer.storageclassname }} - resources: - requests: - storage: {{ .Values.storage.peer.storagesize }} - - metadata: - name: datadir-couchdb - annotations: - {{- if $.Values.annotations }} - {{- range $key, $value := $.Values.annotations.pvc }} - {{- range $k, $v := $value }} - {{ $k }}: {{ $v | quote }} - {{- end }} - {{- end }} - {{- end }} - spec: - accessModes: [ "ReadWriteOnce" ] - storageClassName: {{ .Values.storage.couchdb.storageclassname }} - resources: - requests: - storage: {{ 
.Values.storage.couchdb.storagesize }} diff --git a/platforms/hyperledger-fabric/charts/fabric-peernode/templates/node-statefulset.yaml b/platforms/hyperledger-fabric/charts/fabric-peernode/templates/node-statefulset.yaml new file mode 100755 index 00000000000..221e18c0d10 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/fabric-peernode/templates/node-statefulset.yaml @@ -0,0 +1,383 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "fabric-peernode.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: {{ .Release.Name }} + app.kubernetes.io/component: fabric + app.kubernetes.io/part-of: {{ template "fabric-peernode.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + {{- include "labels.deployment" . | nindent 4 }} +spec: + updateStrategy: + type: RollingUpdate + serviceName: {{ .Release.Name }} + replicas: 1 + selector: + matchLabels: + app: {{ .Release.Name }} + app.kubernetes.io/name: {{ .Release.Name }} + app.kubernetes.io/component: fabric + app.kubernetes.io/part-of: {{ template "fabric-peernode.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: {{ .Release.Name }} + app.kubernetes.io/component: fabric + app.kubernetes.io/part-of: {{ template "fabric-peernode.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + {{- include "labels.deployment" . | nindent 8 }} + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + {{- if .Values.image.pullSecret }} + imagePullSecrets: + - name: {{ .Values.image.pullSecret }} + {{- end }} + initContainers: + - name: certificates-init + image: {{ .Values.image.alpineUtils }} + imagePullPolicy: IfNotPresent + env: + - name: VAULT_ADDR + value: {{ .Values.global.vault.address }} + - name: VAULT_SECRET_ENGINE + value: "{{ .Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ .Values.global.vault.secretPrefix }}" + - name: KUBERNETES_AUTH_PATH + value: {{ .Values.global.vault.authPath }} + - name: VAULT_APP_ROLE + value: {{ .Values.global.vault.role }} + - name: MOUNT_PATH + value: /secret + - name: VAULT_TYPE + value: "{{ .Values.global.vault.type }}" + - name: PEER_NAME + value: {{ .Release.Name }} + command: ["sh", "-c"] + args: + - |- + #!/usr/bin/env sh + +{{- if eq .Values.global.vault.type "hashicorp" }} + . /scripts/bevel-vault.sh + + # Calling a function to retrieve the vault token. + vaultBevelFunc "init" + + function getPeerTlsSecret { + KEY=$1-tls + + echo "Getting TLS certificates from Vault." 
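+ # The TLS material is read from "<secretEngine>/<secretPrefix>/peers/<release-name>-tls"
+ # (for example, secretsv2/data/supplychain/peers/peer0-tls with the default values and a release named peer0); the secret is expected to hold the keys ca_crt, server_crt and server_key.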
+ vaultBevelFunc "readJson" "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/peers/${KEY}" + if [ "$SECRETS_AVAILABLE" == "yes" ] + then + TLS_CA_CERT=$(echo ${VAULT_SECRET} | jq -r '.["ca_crt"]') + TLS_SERVER_CERT=$(echo ${VAULT_SECRET} | jq -r '.["server_crt"]') + TLS_SERVER_KEY=$(echo ${VAULT_SECRET} | jq -r '.["server_key"]') + + echo "${TLS_CA_CERT}" > ${OUTPUT_PATH}/ca.crt + echo "${TLS_SERVER_CERT}" > ${OUTPUT_PATH}/server.crt + echo "${TLS_SERVER_KEY}" > ${OUTPUT_PATH}/server.key + PEER_TLS_SECRET=true + else + PEER_TLS_SECRET=false + fi + } + + function getPeerMspSecret { + KEY=$1-msp + + echo "Getting MSP certificates from Vault." + vaultBevelFunc "readJson" "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/peers/${KEY}" + if [ "$SECRETS_AVAILABLE" == "yes" ] + then + ADMINCERT=$(echo ${VAULT_SECRET} | jq -r '.["admincerts"]') + CACERTS=$(echo ${VAULT_SECRET} | jq -r '.["cacerts"]') + KEYSTORE=$(echo ${VAULT_SECRET} | jq -r '.["keystore"]') + SIGNCERTS=$(echo ${VAULT_SECRET} | jq -r '.["signcerts"]') + TLSCACERTS=$(echo ${VAULT_SECRET} | jq -r '.["tlscacerts"]') + + echo "${ADMINCERT}" > ${OUTPUT_PATH}/admincerts/admin.crt + echo "${CACERTS}" > ${OUTPUT_PATH}/cacerts/ca.crt + echo "${KEYSTORE}" > ${OUTPUT_PATH}/keystore/server.key + echo "${SIGNCERTS}" > ${OUTPUT_PATH}/signcerts/server.crt + echo "${TLSCACERTS}" > ${OUTPUT_PATH}/tlscacerts/tlsca.crt + PEER_MSP_SECRET=true + else + PEER_MSP_SECRET=false + fi + } + +{{- else }} + function getPeerTlsSecret { + KEY=$1-tls + KUBENETES_SECRET=$(kubectl get secret ${KEY} --namespace {{ .Release.Namespace }} -o json) + if [ "$KUBENETES_SECRET" = "" ]; then + PEER_TLS_SECRET=false + else + TLS_CA_CERT=$(echo ${KUBENETES_SECRET} | jq -r '.data.cacrt' | base64 -d) + TLS_SERVER_CERT=$(echo ${KUBENETES_SECRET} | jq -r '.data.servercrt' | base64 -d) + TLS_SERVER_KEY=$(echo ${KUBENETES_SECRET} | jq -r '.data.serverkey' | base64 -d) + + echo "${TLS_CA_CERT}" > ${OUTPUT_PATH}/ca.crt + echo "${TLS_SERVER_CERT}" > ${OUTPUT_PATH}/server.crt + echo "${TLS_SERVER_KEY}" > ${OUTPUT_PATH}/server.key + PEER_TLS_SECRET=true + fi + } + + function getPeerMspSecret { + KEY=$1-msp + KUBENETES_SECRET=$(kubectl get secret ${KEY} --namespace {{ .Release.Namespace }} -o json) + if [ "$KUBENETES_SECRET" = "" ]; then + PEER_MSP_SECRET=false + else + ADMINCERT=$(echo ${KUBENETES_SECRET} | jq -r '.data.admincerts' | base64 -d) + CACERTS=$(echo ${KUBENETES_SECRET} | jq -r '.data.cacerts' | base64 -d) + KEYSTORE=$(echo ${KUBENETES_SECRET} | jq -r '.data.keystore' | base64 -d) + SIGNCERTS=$(echo ${KUBENETES_SECRET} | jq -r '.data.signcerts' | base64 -d) + TLSCACERTS=$(echo ${KUBENETES_SECRET} | jq -r '.data.tlscacerts' | base64 -d) + + echo "${ADMINCERT}" > ${OUTPUT_PATH}/admincerts/admin.crt + echo "${CACERTS}" > ${OUTPUT_PATH}/cacerts/ca.crt + echo "${KEYSTORE}" > ${OUTPUT_PATH}/keystore/server.key + echo "${SIGNCERTS}" > ${OUTPUT_PATH}/signcerts/server.crt + echo "${TLSCACERTS}" > ${OUTPUT_PATH}/tlscacerts/tlsca.crt + PEER_MSP_SECRET=true + fi + + } +{{- end }} + + COUNTER=1 + while [ "$COUNTER" -le {{ .Values.peer.healthCheck.retries }} ] + do + OUTPUT_PATH="${MOUNT_PATH}/tls" + mkdir -p ${OUTPUT_PATH} + getPeerTlsSecret ${PEER_NAME} + + OUTPUT_PATH="${MOUNT_PATH}/msp" + mkdir -p ${OUTPUT_PATH}/admincerts + mkdir -p ${OUTPUT_PATH}/cacerts + mkdir -p ${OUTPUT_PATH}/keystore + mkdir -p ${OUTPUT_PATH}/signcerts + mkdir -p ${OUTPUT_PATH}/tlscacerts + getPeerMspSecret ${PEER_NAME} + + if [ "$PEER_TLS_SECRET" = "true" ] && [ "$PEER_MSP_SECRET" = "true" ] + then + echo "Peer 
certificates have been obtained correctly" + break + else + echo "Peer certificates have not been obtained, sleeping for {{ .Values.peer.healthCheck.sleepTimeAfterError }}" + sleep {{ .Values.peer.healthCheck.sleepTimeAfterError }} + COUNTER=`expr "$COUNTER" + 1` + fi + done + + if [ "$COUNTER" -gt {{ .Values.peer.healthCheck.retries }} ] + then + echo "Retry attempted `expr $COUNTER - 1` times, The peer certificates have not been obtained." + exit 1 + fi + + volumeMounts: + {{ if .Values.global.vault.tls }} + - name: vaultca + mountPath: "/etc/ssl/certs/" + readOnly: true + {{ end }} + - name: certificates + mountPath: /secret + - name: scripts-volume + mountPath: /scripts/bevel-vault.sh + subPath: bevel-vault.sh + containers: + - name: couchdb + image: {{ .Values.image.couchdb }}:{{ .Values.global.version }} + imagePullPolicy: IfNotPresent + command: ["sh", "-c"] + args: + - |- + chown -R couchdb:couchdb /opt/couchdb + chmod -R 0770 /opt/couchdb/data + chmod 664 /opt/couchdb/etc/*.ini + chmod 664 /opt/couchdb/etc/local.d/*.ini + chmod 775 /opt/couchdb/etc/*.d + if [ -z $COUCHDB_USER ] + then + echo " Error! Please provide username for CouchDB." + exit 1 + break + elif [ -z $COUCHDB_PASSWORD ] + then + echo " Error! Please provide password for username $COUCHDB_USER." + exit 1 + break + fi + tini -- /docker-entrypoint.sh /opt/couchdb/bin/couchdb + ports: + - containerPort: 5984 + env: + - name: COUCHDB_USER + value: "{{ .Values.peer.couchdb.username }}" + - name: COUCHDB_PASSWORD + value: "{{ .Values.peer.couchdb.password }}" + volumeMounts: + - name: datadir-couchdb + mountPath: /opt/couchdb/data + - name: certificates + mountPath: /etc/hyperledger/fabric/crypto + - name: {{ .Release.Name }} + image: {{ .Values.image.peer }}:{{ .Values.global.version }} + imagePullPolicy: IfNotPresent + command: ["sh", "-c"] + args: + - |- + + if [ -e /builders/external/core.yaml ]; then + cp /builders/external/core.yaml $FABRIC_CFG_PATH/core.yaml + fi + + cp /etc/hyperledger/fabric/NodeOUconfig/mspConfig /etc/hyperledger/fabric/crypto/msp/config.yaml + export CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD="{{ .Values.peer.couchdb.password }}" + version=$( echo ${PEER_IMAGE} | sed 's/.*://' | cut -d '.' 
-f -2 ) + if [ $version = "2.2" ] && [ ${IS_UPGRADE} = "true" ] + then + peer node upgrade-dbs + fi + peer node start + ports: + - name: grpc + containerPort: 7051 + - name: events + containerPort: 7053 + - name: operations + containerPort: 9443 + env: + - name: PEER_IMAGE + value: "{{ .Values.image.peer }}:{{ .Values.global.version }}" + - name: IS_UPGRADE + value: "{{ .Values.peer.upgrade }}" + envFrom: + - configMapRef: + name: {{ .Release.Name }}-config + volumeMounts: + - name: datadir + mountPath: /var/hyperledger/production + - name: dockersocket + mountPath: /host/var/run/docker.sock + - name: certificates + mountPath: /etc/hyperledger/fabric/crypto + - name: {{ .Release.Name }}-msp-config-volume + mountPath: /etc/hyperledger/fabric/NodeOUconfig + readOnly: true + - name: builders-config + mountPath: /builders/external + resources: + requests: + memory: {{ .Values.peer.resources.requests.memory }} + cpu: {{ .Values.peer.resources.requests.cpu }} + limits: + memory: {{ .Values.peer.resources.limits.memory }} + cpu: {{ .Values.peer.resources.limits.cpu }} + - name: grpc-web + image: "ghcr.io/hyperledger-labs/grpc-web:latest" + imagePullPolicy: IfNotPresent + ports: + - name: grpc-web + containerPort: 7443 + env: + - name: BACKEND_ADDRESS + value: "{{ .Release.Name }}.{{ .Release.Namespace }}:{{ .Values.peer.ports.grpc.clusterIpPort }}" + - name: SERVER_TLS_CERT_FILE + value: /certs/tls/server.crt + - name: SERVER_TLS_KEY_FILE + value: /certs/tls/server.key + - name: BACKEND_TLS_CA_FILES + value: /certs/tls/ca.crt + - name: SERVER_BIND_ADDRESS + value: "0.0.0.0" + - name: SERVER_HTTP_DEBUG_PORT + value: "8080" + - name: SERVER_HTTP_TLS_PORT + value: "7443" + - name: BACKEND_TLS + value: "true" + - name: SERVER_HTTP_MAX_WRITE_TIMEOUT + value: 5m + - name: SERVER_HTTP_MAX_READ_TIMEOUT + value: 5m + - name: USE_WEBSOCKETS + value: "true" + volumeMounts: + - name: certificates + mountPath: /certs + volumes: + {{ if .Values.global.vault.tls }} + - name: vaultca + secret: + secretName: {{ .Values.global.vault.tls }} + items: + - key: ca.crt.pem + path: ca-certificates.crt + {{ end }} + - name: builders-config + configMap: + name: {{ .Release.Name }}-builders-config + - name: certificates + emptyDir: + medium: Memory + - name: dockersocket + hostPath: + path: /var/run/docker.sock + - name: {{ .Release.Name }}-msp-config-volume + configMap: + name: {{ .Release.Name }}-msp-config + items: + - key: mspConfig + path: mspConfig + - name: scripts-volume + configMap: + name: bevel-vault-script + volumeClaimTemplates: + #Lables are not being taken by Kubernetes as it dynamically creates PVC + - metadata: + name: datadir + labels: + {{- include "labels.pvc" . | nindent 8 }} + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: storage-{{ .Release.Name }} + resources: + requests: + storage: {{ .Values.storage.peer }} + - metadata: + name: datadir-couchdb + labels: + {{- include "labels.pvc" . 
| nindent 8 }} + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: storage-{{ .Release.Name }} + resources: + requests: + storage: {{ .Values.storage.couchdb }} diff --git a/platforms/hyperledger-fabric/charts/fabric-peernode/templates/service.yaml b/platforms/hyperledger-fabric/charts/fabric-peernode/templates/service.yaml index 3b015f0aba2..e96eb0f4caf 100644 --- a/platforms/hyperledger-fabric/charts/fabric-peernode/templates/service.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-peernode/templates/service.yaml @@ -7,53 +7,47 @@ apiVersion: v1 kind: Service metadata: - name: {{ $.Values.peer.name }} - namespace: {{ $.Values.metadata.namespace }} - {{- if or $.Values.proxy (and $.Values.service.loadBalancerType (eq $.Values.service.loadBalancerType "Internal")) }} - annotations: - {{- if $.Values.annotations }} - {{- range $key, $value := $.Values.annotations.service }} - {{- range $k, $v := $value }} - {{ $k }}: {{ $v | quote }} - {{- end }} - {{- end }} - {{- end }} - {{- if and $.Values.service.loadBalancerType (eq $.Values.service.loadBalancerType "Internal") }} + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + {{- if or .Values.global.proxy (and .Values.peer.loadBalancerType (eq .Values.peer.loadBalancerType "Internal")) }} + {{- if and .Values.peer.loadBalancerType (eq .Values.peer.loadBalancerType "Internal") }} cloud.google.com/load-balancer-type: "Internal" {{- end }} {{- end }} labels: - run: {{ $.Values.peer.name }} - app.kubernetes.io/name: {{ $.Values.peer.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + run: {{ .Release.Name }} + app.kubernetes.io/name: {{ .Release.Name }} + app.kubernetes.io/component: fabric + app.kubernetes.io/part-of: {{ include "fabric-peernode.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} + {{- include "labels.service" . 
| nindent 4 }} spec: - type: {{ $.Values.service.servicetype }} + type: {{ .Values.peer.serviceType }} selector: - app: {{ $.Values.peer.name }} + app: {{ .Release.Name }} ports: - name: grpc protocol: TCP targetPort: 7051 - port: {{ $.Values.service.ports.grpc.clusteripport }} - {{- if $.Values.service.ports.grpc.nodeport }} - nodePort: {{ $.Values.service.ports.grpc.nodeport }} + port: {{ .Values.peer.ports.grpc.clusterIpPort }} + {{- if .Values.peer.ports.grpc.nodePort }} + nodePort: {{ .Values.peer.ports.grpc.nodePort }} {{- end }} - name: events protocol: TCP targetPort: 7053 - port: {{ $.Values.service.ports.events.clusteripport }} - {{- if $.Values.service.ports.events.nodeport }} - nodePort: {{ $.Values.service.ports.events.nodeport }} + port: {{ .Values.peer.ports.events.clusterIpPort }} + {{- if .Values.peer.ports.events.nodePort }} + nodePort: {{ .Values.peer.ports.events.nodePort }} {{- end }} - protocol: TCP name: couchdb targetPort: 5984 - port: {{ $.Values.service.ports.couchdb.clusteripport }} - {{- if $.Values.service.ports.couchdb.nodeport }} - nodePort: {{ $.Values.service.ports.couchdb.nodeport }} + port: {{ .Values.peer.ports.couchdb.clusterIpPort }} + {{- if .Values.peer.ports.couchdb.nodePort }} + nodePort: {{ .Values.peer.ports.couchdb.nodePort }} {{- end }} - name: grpc-web protocol: TCP @@ -62,64 +56,63 @@ spec: - name: operations protocol: TCP targetPort: 9443 - port: {{ $.Values.service.ports.metrics.clusteripport }} - {{- if (eq $.Values.service.servicetype "ClusterIP") }} + port: {{ .Values.peer.ports.metrics.clusterIpPort }} + {{- if (eq .Values.peer.serviceType "ClusterIP") }} clusterIP: None {{- end }} - {{- if $.Values.service.loadBalancerIP }} - loadBalancerIP: {{ $.Values.service.loadBalancerIP }} + {{- if .Values.peer.loadBalancerIP }} + loadBalancerIP: {{ .Values.peer.loadBalancerIP }} {{- end }} -{{- if eq $.Values.proxy.provider "haproxy" }} +{{- if eq .Values.global.proxy.provider "haproxy" }} --- apiVersion: networking.k8s.io/v1 kind: Ingress metadata: - name: {{ $.Values.peer.name }} - namespace: {{ $.Values.metadata.namespace }} + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} annotations: - kubernetes.io/ingress.class: "haproxy" ingress.kubernetes.io/ssl-passthrough: "true" spec: + ingressClassName: "haproxy" rules: - - host: {{ $.Values.peer.name }}.{{ $.Values.metadata.namespace }}.{{ $.Values.proxy.external_url_suffix }} + - host: {{ .Release.Name }}.{{ .Release.Namespace }}.{{ .Values.global.proxy.externalUrlSuffix }} http: paths: - path: / pathType: Prefix backend: service: - name: {{ $.Values.peer.name }} + name: {{ .Release.Name }} port: - number: {{ $.Values.service.ports.grpc.clusteripport }} - - host: {{ $.Values.peer.name }}-proxy.{{ $.Values.metadata.namespace }}.{{ $.Values.proxy.external_url_suffix }} + number: {{ .Values.peer.ports.grpc.clusterIpPort }} + - host: {{ .Release.Name }}-proxy.{{ .Release.Namespace }}.{{ .Values.global.proxy.externalUrlSuffix }} http: paths: - path: / pathType: Prefix backend: service: - name: {{ $.Values.peer.name }} + name: {{ .Release.Name }} port: number: 7443 --- apiVersion: networking.k8s.io/v1 kind: Ingress metadata: - name: {{ $.Values.peer.name }}-ops - namespace: {{ $.Values.metadata.namespace }} - annotations: - kubernetes.io/ingress.class: "haproxy" + name: {{ .Release.Name }}-ops + namespace: {{ .Release.Namespace }} spec: + ingressClassName: "haproxy" rules: - - host: {{ $.Values.peer.name }}-ops.{{ $.Values.metadata.namespace }}.{{ $.Values.proxy.external_url_suffix }} + 
- host: {{ .Release.Name }}-ops.{{ .Release.Namespace }}.{{ .Values.global.proxy.externalUrlSuffix }} http: paths: - path: / pathType: Prefix backend: service: - name: {{ $.Values.peer.name }} + name: {{ .Release.Name }} port: number: 9443 {{- end }} diff --git a/platforms/hyperledger-fabric/charts/fabric-peernode/templates/servicemonitor.yaml b/platforms/hyperledger-fabric/charts/fabric-peernode/templates/servicemonitor.yaml index 581d6be3294..5071e48f840 100644 --- a/platforms/hyperledger-fabric/charts/fabric-peernode/templates/servicemonitor.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-peernode/templates/servicemonitor.yaml @@ -1,14 +1,18 @@ -{{- if $.Values.service.ports.metrics.enabled }} +{{- if .Values.peer.ports.metrics.enabled }} {{- if $.Capabilities.APIVersions.Has "monitoring.coreos.com/v1/ServiceMonitor" }} apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: labels: - app: {{ $.Values.peer.name }} + app: {{ .Release.Name }} + app.kubernetes.io/name: {{ .Release.Name }} + app.kubernetes.io/component: fabric + app.kubernetes.io/part-of: {{ include "fabric-peernode.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - name: {{ $.Values.peer.name }} - namespace: {{ $.Values.metadata.namespace }} + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} spec: jobLabel: {{ .Release.Name }} endpoints: @@ -16,10 +20,10 @@ spec: port: operations namespaceSelector: matchNames: - - {{ $.Values.metadata.namespace }} + - {{ .Release.Namespace }} selector: matchLabels: app.kubernetes.io/instance: {{ .Release.Name }} - run: {{ $.Values.peer.name }} + run: {{ .Release.Name }} {{- end }} {{- end }} diff --git a/platforms/hyperledger-fabric/charts/fabric-peernode/values.yaml b/platforms/hyperledger-fabric/charts/fabric-peernode/values.yaml index 4e6954c6648..16d943f2d9a 100644 --- a/platforms/hyperledger-fabric/charts/fabric-peernode/values.yaml +++ b/platforms/hyperledger-fabric/charts/fabric-peernode/values.yaml @@ -4,190 +4,226 @@ # SPDX-License-Identifier: Apache-2.0 ############################################################################################## -metadata: - #Provide the namespace for organization's peer - #Eg. namespace: org1-net - namespace: org1-net - images: - #Provide the valid image name and version for fabric couchdb - #Eg. couchdb: hyperledger/fabric-couchdb:0.4.14 - couchdb: ghcr.io/hyperledger/bevel-fabric-couchdb:2.2.2 - #Provide the valid image name and version for fabric peer - #Eg. hyperledger/fabric-peer:2.2.2 - peer: ghcr.io/hyperledger/bevel-fabric-peer:2.2.2 - #Provide the valid image name and version to read certificates from vault server - #Eg. alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - #Provide the custom labels - #NOTE: Provide labels other than name, release name , release service, chart version , chart name, run - #These lables will not be applied to VolumeClaimTemplate of StatefulSet as labels are automatically picked up by Kubernetes - #Eg. labels: - # role: peer - labels: -annotations: - #Extra annotations - service: {} - pvc: {} - deployment: {} +global: + # HLF Network Version + #Eg. version: 2.5.4 + version: 2.5.4 + #Provide the service account name which will be created.
+ serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # only 'false' is implemented + vault: + #Provide the type of vault + #Eg. type: hashicorp + type: hashicorp + #Provide the vaultrole for an organization + #Eg. vaultrole: org1-vault-role + role: vault-role + #Provide the vault server address + #Eg. vaultaddress: http://Vault-884963190.eu-west-1.elb.amazonaws.com + address: + #Provide the kubernetes auth backed configured in vault for an organization + #Eg. authpath: supplychain + authPath: supplychain + #Provide the secret engine. + secretEngine: secretsv2 + #Provide the vault path where the secrets will be stored + secretPrefix: "data/supplychain" + #Kuberenetes secret for vault ca.cert + #Enable or disable TLS for vault communication if value present or not + #Eg. tls: vaultca + tls: + + proxy: + #This will be the proxy/ingress provider. Can have values "none" or "haproxy" + #Eg. provider: "haproxy" + provider: "haproxy" + #This field contains the external URL of the organization + #Eg. externalUrlSuffix: test.blockchaincloudpoc.com + externalUrlSuffix: test.blockchaincloudpoc.com + #This field contains the external port on haproxy + #Eg. port: 443 + port: 443 + +storage: + #Flag to create new storage class for organization. Set to false for existing storage class. + #Eg. enabled: true + enabled: true + #Provide storage size for Peer Volume + #Eg. peer: 512Mi + peer: 512Mi + #Provide storage size for CouchDB Volume + #Eg. couchdb: 512Mi + couchdb: 512Mi + # NOTE: when you set this to Retain, the volume WILL persist after the chart is delete and you need to manually delete it + reclaimPolicy: "Delete" # choose from: Delete | Retain + volumeBindingMode: Immediate # choose from: Immediate | WaitForFirstConsumer + allowedTopologies: + enabled: false + +certs: + # Flag indicating the creation of certificates. + generateCertificates: true + orgData: + caAddress: ca.supplychain-net:7051 + caAdminUser: supplychain-admin + caAdminPassword: supplychain-adminpw + #Provide organization's name in lowercases + #Eg. orgName: supplychain + orgName: supplychain + #Provide organization's type (orderer or peer) + #Eg. component_type: orderer + type: peer + #Provide organization's subject + #Eg. "O=Orderer,L=51.50/-0.13/London,C=GB" + componentSubject: "O=Orderer,L=51.50/-0.13/London,C=GB" + + users: + # Generating User Certificates with custom attributes using Fabric CA in Bevel for Peer Organizations + # Eg. + # usersList: + # - user: + # identity: user1 + # attributes: + # - key: "hf.Revoker" + # value: "true" + # - user: + # identity: user2 + # attributes: + # - key: "hf.Revoker" + # value: "true" + usersList: + # - user: + # identity: user1 + # attributes: + # - key: "hf.Revoker" + # value: "true" + settings: + #Flag to create configmaps for the organization. This flag must be set to true when installing the first orderer/peer in organization and false for others. + createConfigMaps: false + refreshCertValue: false + addPeerValue: false + removeCertsOnDelete: false + removeOrdererTlsOnDelete: false + +image: + #Provide the valid image repository for fabric couchdb + #Eg. couchdb: hyperledger/fabric-couchdb + couchdb: ghcr.io/hyperledger/bevel-fabric-couchdb + #Provide the valid repository for fabric peer + #Eg. peer: hyperledger/fabric-peer + peer: ghcr.io/hyperledger/bevel-fabric-peer + #Provide the valid image name and version to read certificates from vault server + #Eg. 
alpineUtils: ghcr.io/hyperledger/bevel-alpine:latest + alpineUtils: ghcr.io/hyperledger/bevel-alpine:latest + #Provide the secret to use if private repository + #Eg. pullSecret: regcred + pullSecret: + peer: - #Provide the name of the peer as per deployment yaml. - #Eg. name: peer0 - name: peer0 - #Provide the url of gossipping peer and port to be mentioned is grpc cluster IP port - #Eg. gossippeeraddress: peer1.org1-net.svc.cluster.local:7051 - gossippeeraddress: peer1.org1-net.svc.cluster.local:7051 - #Provide the url of gossip external endpoint and port to be mentioned is haproxy https service port - #Eg. gossipexternalendpoint: peer1-ext.org1-net:443 - gossipexternalendpoint: peer0.org1-net.org1proxy.blockchaincloudpoc.com:443 - #Provide the localmspid for organization - #Eg. localmspid: Org1MSP - localmspid: Org1MSP - #Provide the loglevel for organization's peer - #Eg. loglevel: info - loglevel: info - #Provide the value for tlsstatus to be true or false for organization's peer - #Eg. tlsstatus: true - tlsstatus: true + #Provide the url of the gossipping peer. If empty, this peer's own address will be used + #Eg. gossipPeerAddress: peer1.supplychain-net:7051 + gossipPeerAddress: peer1.supplychain-net:7051 + #Provide the logLevel for organization's peer + #Eg. logLevel: info + logLevel: info + #Provide the localMspId for organization + #Eg. localMspId: supplychainMSP + localMspId: supplychainMSP + #Provide the value for tlsStatus to be true or false for organization's peer + #Eg. tlsStatus: true + tlsStatus: true + #Flag to enable CLI for this peer + #Eg. cliEnabled: true + cliEnabled: false + #Provide the address for orderer; optional is cliEnabled: false + #Eg. ordererAddress: orderer1.test.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net:7050 #Provide a valid chaincode builder image for Fabric - #Eg. builder: hyperledger/fabric-ccenv:1.4.8 - builder: hyperledger/fabric-ccenv:2.2.2 + #Eg. builder: hyperledger/fabric-ccenv + builder: hyperledger/fabric-ccenv couchdb: #Provide the username for couchdb login - #If couchdb username is provided, it is mandatory to provide password for the same - #Eg. username: org1-user - username: org1-user - configpath: - core: - mspconfig: + #Eg. username: supplychain-user + username: supplychain-user + #Provide the password for couchdb login + #Eg. password: supplychain-userpw + password: supplychain-userpw + mspConfig: #Provide the members of the MSP in organizational unit identifiers - #Eg.organizationalunitidentifiers: + #Eg.organizationalUnitIdentifiers: # - client # - peer # following for 2.2.x # - admin # - orderer - organizationalunitidentifiers: + organizationalUnitIdentifiers: nodeOUs: - clientOUidentifier: - #Provide OU which will be used to identify node as client - #Eg.organizationalunitidentifier: client - organizationalunitidentifier: client - peerOUidentifier: - #Provide OU which will be used to identify node as peer - #Eg.organizationalunitidentifier: peer - organizationalunitidentifier: peer + #Provide OU which will be used to identify node as client + #Eg.clientOUIdentifier: client + clientOUIdentifier: client + #Provide OU which will be used to identify node as peer + #Eg.peerOUIdentifier: peer + peerOUIdentifier: peer # following for 2.2.x - adminOUidentifier: - organizationalunitidentifier: admin - ordererOUidentifier: - organizationalunitidentifier: orderer - -storage: - peer: - #Provide the storageclassname for peer - #Eg. 
storageclassname: aws-storage - storageclassname: aws-storageclass - #Provide the storagesize for storage class - #Eg. storagesize: 512Mi - storagesize: 512Mi - couchdb: - #Provide the storageclassname for couchdb - #Eg. storageclassname: aws-storage - storageclassname: aws-storageclass - #Provide the storagesize for storage class - #Eg. storagesize: 512Mi - storagesize: 512Mi - - - -vault: - #Provide the vaultrole for an organization - #Eg. vaultrole: org1-vault-role - role: vault-role - #Provide the vault server address - #Eg. vaultaddress: http://Vault-884963190.eu-west-1.elb.amazonaws.com - address: - #Provide the kubernetes auth backed configured in vault for an organization - #Eg. authpath: devorg1-net-auth - authpath: devorg1-net-auth - #Provide the value for vault secretprefix - #Eg. secretprefix: secretsv2/data/crypto/peerOrganizations/.../peers/... - secretprefix: secretsv2/data/crypto/peerOrganizations/org1-net/peers/peer0.org1-net - #Provide the serviceaccountname for vault - #Eg. serviceaccountname: vault-auth - serviceaccountname: vault-auth - #Provide the type of vault - #Eg. type: hashicorp - type: hashicorp - #Provide the imagesecretname for vault - #Eg. imagesecretname: regcred - imagesecretname: "" - #Provide the vault path for secret couchdb password - #Eg. secretcouchdbpass: secretsv2/data/credentials/org1-net/couchdb/org1?user - secretcouchdbpass: secretsv2/data/credentials/org1-net/couchdb/org1?user - #Kuberenetes secret for vault ca.cert - #Enable or disable TLS for vault communication if value present or not - #Eg. tls: vaultca - tls: - - -service: - #Provide the servicetype for a peer - #Eg. servicetype: NodePort - servicetype: ClusterIP + #Provide OU which will be used to identify node as admin + #Eg.adminOUIdentifier: admin + adminOUIdentifier: admin + #Provide OU which will be used to identify node as orderer + #Eg.ordererOUIdentifier: orderer + ordererOUIdentifier: orderer + #Provide the serviceType for a peer + #Eg. serviceType: NodePort + serviceType: ClusterIP loadBalancerType: "" ports: grpc: - #Provide a nodeport for grpc service in the range of 30000-32767 (optional) - #Eg. nodeport: 30001 - nodeport: + #Provide a nodePort for grpc service in the range of 30000-32767 (optional) + #Eg. nodePort: 30001 + nodePort: #Provide a cluster IP port for grpc service to be exposed - #Eg. clusteripport: 7051 - clusteripport: 7051 + #Eg. clusterIpPort: 7051 + clusterIpPort: 7051 events: - #Provide a nodeport for event service in the range of 30000-32767 (optional) - #Eg. nodeport: 30002 - nodeport: + #Provide a nodePort for event service in the range of 30000-32767 (optional) + #Eg. nodePort: 30002 + nodePort: #Provide a cluster IP port for event service to be exposed - #Eg. clusteripport: 7053 - clusteripport: 7053 + #Eg. clusterIpPort: 7053 + clusterIpPort: 7053 couchdb: - #Provide a nodeport for couchdb service in the range of 30000-32767 (optional) - #Eg. nodeport: 30003 - nodeport: + #Provide a nodePort for couchdb service in the range of 30000-32767 (optional) + #Eg. nodePort: 30003 + nodePort: #Provide a cluster IP port for couchdb service to be exposed - #Eg. clusteripport: 5984 - clusteripport: 5984 + #Eg. clusterIpPort: 5984 + clusterIpPort: 5984 metrics: enabled: false - clusteripport: 9443 - -proxy: - #This will be the proxy/ingress provider. Can have values "none" or "haproxy" - #Eg. provider: "haproxy" - provider: "none" - #This field contains the external URL of the organization - #Eg. 
external_url_suffix: org1.blockchaincloudpoc.com - external_url_suffix: org1proxy.blockchaincloudpoc.com - #This field contains the external port on haproxy - #Eg. port: 443 - port: 443 - -config: - # Set limits and requests of pod - pod: - resources: - limits: + clusterIpPort: 9443 + resources: + limits: # Provide the limit memory for node - # Eg. memory: 512M - memory: 512M + # Eg. memory: 1Gi + memory: 1Gi # Provide the limit cpu for node # Eg. cpu: 1 - cpu: 1 - requests: + cpu: 1 + requests: # Provide the requests memory for node # Eg. memory: 512M - memory: 512M + memory: 512M # Provide the requests cpu for node # Eg. cpu: 0.25 - cpu: 0.25 + cpu: 0.25 + #Flag to be set to true when network is upgraded + upgrade: false + healthCheck: + retries: 20 + sleepTimeAfterError: 15 + +labels: + service: [] + pvc: [] + deployment: [] diff --git a/platforms/hyperledger-fabric/charts/values/noproxy-and-novault/ca-orderer.yaml b/platforms/hyperledger-fabric/charts/values/noproxy-and-novault/ca-orderer.yaml new file mode 100644 index 00000000000..9e3275732bb --- /dev/null +++ b/platforms/hyperledger-fabric/charts/values/noproxy-and-novault/ca-orderer.yaml @@ -0,0 +1,20 @@ +#helm install supplychain-ca -f values/noproxy-and-novault/ca-server.yaml -n supplychain-net fabric-ca-server +global: + serviceAccountName: vault-auth + cluster: + provider: azure + cloudNativeServices: false + vault: + type: kubernetes + proxy: + provider: none + externalUrlSuffix: supplychain-net + +storage: + size: 512Mi +server: + removeCertsOnDelete: true + tlsStatus: true + adminUsername: supplychain-admin + adminPassword: supplychain-adminpw + subject: "/C=GB/ST=London/L=London/O=Orderer" diff --git a/platforms/hyperledger-fabric/charts/values/noproxy-and-novault/ca-peer.yaml b/platforms/hyperledger-fabric/charts/values/noproxy-and-novault/ca-peer.yaml new file mode 100644 index 00000000000..ba145a003a2 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/values/noproxy-and-novault/ca-peer.yaml @@ -0,0 +1,20 @@ +#helm install carrier-ca -f values/noproxy-and-novault/ca-server.yaml -n carrier-net fabric-ca-server +global: + serviceAccountName: vault-auth + cluster: + provider: azure + cloudNativeServices: false + vault: + type: kubernetes + proxy: + provider: none + externalUrlSuffix: carrier-net + +storage: + size: 512Mi +server: + removeCertsOnDelete: true + tlsStatus: true + adminUsername: carrier-admin + adminPassword: carrier-adminpw + subject: "/C=GB/ST=London/L=London/O=Carrier" diff --git a/platforms/hyperledger-fabric/charts/values/noproxy-and-novault/carrier.yaml b/platforms/hyperledger-fabric/charts/values/noproxy-and-novault/carrier.yaml new file mode 100644 index 00000000000..87f053018e6 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/values/noproxy-and-novault/carrier.yaml @@ -0,0 +1,52 @@ +#helm install pee0-carrier -f values/noproxy-and-novault/peer.yaml -n carrier-net fabric-peer +global: + version: 2.5.4 + serviceAccountName: vault-auth + vault: + type: kubernetes + + cluster: + provider: azure + cloudNativeServices: false + + proxy: + provider: "none" + externalUrlSuffix: carrier-net + +certs: + generateCertificates: true + orgData: + caAddress: ca.carrier-net:7054 + caAdminUser: carrier-admin + caAdminPassword: carrier-adminpw + orgName: carrier + type: peer + componentSubject: "O=Carrier,OU=Carrier,L=51.50/-0.13/London,C=GB" + users: + usersList: + - user: + identity: user1 + attributes: + - key: "hf.Revoker" + value: "true" + settings: + #Flag to create configmaps for the organization. 
This flag must be set to true when installing the first orderer/peer in organization and false for others. + createConfigMaps: true + refreshCertValue: false + addPeerValue: false + removeCertsOnDelete: true + removeOrdererTlsOnDelete: true +storage: + enabled: false +peer: + gossipPeerAddress: + logLevel: info + localMspId: carrierMSP + tlsStatus: true + cliEnabled: true + ordererAddress: orderer1.supplychain-net:7050 + builder: hyperledger/fabric-ccenv + couchdb: + username: carrier-user + password: carrier-userpw + upgrade: false diff --git a/platforms/hyperledger-fabric/charts/values/noproxy-and-novault/genesis.yaml b/platforms/hyperledger-fabric/charts/values/noproxy-and-novault/genesis.yaml new file mode 100644 index 00000000000..bf48e007515 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/values/noproxy-and-novault/genesis.yaml @@ -0,0 +1,45 @@ +#helm install ca-certs -f values/noproxy-and-novault/genesis.yaml -n supplychain-net fabric-genesis +global: + version: 2.5.4 + serviceAccountName: vault-auth + cluster: + provider: azure + cloudNativeServices: false + vault: + type: kubernetes + proxy: + provider: none + externalUrlSuffix: supplychain-net + +organizations: + - name: supplychain + orderers: + - name: orderer1 + ordererAddress: orderer1.supplychain-net:7050 # Internal/External URI of the orderer + - name: orderer2 + ordererAddress: orderer2.supplychain-net:7050 + - name: orderer3 + ordererAddress: orderer3.supplychain-net:7050 + peers: + - name: peer0 + peerAddress: peer0.supplychain-net:7051 # Internal/External URI of the peer + - name: peer1 + peerAddress: peer1.supplychain-net:7051 + + - name: carrier + peers: + - name: peer0 + peerAddress: peer0.carrier-net:7051 # Internal/External URI of the peer + +consensus: raft +channels: + - name: allchannel + consortium: SupplyChainConsortium + orderers: + - supplychain + participants: + - supplychain + - carrier + +settings: + removeConfigMapOnDelete: true diff --git a/platforms/hyperledger-fabric/charts/values/noproxy-and-novault/join-channel.yaml b/platforms/hyperledger-fabric/charts/values/noproxy-and-novault/join-channel.yaml new file mode 100644 index 00000000000..39455b21a71 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/values/noproxy-and-novault/join-channel.yaml @@ -0,0 +1,19 @@ +--- +global: + version: 2.5.4 + serviceAccountName: vault-auth + cluster: + provider: azure # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # only 'false' is implemented + vault: + type: kubernetes + +peer: + name: peer0 + type: anchor + address: peer0.carrier-net:7051 + localMspId: carrierMSP + logLevel: info + tlsStatus: true + channelName: AllChannel + ordererAddress: orderer1.supplychain-net:7050 diff --git a/platforms/hyperledger-fabric/charts/values/noproxy-and-novault/orderer.yaml b/platforms/hyperledger-fabric/charts/values/noproxy-and-novault/orderer.yaml new file mode 100644 index 00000000000..8d07ca97ec5 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/values/noproxy-and-novault/orderer.yaml @@ -0,0 +1,37 @@ +#helm install orderer1 -f values/noproxy-and-novault/orderer.yaml -n supplychain-net fabric-orderernode +global: + version: 2.5.4 + serviceAccountName: vault-auth + vault: + type: kubernetes + + cluster: + provider: azure + cloudNativeServices: false + + proxy: + provider: none + externalUrlSuffix: supplychain-net + +certs: + generateCertificates: true + orgData: + caAddress: ca.supplychain-net:7054 + caAdminUser: supplychain-admin + caAdminPassword: supplychain-adminpw + orgName: 
supplychain + type: orderer + componentSubject: "O=Orderer,L=51.50/-0.13/London,C=GB" + settings: + #Flag to create configmaps for the organization. This flag must be set to true when installing the first orderer/peer in organization and false for others. + createConfigMaps: true + refreshCertValue: false + addPeerValue: false + removeCertsOnDelete: true + removeOrdererTlsOnDelete: true + +orderer: + consensus: raft + logLevel: info + localMspId: supplychainMSP + tlsstatus: true diff --git a/platforms/hyperledger-fabric/charts/values/noproxy-and-novault/peer.yaml b/platforms/hyperledger-fabric/charts/values/noproxy-and-novault/peer.yaml new file mode 100644 index 00000000000..c660bc11f9f --- /dev/null +++ b/platforms/hyperledger-fabric/charts/values/noproxy-and-novault/peer.yaml @@ -0,0 +1,51 @@ +#helm install pee0-carrier -f values/noproxy-and-novault/peer.yaml -n carrier-net fabric-peer +global: + version: 2.5.4 + serviceAccountName: vault-auth + vault: + type: kubernetes + + cluster: + provider: azure + cloudNativeServices: false + + proxy: + provider: "none" + externalUrlSuffix: supplychain-net + +certs: + generateCertificates: true + orgData: + caAddress: ca.supplychain-net:7054 + caAdminUser: supplychain-admin + caAdminPassword: supplychain-adminpw + orgName: supplychain + type: peer + componentSubject: "O=Orderer,L=51.50/-0.13/London,C=GB" + users: + usersList: + - user: + identity: user1 + attributes: + - key: "hf.Revoker" + value: "true" + settings: + #Flag to create configmaps for the organization. This flag must be set to true when installing the first orderer/peer in organization and false for others. + createConfigMaps: false + refreshCertValue: false + addPeerValue: false + removeCertsOnDelete: true + removeOrdererTlsOnDelete: true + +peer: + gossipPeerAddress: peer1.supplychain-net:7051 + logLevel: info + localMspId: supplychainMSP + tlsStatus: true + cliEnabled: false + ordererAddress: orderer1.supplychain-net:7050 + builder: hyperledger/fabric-ccenv + couchdb: + username: supplychain-user + password: supplychain-userpw + upgrade: false diff --git a/platforms/hyperledger-fabric/charts/values/proxy-and-vault/ca-orderer.yaml b/platforms/hyperledger-fabric/charts/values/proxy-and-vault/ca-orderer.yaml new file mode 100644 index 00000000000..3416340af25 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/values/proxy-and-vault/ca-orderer.yaml @@ -0,0 +1,27 @@ +#helm install supplychain-ca -f values/proxy-and-vault/ca-server.yaml -n supplychain-net fabric-ca-server +global: + serviceAccountName: vault-auth + cluster: + provider: azure + cloudNativeServices: false + kubernetesUrl: "https://yourkubernetes.com" + vault: + type: hashicorp + role: vault-role + address: http://vault.url:8200 + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + tls: false + proxy: + provider: haproxy + externalUrlSuffix: test.yourdomain.com + +storage: + size: 512Mi +server: + removeCertsOnDelete: true + tlsStatus: true + adminUsername: supplychain-admin + adminPassword: supplychain-adminpw + subject: "/C=GB/ST=London/L=London/O=Orderer" diff --git a/platforms/hyperledger-fabric/charts/values/proxy-and-vault/ca-peer.yaml b/platforms/hyperledger-fabric/charts/values/proxy-and-vault/ca-peer.yaml new file mode 100644 index 00000000000..2de4727a807 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/values/proxy-and-vault/ca-peer.yaml @@ -0,0 +1,27 @@ +#helm install carrier-ca -f values/proxy-and-vault/ca-server.yaml -n carrier-net fabric-ca-server +global: + 
serviceAccountName: vault-auth + cluster: + provider: azure + cloudNativeServices: false + kubernetesUrl: "https://yourkubernetes.com" + vault: + type: hashicorp + role: vault-role + address: http://vault.url:8200 + authPath: carrier + secretEngine: secretsv2 + secretPrefix: "data/carrier" + tls: false + proxy: + provider: haproxy + externalUrlSuffix: test.yourdomain.com + +storage: + size: 512Mi +server: + removeCertsOnDelete: true + tlsStatus: true + adminUsername: carrier-admin + adminPassword: carrier-adminpw + subject: /C=GB/ST=London/L=London/O=Carrier diff --git a/platforms/hyperledger-fabric/charts/values/proxy-and-vault/carrier.yaml b/platforms/hyperledger-fabric/charts/values/proxy-and-vault/carrier.yaml new file mode 100644 index 00000000000..c16adb871b2 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/values/proxy-and-vault/carrier.yaml @@ -0,0 +1,56 @@ +#helm install peer0-carrier -f values/proxy-and-vault/carrier.yaml -n carrier-net fabric-peer +global: + version: 2.5.4 + serviceAccountName: vault-auth + cluster: + provider: azure + cloudNativeServices: false + vault: + type: hashicorp + role: vault-role + address: http://vault.url:8200 + authPath: carrier + secretEngine: secretsv2 + secretPrefix: "data/carrier" + tls: false + proxy: + provider: haproxy + externalUrlSuffix: test.yourdomain.com + +certs: + generateCertificates: true + orgData: + caAddress: ca.carrier-net.test.yourdomain.com + caAdminUser: carrier-admin + caAdminPassword: carrier-adminpw + orgName: carrier + type: peer + componentSubject: "O=Carrier,OU=Carrier,L=51.50/-0.13/London,C=GB" + users: + usersList: + - user: + identity: user1 + attributes: + - key: "hf.Revoker" + value: "true" + settings: + #Flag to create configmaps for the organization.
+ createConfigMaps: true + refreshCertValue: false + addPeerValue: false + removeCertsOnDelete: true + removeOrdererTlsOnDelete: true +storage: + enabled: false +peer: + gossipPeerAddress: + logLevel: info + localMspId: carrierMSP + tlsStatus: true + cliEnabled: true + ordererAddress: orderer1.supplychain-net.test.yourdomain.com:443 + builder: hyperledger/fabric-ccenv + couchdb: + username: carrier-user + password: carrier-userpw + upgrade: false diff --git a/platforms/hyperledger-fabric/charts/values/proxy-and-vault/create-channel.yaml b/platforms/hyperledger-fabric/charts/values/proxy-and-vault/create-channel.yaml new file mode 100644 index 00000000000..7e5e5db3f0c --- /dev/null +++ b/platforms/hyperledger-fabric/charts/values/proxy-and-vault/create-channel.yaml @@ -0,0 +1,27 @@ +global: + version: 2.2.2 + serviceAccountName: vault-auth + cluster: + provider: azure + cloudNativeServices: false + vault: + type: hashicorp + role: vault-role + address: http://vault.url:8200 + authPath: carrier + secretEngine: secretsv2 + secretPrefix: "data/carrier" + tls: false + proxy: + provider: haproxy + externalUrlSuffix: test.yourdomain.com + +peer: + name: peer0 + type: anchor + address: peer0.carrier-net.test.yourdomain.com:443 + localMspId: carrierMSP + logLevel: info + tlsStatus: true + channelName: AllChannel + ordererAddress: orderer1.supplychain-net.test.yourdomain.com:443 diff --git a/platforms/hyperledger-fabric/charts/values/proxy-and-vault/genesis.yaml b/platforms/hyperledger-fabric/charts/values/proxy-and-vault/genesis.yaml new file mode 100644 index 00000000000..2facfc098c1 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/values/proxy-and-vault/genesis.yaml @@ -0,0 +1,50 @@ +#helm install ca-certs -f values/proxy-and-vault/genesis.yaml -n supplychain-net fabric-genesis +global: + version: 2.5.4 + serviceAccountName: vault-auth + cluster: + provider: azure + cloudNativeServices: false + vault: + type: hashicorp + role: vault-role + address: http://vault.url:8200 + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + tls: false + proxy: + provider: haproxy + externalUrlSuffix: test.yourdomain.com + +organizations: + - name: supplychain + orderers: + - name: orderer1 + ordererAddress: orderer1.supplychain-net.test.yourdomain.com:443 + - name: orderer2 + ordererAddress: orderer2.supplychain-net.test.yourdomain.com:443 + - name: orderer3 + ordererAddress: orderer3.supplychain-net.test.yourdomain.com:443 + peers: + - name: peer0 + peerAddress: peer0.supplychain-net.test.yourdomain.com:443 # Internal/External URI of the peer + - name: peer1 + peerAddress: peer1.supplychain-net.test.yourdomain.com:443 + + - name: carrier + peers: + - name: peer0 + peerAddress: peer0.carrier-net.test.yourdomain.com:443 # External URI of the peer +consensus: raft +channels: + - name: allchannel + consortium: SupplyChainConsortium + orderers: + - supplychain + participants: + - supplychain + - carrier + +settings: + removeConfigMapOnDelete: true diff --git a/platforms/hyperledger-fabric/charts/values/proxy-and-vault/join-channel.yaml b/platforms/hyperledger-fabric/charts/values/proxy-and-vault/join-channel.yaml new file mode 100644 index 00000000000..be85234b3a0 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/values/proxy-and-vault/join-channel.yaml @@ -0,0 +1,24 @@ +global: + version: 2.5.4 + serviceAccountName: vault-auth + cluster: + provider: azure + cloudNativeServices: false + vault: + type: hashicorp + role: vault-role + address: http://vault.url:8200 + 
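+    # The authPath, secretEngine and secretPrefix values below should match the
+    # Vault locations used when the supplychain organisation's certificates were
+    # generated, so the join-channel job can read the org's crypto material.
+    # Illustrative check (the path is an assumption derived from these values):
+    #   vault kv list secretsv2/supplychain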
authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + tls: false + +peer: + name: peer0 + type: anchor + address: peer0.supplychain-net.test.yourdomain.com:443 + localMspId: supplychainMSP + logLevel: info + tlsStatus: true + channelName: AllChannel + ordererAddress: orderer1.supplychain-net.test.yourdomain.com:443 diff --git a/platforms/hyperledger-fabric/charts/values/proxy-and-vault/orderer.yaml b/platforms/hyperledger-fabric/charts/values/proxy-and-vault/orderer.yaml new file mode 100644 index 00000000000..f3980f05b25 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/values/proxy-and-vault/orderer.yaml @@ -0,0 +1,41 @@ +#helm install orderer1 -f values/proxy-and-vault/orderer.yaml -n supplychain-net fabric-orderernode +global: + version: 2.5.4 + serviceAccountName: vault-auth + cluster: + provider: azure + cloudNativeServices: false + vault: + type: hashicorp + role: vault-role + address: http://vault.url:8200 + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + tls: false + proxy: + provider: haproxy + externalUrlSuffix: test.yourdomain.com + +certs: + generateCertificates: true + orgData: + caAddress: ca.supplychain-net.test.yourdomain.com + caAdminUser: supplychain-admin + caAdminPassword: supplychain-adminpw + orgName: supplychain + type: orderer + componentSubject: "O=Orderer,L=51.50/-0.13/London,C=GB" + settings: + #Flag to create configmaps for the organization. This flag must be set to true when installing the first orderer/peer in organization and false for others. + createConfigMaps: true + refreshCertValue: false + addPeerValue: false + removeCertsOnDelete: true + removeOrdererTlsOnDelete: true + +orderer: + consensus: raft + logLevel: info + localMspId: supplychainMSP + tlsstatus: true diff --git a/platforms/hyperledger-fabric/charts/values/proxy-and-vault/osn-create-channel.yaml b/platforms/hyperledger-fabric/charts/values/proxy-and-vault/osn-create-channel.yaml new file mode 100644 index 00000000000..62ab1d0dcba --- /dev/null +++ b/platforms/hyperledger-fabric/charts/values/proxy-and-vault/osn-create-channel.yaml @@ -0,0 +1,24 @@ +global: + version: 2.5.4 + serviceAccountName: vault-auth + cluster: + provider: azure + cloudNativeServices: false + vault: + type: hashicorp + role: vault-role + address: http://vault.url:8200 + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + tls: false + proxy: + provider: haproxy + externalUrlSuffix: test.yourdomain.com +orderers: + - name: orderer1 + adminAddress: orderer1.supplychain-net:7055 # Internal URI of the orderer ONS Admin service + - name: orderer2 + adminAddress: orderer2.supplychain-net:7055 + - name: orderer3 + adminAddress: orderer3.supplychain-net:7055 diff --git a/platforms/hyperledger-fabric/charts/values/proxy-and-vault/peer.yaml b/platforms/hyperledger-fabric/charts/values/proxy-and-vault/peer.yaml new file mode 100644 index 00000000000..ab684308fd4 --- /dev/null +++ b/platforms/hyperledger-fabric/charts/values/proxy-and-vault/peer.yaml @@ -0,0 +1,55 @@ +#helm install pee0 -f values/proxy-and-vault/orderer.yaml -n carrier-net fabric-orderernode +global: + version: 2.5.4 + serviceAccountName: vault-auth + cluster: + provider: azure + cloudNativeServices: false + vault: + type: hashicorp + role: vault-role + address: http://vault.url:8200 + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + tls: false + proxy: + provider: haproxy + externalUrlSuffix: test.yourdomain.com + +certs: + 
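+  # With generateCertificates set to true, the chart enrols this peer's
+  # identities against the CA described in orgData below, using the CA admin
+  # credentials. Illustrative pre-check that the CA is running (namespace is an
+  # assumption based on the addresses in this file):
+  #   kubectl get svc,pods -n supplychain-net | grep ca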
generateCertificates: true + orgData: + caAddress: ca.supplychain-net:7054 + caAdminUser: supplychain-admin + caAdminPassword: supplychain-adminpw + orgName: supplychain + type: peer + componentSubject: "O=Orderer,L=51.50/-0.13/London,C=GB" + users: + usersList: + - user: + identity: user1 + attributes: + - key: "hf.Revoker" + value: "true" + settings: + #Flag to create configmaps for the organization. This flag must be set to true when installing the first orderer/peer in organization and false for others. + createConfigMaps: false + refreshCertValue: false + addPeerValue: false + removeCertsOnDelete: true + removeOrdererTlsOnDelete: true + +peer: + gossipPeerAddress: peer1.supplychain-net.test.yourdomain.com:443 + logLevel: info + localMspId: supplychainMSP + tlsStatus: true + cliEnabled: false + ordererAddress: orderer1.supplychain-net.test.yourdomain.com:443 + builder: hyperledger/fabric-ccenv + couchdb: + username: supplychain-user + password: supplychain-userpw + upgrade: false diff --git a/platforms/hyperledger-fabric/configuration/add-new-channel.yaml b/platforms/hyperledger-fabric/configuration/add-new-channel.yaml index e88c97a1d9e..86adf495296 100644 --- a/platforms/hyperledger-fabric/configuration/add-new-channel.yaml +++ b/platforms/hyperledger-fabric/configuration/add-new-channel.yaml @@ -24,45 +24,16 @@ file: path: "./build" state: absent - - # Create generate_crypto script for each organization - - include_role: - name: "create/crypto_script" - vars: - component_type: "{{ item.type | lower}}" - orderers: "{{ item.services.orderers }}" - loop: "{{ network['organizations'] }}" - - # Creating channel artifacts - # This role creates configtx.yaml file as the requirements mentioned in network.yaml - # which is then consumed by configtxgen tool - - include_role: - name: "create/configtx" - vars: - config_file: "./build/configtx.yaml" - - # This role generate channeltx - - include_role: - name: "create/channel_artifacts" - vars: - build_path: "./build" - genesis: "{{ item.genesis }}" - channel_name: "{{ item.channel_name | lower}}" - profile_name: "{{ item.channel_name }}" - add_new_org: 'false' - fetch_certs: "true" - loop: "{{ network['channels'] }}" - when: item.channel_status == 'new' - + - name: "Create genesis block" include_role: name: "create/genesis" vars: - build_path: "./build" - genesis: "{{ item.genesis }}" - channel_name: "{{ item.channel_name | lower }}" - loop: "{{ network['channels'] }}" - when: item.channel_status == 'new' and '2.5.' in network.version + org: "{{ network['organizations'] | first }}" + docker_url: "{{ network.docker.url }}" + kubernetes: "{{ org.k8s }}" + generateGenisis: false + when: genererate_configtx is defined and genererate_configtx == 'true' # This role creates the value file for creating channel from creator organization # to the vault. @@ -72,6 +43,7 @@ build_path: "./build" participants: "{{ item.participants }}" docker_url: "{{ network.docker.url }}" + channel_name: "{{ item.channel_name | lower }}" loop: "{{ network['channels'] }}" when: item.channel_status == 'new' and ('2.2.' in network.version or '1.4.' in network.version) @@ -97,20 +69,9 @@ loop: "{{ network['channels'] }}" when: item.channel_status == 'new' - # This role creates the value file for anchor peer update over channel for - # each organization which is the part of the channel. 
- - include_role: - name: "create/anchorpeer" - vars: - build_path: "./build" - participants: "{{ item.participants }}" - docker_url: "{{ network.docker.url }}" - loop: "{{ network['channels'] }}" - when: item.channel_status == 'new' - vars: #These variables can be overriden from the command line - privilege_escalate: false #Default to NOT escalate to root privledges - install_os: "linux" #Default to linux OS - install_arch: "amd64" #Default to amd64 architecture - bin_install_dir: "~/bin" #Default to /bin install directory for binaries + privilege_escalate: false # Default to NOT escalate to root privledges + install_os: "linux" # Default to linux OS + install_arch: "amd64" # Default to amd64 architecture + bin_install_dir: "~/bin" # Default to /bin install directory for binaries add_new_org: 'false' # Default to false as this is for main network creation diff --git a/platforms/hyperledger-fabric/configuration/add-orderer-organization.yaml b/platforms/hyperledger-fabric/configuration/add-orderer-organization.yaml index f504915daaa..9c419172fbb 100644 --- a/platforms/hyperledger-fabric/configuration/add-orderer-organization.yaml +++ b/platforms/hyperledger-fabric/configuration/add-orderer-organization.yaml @@ -36,6 +36,19 @@ release_dir: "{{ playbook_dir }}/../../../{{ item.gitops.release_dir }}/{{ item.name | lower }}" loop: "{{ network['organizations'] }}" when: item.org_status == 'new' + + # Setup script for Vault and OS Package Manager + - name: "Setup script for Vault and OS Package Manager" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/setup/scripts" + vars: + namespace: "{{ org.name | lower }}-net" + network_type: "{{ network.type | lower }}" + kubernetes: "{{ org.k8s }}" + loop: "{{ network['organizations'] }}" + loop_control: + loop_var: org + when: org.org_status == 'new' # Setup Vault-Kubernetes accesses and Regcred for docker registry for new organization - name: "Create vault-auth for new org" @@ -78,6 +91,7 @@ component: "{{ item.name | lower}}" component_type: "{{ item.type | lower}}" component_services: "{{ item.services }}" + sc_name: "{{ component }}-bevel-storageclass" kubernetes: "{{ item.k8s }}" vault: "{{ item.vault }}" ca: "{{ item.services.ca }}" @@ -102,6 +116,7 @@ component: "{{ item.name | lower}}" component_type: "{{ item.type | lower}}" component_services: "{{ item.services }}" + sc_name: "{{ component }}-bevel-storageclass" kubernetes: "{{ item.k8s }}" vault: "{{ item.vault }}" ca: "{{ item.services.ca }}" @@ -109,7 +124,7 @@ gitops: "{{ item.gitops }}" values_dir: "{{playbook_dir}}/../../../{{item.gitops.release_dir}}/{{ item.name | lower }}" loop: "{{ network['organizations'] }}" - when: item.type == 'orderer' + when: item.type == 'orderer' and item.org_status == 'new' # Creating channel artifacts and putting them in vault # This role creates configtx.yaml file as the requirements mentioned in network.yaml @@ -146,7 +161,7 @@ loop: "{{ network.organizations }}" loop_control: loop_var: neworg - when: neworg.type == 'orderer' and neworg.org_status == 'new' + when: neworg.type == 'orderer' and neworg.org_status == 'new' and '2.5.' 
not in network.version # This role creates value file for zk-kafka (if kafka consensus is chosen) and orderer - name: Create all orderers @@ -157,6 +172,7 @@ namespace: "{{ item.name | lower}}-net" component_type: "{{ item.type | lower}}" component_services: "{{ item.services }}" + sys_channel_name: "syschannel" vault: "{{ item.vault }}" git_protocol: "{{ item.gitops.git_protocol }}" git_url: "{{ item.gitops.git_url }}" @@ -180,7 +196,7 @@ loop: "{{ network.organizations }}" loop_control: loop_var: neworg - when: neworg.type == 'orderer' and neworg.org_status == 'new' + when: neworg.type == 'orderer' and neworg.org_status == 'new' and '2.5.' not in network.version # This role adds the new org to the existing chnanel and updates the block with the application channel - name: "Modify the application channel with tls and address information of new orderer" diff --git a/platforms/hyperledger-fabric/configuration/cleanup.yaml b/platforms/hyperledger-fabric/configuration/cleanup.yaml index 68079c0386a..6aa92898092 100644 --- a/platforms/hyperledger-fabric/configuration/cleanup.yaml +++ b/platforms/hyperledger-fabric/configuration/cleanup.yaml @@ -36,7 +36,7 @@ component_name: "{{ item.name | lower }}-net" loop: "{{ network['organizations'] }}" when: - - item.type == "orderer" + - item.services.orderers is defined and item.services.orderers | length > 0 - network.env.type != 'operator' - include_role: diff --git a/platforms/hyperledger-fabric/configuration/deploy-network.yaml b/platforms/hyperledger-fabric/configuration/deploy-network.yaml index 42875f68745..b3370ab63fb 100644 --- a/platforms/hyperledger-fabric/configuration/deploy-network.yaml +++ b/platforms/hyperledger-fabric/configuration/deploy-network.yaml @@ -29,79 +29,45 @@ include_role: name: "create/namespace" vars: - component_name: "{{ item.name | lower }}-net" - component_type_name: "{{ item.type | lower }}" - kubernetes: "{{ item.k8s }}" - release_dir: "{{playbook_dir}}/../../../{{item.gitops.release_dir}}/{{ item.name | lower }}" - loop: "{{ network['organizations'] }}" - - # Setup script for Vault and OS Package Manager - - name: "Setup script for Vault and OS Package Manager" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/setup/scripts" - vars: - namespace: "{{ org.name | lower }}-net" + component_name: "{{ org.name | lower }}-net" kubernetes: "{{ org.k8s }}" + release_dir: "{{playbook_dir}}/../../../{{org.gitops.release_dir}}/{{ org.name | lower }}" loop: "{{ network['organizations'] }}" loop_control: loop_var: org - - # Setup Vault-Kubernetes accesses and Regcred for docker registry - - name: Setup Vault Kubernetes for each organization - include_role: - name: "{{playbook_dir}}/../../shared/configuration/roles/setup/vault_kubernetes" + + # Create necessary secrets + - name: "Create k8s secrets" + include_role: + name: create/secrets vars: - name: "{{ org.name | lower }}" - component_name: "{{ org.name | lower }}-vaultk8s-job" - component_type: "{{ org.type | lower }}" component_ns: "{{ org.name | lower }}-net" - component_auth: "{{ org.k8s.cluster_id | default('')}}{{ network.env.type }}{{ name }}" kubernetes: "{{ org.k8s }}" vault: "{{ org.vault }}" - gitops: "{{ org.gitops }}" loop: "{{ network['organizations'] }}" loop_control: loop_var: org - - # Create Storageclass - - name: Create storageclass for each organization - include_role: - name: "{{ playbook_dir }}/../../../platforms/shared/configuration/roles/setup/storageclass" - vars: - org_name: "{{ org.name | lower }}" - sc_name: "{{ org_name 
}}-bevel-storageclass" - region: "{{ org.k8s.region | default('eu-west-1') }}" - loop: "{{ network['organizations'] }}" - loop_control: - loop_var: org - + when: + - org.org_status is not defined or org.org_status == 'new' + # Create CA Server helm-value files and check-in - name: Create CA server for each organization include_role: name: "create/ca_server" vars: - component_name: "{{ item.name | lower}}-net" - component: "{{ item.name | lower}}" - component_type: "{{ item.type | lower}}" - component_services: "{{ item.services }}" - sc_name: "{{ component }}-bevel-storageclass" - kubernetes: "{{ item.k8s }}" - vault: "{{ item.vault }}" - ca: "{{ item.services.ca }}" + component_ns: "{{ org.name | lower}}-net" + component: "{{ org.name | lower}}" + component_services: "{{ org.services }}" + kubernetes: "{{ org.k8s }}" + vault: "{{ org.vault }}" + ca: "{{ org.services.ca }}" docker_url: "{{ network.docker.url }}" - gitops: "{{ item.gitops }}" - values_dir: "{{playbook_dir}}/../../../{{item.gitops.release_dir}}/{{ item.name | lower }}" - loop: "{{ network['organizations'] }}" - when: item.services.ca is defined - - # Create generate_crypto script for each organization - - name: Create generate_crypto.sh for each organization - include_role: - name: "create/crypto_script" - vars: - component_type: "{{ item.type | lower }}" - orderers: "{{ item.services.orderers }}" + gitops: "{{ org.gitops }}" + values_dir: "{{playbook_dir}}/../../../{{org.gitops.release_dir}}/{{ org.name | lower }}" loop: "{{ network['organizations'] }}" + loop_control: + loop_var: org + when: org.services.ca is defined #Creating a pause so that the client certificates are valid # You can continue if the CA-server pods are running for more than 5 minutes @@ -109,92 +75,28 @@ prompt: "Sleeping... 
so that the client certificates are valid" minutes: 6 - # Create CA Tools helm-value files and check-in - - name: Create CA tools for each organization - include_role: - name: "create/ca_tools/orderer" - vars: - component_name: "{{ item.name | lower }}-net" - component: "{{ item.name | lower }}" - component_type: "{{ item.type | lower }}" - component_services: "{{ item.services }}" - sc_name: "{{ component }}-bevel-storageclass" - kubernetes: "{{ item.k8s }}" - vault: "{{ item.vault }}" - ca: "{{ item.services.ca }}" - docker_url: "{{ network.docker.url }}" - gitops: "{{ item.gitops }}" - values_dir: "{{playbook_dir}}/../../../{{item.gitops.release_dir}}/{{ item.name | lower }}" - loop: "{{ network['organizations'] }}" - when: item.type == 'orderer' - - # Create CA Tools helm-value files and check-in - - name: Create CA tools for each organization - include_role: - name: "create/ca_tools/peer" - vars: - component_name: "{{ item.name | lower}}-net" - component: "{{ item.name | lower}}" - component_type: "{{ item.type | lower}}" - component_services: "{{ item.services }}" - orderer_org: "{{ item.orderer_org | lower }}" - sc_name: "{{ component }}-bevel-storageclass" - kubernetes: "{{ item.k8s }}" - vault: "{{ item.vault }}" - ca: "{{ item.services.ca }}" - docker_url: "{{ network.docker.url }}" - gitops: "{{ item.gitops }}" - values_dir: "{{playbook_dir}}/../../../{{item.gitops.release_dir}}/{{ item.name | lower }}" - loop: "{{ network['organizations'] }}" - when: item.type == 'peer' - - # Creating channel artifacts and putting them in vault - # This role creates configtx.yaml file as the requirements mentioned in network.yaml - # which is then consumed by configtxgen tool - - name: Create configtx.yaml - include_role: - name: "create/configtx" - vars: - config_file: "./build/configtx.yaml" - - # This role generate genesis block and channeltx - - name: Create channel artifacts for all channels - include_role: - name: "create/channel_artifacts" - vars: - build_path: "./build" - channel_name: "{{ item.channel_name | lower}}" - profile_name: "{{ item.channel_name }}" - fetch_certs: "false" - loop: "{{ network['channels'] }}" - - - name: "Create genesis block" - include_role: - name: "create/genesis" - vars: - build_path: "./build" - genesis: "{{ item.genesis }}" - channel_name: "{{ item.channel_name | lower }}" - loop: "{{ network['channels'] }}" - # This role creates value file for zk-kafka (if kafka consensus is chosen) and orderer - name: Create all orderers include_role: name: "create/orderers" vars: build_path: "./build" - namespace: "{{ item.name | lower}}-net" - component_type: "{{ item.type | lower}}" - component_services: "{{ item.services }}" - vault: "{{ item.vault }}" - git_protocol: "{{ item.gitops.git_protocol }}" - git_url: "{{ item.gitops.git_url }}" - git_branch: "{{ item.gitops.branch }}" + namespace: "{{ org.name | lower}}-net" + org_name: "{{ org.name | lower }}" + component_services: "{{ org.services }}" + kubernetes: "{{ org.k8s }}" + vault: "{{ org.vault }}" + sys_channel_name: "syschannel" + git_protocol: "{{ org.gitops.git_protocol }}" + git_url: "{{ org.gitops.git_url }}" + git_branch: "{{ org.gitops.branch }}" docker_url: "{{ network.docker.url }}" - charts_dir: "{{ item.gitops.chart_source }}" - values_dir: "{{playbook_dir}}/../../../{{item.gitops.release_dir}}/{{ item.name | lower }}" + charts_dir: "{{ org.gitops.chart_source }}" + values_dir: "{{playbook_dir}}/../../../{{org.gitops.release_dir}}/{{ org.name | lower }}" loop: "{{ network['organizations'] }}" - when: 
item.type == 'orderer' + loop_control: + loop_var: org + when: org.services.orderers is defined and org.services.orderers | length > 0 # This role creates the value file for peers of organisations and write couch db credentials # to the vault. @@ -203,75 +105,30 @@ name: "create/peers" vars: build_path: "./build" - namespace: "{{ item.name | lower}}-net" - component_type: "{{ item.type | lower}}" - component_services: "{{ item.services }}" - vault: "{{ item.vault }}" - git_protocol: "{{ item.gitops.git_protocol }}" - git_url: "{{ item.gitops.git_url }}" - git_branch: "{{ item.gitops.branch }}" + namespace: "{{ org.name | lower}}-net" + component_type: "{{ org.type | lower}}" + component_services: "{{ org.services }}" + kubernetes: "{{ org.k8s }}" + vault: "{{ org.vault }}" + git_protocol: "{{ org.gitops.git_protocol }}" + git_url: "{{ org.gitops.git_url }}" + git_branch: "{{ org.gitops.branch }}" docker_url: "{{ network.docker.url }}" - charts_dir: "{{ item.gitops.chart_source }}" - values_dir: "{{playbook_dir}}/../../../{{item.gitops.release_dir}}/{{ item.name | lower }}" + charts_dir: "{{ org.gitops.chart_source }}" + values_dir: "{{playbook_dir}}/../../../{{org.gitops.release_dir}}/{{ org.name | lower }}" loop: "{{ network['organizations'] }}" - when: item.type == 'peer' - - # This role creates the value file for creating channel from creator organization - # to the vault. - - name: Create all create-channel jobs - include_role: - name: "create/channels" - vars: - build_path: "./build" - participants: "{{ item.participants }}" - docker_url: "{{ network.docker.url }}" - loop: "{{ network['channels'] }}" - when: add_new_org == 'false' and ('2.2.' in network.version or '1.4.' in network.version) - - # This role creates the value file for creating channel from creator organization - # to the vault. - - name: Create all create-channel jobs - include_role: - name: "create/osnchannels" - vars: - build_path: "./build" - docker_url: "{{ network.docker.url }}" - loop: "{{ network['channels'] }}" - when: add_new_org == 'false' and '2.5.' in network.version - - # This role creates the value file for joining channel from each participating peer - # to the vault. - - name: Create all join-channel jobs - include_role: - name: "create/channels_join" - vars: - build_path: "./build" - participants: "{{ item.participants }}" - docker_url: "{{ network.docker.url }}" - loop: "{{ network['channels'] }}" - - # This role creates the value file for anchor peer update over channel for - # each organization which is the part of the channel. 
- - name: Create all anchor-peer jobs - include_role: - name: "create/anchorpeer" - vars: - build_path: "./build" - participants: "{{ item.participants }}" - docker_url: "{{ network.docker.url }}" - loop: "{{ network['channels'] }}" + loop_control: + loop_var: org + when: org.services.peers is defined and org.services.peers | length > 0 - # Create CLI pod for peers with cli option enabled - - name: Create CLI pod for each peer with it enabled - include_role: - name: "create/cli_pod" + - name: "Create genesis block" + include_role: + name: "create/genesis" vars: - peers: "{{ org.services.peers }}" + org: "{{ network['organizations'] | first }}" docker_url: "{{ network.docker.url }}" - loop: "{{ network.organizations }}" - loop_control: - loop_var: org - when: org.type == "peer" and org.org_status == "new" + kubernetes: "{{ org.k8s }}" + generateGenisis: true vars: #These variables can be overriden from the command line privilege_escalate: false #Default to NOT escalate to root privledges diff --git a/platforms/hyperledger-fabric/configuration/external-chaincode-ops.yaml b/platforms/hyperledger-fabric/configuration/external-chaincode-ops.yaml index 831628ba575..3ff73511ba9 100644 --- a/platforms/hyperledger-fabric/configuration/external-chaincode-ops.yaml +++ b/platforms/hyperledger-fabric/configuration/external-chaincode-ops.yaml @@ -1,5 +1,5 @@ -# This playbook executes required tasks to install and instantiate external chaincode -# on existing Kubernetes clusters. The Kubernetes clusters should already be created and the infomation +# This playbook executes required tasks to install and instantiate external chaincode +# on existing Kubernetes clusters. The Kubernetes clusters should already be created and the infomation # to connect to the clusters be updated in the network.yaml file that is used as an input to this playbook ########################################################################################### # To Run this playbook from this directory, use the following command (network.yaml also in this directory) @@ -77,7 +77,7 @@ - item.type == 'peer' ############################################################################################ - # This task generates the crypto material by executing the generate-crypto-peer-chaincode.sh script + # This task generates the crypto material by executing the generate-crypto-peer-chaincode.sh script - name: Generate crypto material for peer to interact with external chaincode servers include_role: name: "create/chaincode/peer_certs" @@ -94,7 +94,7 @@ ca_server_url: "{{ item.ca_data.url }}" setup_user_env: true loop: "{{ network['organizations'] }}" - when: + when: - item.type == 'peer' ############################################################################################ @@ -115,7 +115,7 @@ ca_server_url: "{{ item.ca_data.url }}" setup_user_env: true loop: "{{ network['organizations'] }}" - when: + when: - item.type == 'peer' ############################################################################################ @@ -140,7 +140,7 @@ charts_dir: "{{ item.gitops.chart_source }}" values_dir: "{{playbook_dir}}/../../../{{item.gitops.release_dir}}/{{ item.name | lower }}" loop: "{{ network['organizations'] }}" - when: + when: - item.type == 'peer' - item.org_status == 'new' @@ -165,8 +165,8 @@ charts_dir: "{{ item.gitops.chart_source }}" values_dir: "{{playbook_dir}}/../../../{{item.gitops.release_dir}}/{{ item.name | lower }}" loop: "{{ network['organizations'] }}" - when: - - item.type == 'peer' + when: + - item.type == 'peer' - 
item.org_status == 'new' ############################################################################################ @@ -202,11 +202,11 @@ docker_url: "{{ network.docker.url }}" approvers: "{{ item.endorsers | default('', true) }}" loop: "{{ network['channels'] }}" - when: add_new_org == 'true' or '2.' in network.version + when: add_new_org == 'true' or '2.' in network.version vars: #These variables can be overriden from the command line - privilege_escalate: false #Default to NOT escalate to root privledges - install_os: "linux" #Default to linux OS - install_arch: "amd64" #Default to amd64 architecture - bin_install_dir: "~/bin" #Default to /bin install directory for binaries - add_new_org: "false" # Default to false as this is for main network creation + privilege_escalate: false #Default to NOT escalate to root privledges + install_os: "linux" #Default to linux OS + install_arch: "amd64" #Default to amd64 architecture + bin_install_dir: "~/bin" #Default to /bin install directory for binaries + add_new_org: 'false' # Default to false as this is for main network creation diff --git a/platforms/hyperledger-fabric/configuration/manage-user-certificate.yaml b/platforms/hyperledger-fabric/configuration/manage-user-certificate.yaml index 46f7fa7d1cb..770a0d93aca 100644 --- a/platforms/hyperledger-fabric/configuration/manage-user-certificate.yaml +++ b/platforms/hyperledger-fabric/configuration/manage-user-certificate.yaml @@ -54,30 +54,22 @@ loop: "{{ network['organizations'] }}" ############################################################################################ - # This task generates the crypto material by running the ca_tools/peer playbook + # This task generates the crypto material by executing the generate-user-crypto.sh script file + # present in the Organization's CA Tools CLI - name: Generate crypto material for user include_role: - name: "create/ca_tools/peer" + name: "create/users" vars: component_name: "{{ item.name | lower}}-net" - component: "{{ item.name | lower}}" component_type: "{{ item.type | lower}}" - component_services: "{{ item.services }}" - orderer_org: "{{ item.orderer_org | lower }}" - sc_name: "{{ component }}-bevel-storageclass" + org_name: "{{ item.name }}" + services: "{{ item.services }}" + subject: "{{ item.subject }}" + cert_subject: "{{ item.subject | regex_replace('/', ';') | regex_replace(',', '/') | regex_replace(';', ',') }}" # replace , to / and / to , for certpath kubernetes: "{{ item.k8s }}" vault: "{{ item.vault }}" - ca: "{{ item.services.ca }}" - docker_url: "{{ network.docker.url }}" - gitops: "{{ item.gitops }}" - values_dir: "{{ playbook_dir }}/../../../{{ item.gitops.release_dir }}/{{ item.name | lower }}" + users: "{{ item.users }}" + proxy: "{{ network.env.proxy }}" + ca_url: "{{ item.ca_data.url }}" loop: "{{ network['organizations'] }}" - when: item.type == 'peer' - - vars: #These variables can be overriden from the command line - privilege_escalate: false #Default to NOT escalate to root privledges - install_os: "linux" #Default to linux OS - install_arch: "amd64" #Default to amd64 architecture - refresh_user_cert: 'true' #Default for this playbook is true - bin_install_dir: "~/bin" #Default to ~/bin install directory for binaries - add_new_org: "false" + when: item.type == 'peer' and item.users is defined diff --git a/platforms/hyperledger-fabric/configuration/refresh-certificates.yaml b/platforms/hyperledger-fabric/configuration/refresh-certificates.yaml index 568a5dca276..617a80bf99b 100644 --- 
a/platforms/hyperledger-fabric/configuration/refresh-certificates.yaml +++ b/platforms/hyperledger-fabric/configuration/refresh-certificates.yaml @@ -47,7 +47,6 @@ component: "{{ item.name | lower}}" component_type: "{{ item.type | lower}}" component_services: "{{ item.services }}" - sc_name: "{{ item.name | lower}}-bevel-storageclass" kubernetes: "{{ item.k8s }}" vault: "{{ item.vault }}" ca: "{{ item.services.ca }}" @@ -82,7 +81,6 @@ component_type: "{{ item.type | lower}}" component_services: "{{ item.services }}" orderer_org: "{{ item.orderer_org | lower }}" - sc_name: "{{ item.name | lower}}-bevel-storageclass" kubernetes: "{{ item.k8s }}" vault: "{{ item.vault }}" ca: "{{ item.services.ca }}" diff --git a/platforms/hyperledger-fabric/configuration/roles/create/ca_server/tasks/main.yaml b/platforms/hyperledger-fabric/configuration/roles/create/ca_server/tasks/main.yaml index c094c4d0538..16cdc3413bf 100644 --- a/platforms/hyperledger-fabric/configuration/roles/create/ca_server/tasks/main.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/create/ca_server/tasks/main.yaml @@ -9,105 +9,38 @@ # Also, creates the value file for Certificate Authority (CA) ############################################################################################# -# Create the folder to store crypto material -- name: "creating the directory ./build/crypto-config/{{ component_type }}Organizations/{{ component_name }}/ca" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/directory" - vars: - path: "./build/crypto-config/{{ component_type }}Organizations/{{ component_name }}/ca" - -- name: Check if CA key already exists in vault. - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/setup" - vars: - vault_field: "{{ component_name }}-CA.key" - vault_path: "{{ vault.secret_path | default('secretsv2') }}/{{ component }}/{{ component_type }}Organizations/{{ component_name }}/ca" - check: "certs_created" - -# Generate cacerts helmrelease file. 
-- name: "Create value file for cacerts job" - include_role: - name: helm_component - vars: - name: "{{ component }}" - type: "cacerts_job" - component_name: "{{ component }}-cacerts-job" - component_ns: "{{ component }}-net" - subject: "{{ ca.subject }}" - git_protocol: "{{ gitops.git_protocol }}" - git_url: "{{ gitops.git_url }}" - git_branch: "{{ gitops.branch }}" - charts_dir: "{{ gitops.chart_source }}" - vault: "{{ item.vault }}" - values_dir: "{{playbook_dir}}/../../../{{ gitops.release_dir }}/{{ component }}" - when: certs_created.failed == True - -# Git Push : Push the above generated files to git directory -- name: Git Push - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" - vars: - GIT_DIR: "{{ playbook_dir }}/../../../" - gitops: "{{ item.gitops }}" - msg: "[ci skip] Pushing CA server files" - when: certs_created.failed == True - tags: - - notest - -# Check if cacerts-job is completed -- name: Check if cacerts-job job is completed - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" - vars: - component_type: Job - namespace: "{{ component }}-net" - component_name: "{{ component }}-cacerts-job" - kubernetes: "{{ item.k8s }}" - when: certs_created.failed == True - tags: - - notest - # Copy custom config for fabric-ca server - name: Copy custom config for fabric-ca server shell: | cp {{ ca.configpath }} ../../../{{ gitops.chart_source }}/ca/conf/fabric-ca-server-config-{{ component }}.yaml when: ca.configpath is defined -# Create the CA value file for Orderer -- name: "Create CA server values for Orderer" - include_role: - name: helm_component - vars: - name: "{{ ca.name }}" - type: "ca-orderer" - git_protocol: "{{ gitops.git_protocol }}" - git_url: "{{ gitops.git_url }}" - git_branch: "{{ gitops.branch }}" - charts_dir: "{{ gitops.chart_source }}" - external_url_suffix: "{{ item.external_url_suffix }}" - when: component_type == 'orderer' +- name: Get the kubernetes server url + shell: | + KUBECONFIG={{ kubernetes.config_file }} kubectl config view --minify | grep server | cut -f 2- -d ":" | tr -d " " + register: kubernetes_server_url -# Create the CA value file for Organizations -- name: "Create CA server values organizations" +# Create the CA value file +- name: "Create CA server values" include_role: name: helm_component vars: - name: "{{ ca.name }}" - type: "ca-peer" + type: "ca-server" + name: "ca" + component_name: "{{ ca.name | lower }}" git_protocol: "{{ gitops.git_protocol }}" git_url: "{{ gitops.git_url }}" git_branch: "{{ gitops.branch }}" charts_dir: "{{ gitops.chart_source }}" - external_url_suffix: "{{ item.external_url_suffix }}" - when: component_type == 'peer' + subject: "{{ ca.subject | quote }}" + external_url_suffix: "{{ org.external_url_suffix }}" + kubernetes_url: "{{ kubernetes_server_url.stdout }}" -# Git Push : Push the above generated files to git directory +# Git Push: Push the above generated files to git directory - name: Git Push include_role: name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" vars: GIT_DIR: "{{ playbook_dir }}/../../../" - gitops: "{{ item.gitops }}" - msg: "[ci skip] Pushing CA server files" - tags: - - notest + gitops: "{{ org.gitops }}" + msg: "[ci skip] Pushing CA Server files" diff --git a/platforms/hyperledger-fabric/configuration/roles/create/ca_tools/orderer/tasks/delete_old_certs.yaml b/platforms/hyperledger-fabric/configuration/roles/create/ca_tools/orderer/tasks/delete_old_certs.yaml deleted file mode 100644 index 
be8b4ef733b..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/create/ca_tools/orderer/tasks/delete_old_certs.yaml +++ /dev/null @@ -1,24 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -############################################################################################# -# This role creates value file for the deployment of CA Tools CLI -############################################################################################# - -# Delete crypto materials from vault -- name: Delete Crypto for orderers - shell: | - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/ordererOrganizations/{{ component_name }}/orderers/{{orderer.name}}.{{ component_name }}/tls - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/ordererOrganizations/{{ component_name }}/orderers/{{orderer.name}}.{{ component_name }}/msp - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/ordererOrganizations/{{ component_name }}/users/admin/tls - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/ordererOrganizations/{{ component_name }}/users/admin/msp - loop: "{{ item.services.orderers }}" - loop_control: - loop_var: orderer - environment: - VAULT_ADDR: "{{ item.vault.url }}" - VAULT_TOKEN: "{{ item.vault.root_token }}" - when: component_type == 'orderer' diff --git a/platforms/hyperledger-fabric/configuration/roles/create/ca_tools/orderer/tasks/main.yaml b/platforms/hyperledger-fabric/configuration/roles/create/ca_tools/orderer/tasks/main.yaml deleted file mode 100644 index 0aa47a881f9..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/create/ca_tools/orderer/tasks/main.yaml +++ /dev/null @@ -1,195 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -############################################################################################# -# This role creates value file for the deployment of CA Tools CLI -############################################################################################# - -# Check if CA server is available -- name: "waiting for the CA server to be created in {{ item.name | lower }}-net" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" - vars: - component_type: "Pod" - namespace: "{{ item.name | lower }}-net" - component_name: "{{ component_services.ca.name }}" - label_selectors: - - name = {{ component_name }} - when: add_peer is not defined or add_peer != 'true' - -# Reset ca-tools pod -- name: "Reset ca-tools pod" - include_role: - name: create/refresh_certs/reset_pod - vars: - pod_name: "ca-tools" - file_path: "{{ values_dir }}/{{ pod_name }}/{{ component_name }}.yaml" - gitops_value: "{{ item.gitops }}" - component_ns: "{{ component_name }}" - kubernetes: "{{ item.k8s }}" - hr_name: "{{ component_name }}-ca-tools" - when: refresh_cert is defined and refresh_cert == 'true' - -- name: "Delete old certificates" - include_tasks: delete_old_certs.yaml - vars: - org_name: "{{ item.name | lower }}" - when: refresh_cert is defined and refresh_cert == 'true' - -# Create the CA-tools value files -- name: "Create CA-tools Values for orderer" - include_role: - name: helm_component - vars: - name: "ca-tools" - type: "ca-tools" - org_name: "{{ item.name | lower }}" - component_type: "{{ item.type | lower }}" - vault: "{{ item.vault }}" - external_url_suffix: "{{ item.external_url_suffix }}" - component_subject: "{{ item.subject }}" - cert_subject: "{{ item.subject | regex_replace('/', ';') | regex_replace(',', '/') | regex_replace(';', ',') }}" # replace , to / and / to , for certpath - component_country: "{{ item.country }}" - component_state: "{{ item.state }}" - component_location: "{{ item.location }}" - ca_url: "{{ item.ca_data.url }}" - proxy: "{{ network.env.proxy }}" - git_protocol: "{{ gitops.git_protocol }}" - git_url: "{{ gitops.git_url }}" - git_branch: "{{ gitops.branch }}" - charts_dir: "{{ gitops.chart_source }}" - orderers_list: "{{ item.services.orderers }}" - -# Git Push : Push the above generated files to git directory -- name: Git Push - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" - vars: - GIT_DIR: "{{ playbook_dir }}/../../../" - gitops: "{{ item.gitops }}" - msg: "[ci skip] Pushing CA-tools files" - -# Wait for key certs exists in vault. -- name: Wait for CA key exists in vault. - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/setup" - vars: - vault_field: "{{ component_name }}-CA.key" - vault_path: "{{ vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/{{ component_type }}Organizations/{{ component_name }}/ca" - check: "crypto_materials" - -# Wait for admin tls exists in vault. -- name: Wait for admin tls exists in vault. - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/setup" - vars: - vault_field: "client.key" - vault_path: "{{ vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/{{ component_type }}Organizations/{{ component_name }}/users/admin/tls" - check: "crypto_materials" - -# Wait for orderers tls exists in vault. 
-- name: Wait for orderers tls exists in vault. - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/setup" - vars: - vault_field: "server.key" - vault_path: "{{ vault.secret_path | default('secretv2') }}/{{ item.name | lower }}/{{ component_type }}Organizations/{{ component_name }}/orderers/{{ orderer.name }}.{{ component_name}}/tls" - check: "crypto_materials" - loop: "{{ item.services.orderers }}" - loop_control: - loop_var: orderer - -# Copy the msp admincerts from vault -- name: Fetch the msp admincerts from vault - shell: | - vault kv get -field=admincerts {{ vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/ordererOrganizations/{{ component_name }}/users/admin/msp > Admin@{{ component_name }}-cert.pem - mkdir -p ./build/crypto-config/ordererOrganizations/{{ component_name }}/msp/admincerts/ - mv Admin@{{ component_name }}-cert.pem ./build/crypto-config/ordererOrganizations/{{ component_name }}/msp/admincerts/ - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - -# Copy the msp cacerts from vault -- name: Fetch the msp cacerts from vault - shell: | - vault kv get -field=cacerts {{ vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/ordererOrganizations/{{ component_name }}/users/admin/msp > ca-{{ component_name }}-{{ item.external_url_suffix }}.pem - mkdir -p ./build/crypto-config/ordererOrganizations/{{ component_name }}/msp/cacerts/ - mv ca-{{ component_name }}-{{ item.external_url_suffix }}.pem ./build/crypto-config/ordererOrganizations/{{ component_name }}/msp/cacerts/ - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: network.env.proxy != 'none' - -# Copy the msp tlscacerts from vault -- name: Fetch the msp tlscacerts from vault - shell: | - vault kv get -field=tlscacerts {{ vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/ordererOrganizations/{{ component_name }}/users/admin/msp > ca-{{ component_name }}-{{ item.external_url_suffix }}.pem - mkdir -p ./build/crypto-config/ordererOrganizations/{{ component_name }}/msp/tlscacerts/ - mv ca-{{ component_name }}-{{ item.external_url_suffix }}.pem ./build/crypto-config/ordererOrganizations/{{ component_name }}/msp/tlscacerts/ - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: network.env.proxy != 'none' - -# Copy the msp cacerts from vault proxy is none -- name: Fetch the msp cacerts from vault - shell: | - vault kv get -field=cacerts {{ vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/ordererOrganizations/{{ component_name }}/users/admin/msp > ca-{{ component_name }}-{{ item.services.ca.grpc.port }}.pem - mkdir -p ./build/crypto-config/ordererOrganizations/{{ component_name }}/msp/cacerts/ - mv ca-{{ component_name }}-{{ item.services.ca.grpc.port }}.pem ./build/crypto-config/ordererOrganizations/{{ component_name }}/msp/cacerts/ - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: network.env.proxy == 'none' - -# Copy the msp tlscacerts from vault when proxy is none -- name: Fetch the msp tlscacerts from vault - shell: | - vault kv get -field=tlscacerts {{ vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/ordererOrganizations/{{ component_name }}/users/admin/msp > ca-{{ component_name }}-{{ item.services.ca.grpc.port }}.pem - mkdir -p ./build/crypto-config/ordererOrganizations/{{ component_name }}/msp/tlscacerts/ - mv ca-{{ component_name }}-{{ 
item.services.ca.grpc.port }}.pem ./build/crypto-config/ordererOrganizations/{{ component_name }}/msp/tlscacerts/ - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: network.env.proxy == 'none' - -# Copy the tls server.crt from vault to the build directory -- name: Fetch the tls server.crt from vault - shell: | - vault kv get -field=server.crt {{ vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/ordererOrganizations/{{ component_name }}/orderers/{{ orderer.name }}.{{ component_name }}/tls > server.crt - mkdir -p ./build/crypto-config/ordererOrganizations/{{ component_name }}/orderers/{{ orderer.name }}.{{ component_name }}/tls - mv server.crt ./build/crypto-config/ordererOrganizations/{{ component_name }}/orderers/{{ orderer.name }}.{{ component_name }}/tls/ - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - loop: "{{ item.services.orderers }}" - loop_control: - loop_var: orderer - -# Create the certs directory if it does not exist -- name: Create the certs directory if it does not exist - file: - path: "{{ orderer.certificate | dirname }}" - state: directory - loop: "{{ network.orderers }}" - loop_control: - loop_var: orderer - when: add_new_org == 'false' and add_peer is not defined - -# Copy the tls ca.crt file from the respective CA Tools CLI to the address specified in network.yaml -- name: Fetch the tls ca.crt file from vault - shell: | - vault kv get -field=ca.crt {{ vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/ordererOrganizations/{{ component_name }}/orderers/{{ orderer.name }}.{{ component_name }}/tls > ca.crt - mv ca.crt {{ orderer.certificate }} - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - loop: "{{ network.orderers }}" - loop_control: - loop_var: orderer - when: - - add_new_org == 'false' and add_peer is not defined - - component == orderer.org_name diff --git a/platforms/hyperledger-fabric/configuration/roles/create/ca_tools/peer/tasks/delete_old_certs.yaml b/platforms/hyperledger-fabric/configuration/roles/create/ca_tools/peer/tasks/delete_old_certs.yaml deleted file mode 100644 index d2631cb3169..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/create/ca_tools/peer/tasks/delete_old_certs.yaml +++ /dev/null @@ -1,46 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -############################################################################################# -# This role creates value file for the deployment of CA Tools CLI -############################################################################################# - -# Delete crypto materials from vault -- name: Delete Crypto for peers - shell: | - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/users/admin/tls - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/users/admin/msp - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/orderer/tls - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/msp/config - {% for peer in peers %} - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/peers/{{peer.name}}.{{ component_name }}/tls - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/peers/{{peer.name}}.{{ component_name }}/msp - {% endfor %} - {% for user in users %} - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/users/{{user.identity}}/tls - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/users/{{user.identity}}/msp - {% endfor %} - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/credentials/{{ component_name }}/couchdb/{{ org_name }} - vars: - peers: "{{ item.services.peers }}" - environment: - VAULT_ADDR: "{{ item.vault.url }}" - VAULT_TOKEN: "{{ item.vault.root_token }}" - when: component_type == 'peer' and refresh_cert is defined and refresh_cert == 'true' - -# Delete crypto materials from vault only for users -- name: Delete Crypto for peers - shell: | - {% for user in users %} - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/users/{{user.identity}}/tls - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/users/{{user.identity}}/msp - {% endfor %} - vars: - peers: "{{ item.services.peers }}" - environment: - VAULT_ADDR: "{{ item.vault.url }}" - VAULT_TOKEN: "{{ item.vault.root_token }}" - when: component_type == 'peer' and refresh_user_cert is defined and refresh_user_cert == 'true' diff --git a/platforms/hyperledger-fabric/configuration/roles/create/ca_tools/peer/tasks/main.yaml b/platforms/hyperledger-fabric/configuration/roles/create/ca_tools/peer/tasks/main.yaml deleted file mode 100644 index 337533ade06..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/create/ca_tools/peer/tasks/main.yaml +++ /dev/null @@ -1,245 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -############################################################################################# -# This role creates value file for the deployment of CA Tools CLI -############################################################################################# - -# Check if CA server is available -- name: "waiting for the CA server to be created in {{ item.name | lower }}-net" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" - vars: - component_type: "Pod" - namespace: "{{ item.name | lower }}-net" - component_name: "{{ component_services.ca.name }}" - label_selectors: - - name = {{ component_name }} - when: add_peer is not defined or add_peer != 'true' - -# Reset ca-tools pod -- name: "Reset ca-tools pod" - include_role: - name: create/refresh_certs/reset_pod - vars: - pod_name: "ca-tools" - file_path: "{{ values_dir }}/{{ pod_name }}/{{ component_name }}.yaml" - gitops_value: "{{ item.gitops }}" - component_ns: "{{ component_name }}" - kubernetes: "{{ item.k8s }}" - hr_name: "{{ component_name }}-ca-tools" - when: (add_peer is defined and add_peer == 'true') or (refresh_cert is defined and refresh_cert == 'true') or (refresh_user_cert is defined and refresh_user_cert == 'true') - -# Delete old certificates -- name: "Delete old certificates" - include_tasks: delete_old_certs.yaml - vars: - org_name: "{{ item.name | lower }}" - users: "{{ item.users }}" - when: (refresh_cert is defined and refresh_cert == 'true') or (refresh_user_cert is defined and refresh_user_cert == 'true') - -# Get Orderer certificates -- name: "Get Orderer certificates" - include_tasks: nested_orderers.yaml - loop: "{{ network.orderers }}" - loop_control: - loop_var: orderer - -# Create the certs directory if it does not exist -- name: Create the certs directory if it does not exist - file: - path: "{{ playbook_dir }}/../charts/fabric-catools/certs" - state: directory - -- set_fact: - new_peer_list: [] - -# Loop over the peers and finds the number of new peers -- name: Count new peers - set_fact: - new_peer_list={{ new_peer_list + [ {'peer_name':peer.name } ] }} - loop: "{{ item.services.peers }}" - loop_control: - loop_var: peer - when: - - peer.peerstatus is defined and peer.peerstatus == 'new' - - add_peer is defined and add_peer == 'true' - -# Initialize the list new_peer_list -- name: Count new peers - set_fact: - new_peer_list={{ item.services.peers }} - when: - - add_peer is not defined or add_peer == 'false' - -- set_fact: - new_orderer_list: [] - -# Loop over the orderers and get orderers from the organization provided in the network.yaml -- name: Get orderers from the organization provided in the network.yaml - set_fact: - new_orderer_list={{ new_orderer_list + [orderer] }} - loop: "{{ network.orderers }}" - loop_control: - loop_var: orderer - when: - - orderer_org == orderer.org_name | lower - -# Create the CA-tools value files -- name: "Create CA-tools Values for peer" - include_role: - name: helm_component - vars: - name: "ca-tools" - type: "ca-tools" - org_name: "{{ item.name | lower }}" - component_type: "{{ item.type | lower }}" - vault: "{{ item.vault }}" - external_url_suffix: "{{ item.external_url_suffix }}" - component_subject: "{{ item.subject }}" - cert_subject: "{{ item.subject | regex_replace('/', ';') | regex_replace(',', '/') | regex_replace(';', ',') }}" # Replace '/', ',', and ':' with ';', ',', and ',' 
respectively for certpath - component_country: "{{ item.country }}" - component_state: "{{ item.state }}" - component_location: "{{ item.location }}" - ca_url: "{{ item.ca_data.url }}" - refresh_cert_value: "{{ refresh_cert | default(false) | quote }}" - refresh_user_cert_value: "{{ refresh_user_cert | default(false) | quote }}" - proxy: "{{ network.env.proxy }}" - git_protocol: "{{ item.gitops.git_protocol }}" - git_url: "{{ gitops.git_url }}" - git_branch: "{{ gitops.branch }}" - charts_dir: "{{ gitops.chart_source }}" - peers_list: "{{ item.services.peers }}" - orderers_list: "{{ new_orderer_list }}" - peer_count: "{{ item.services.peers | length }}" - add_peer_value: "{{ add_peer | default(false) | quote }}" - new_peer_count: "{{ new_peer_list | length }}" - user_list: "{{ item.users }}" - -# Git Push : Push the above generated files to git directory -- name: Git Push - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" - vars: - GIT_DIR: "{{ playbook_dir }}/../../../" - gitops: "{{ item.gitops }}" - msg: "[ci skip] Pushing CA-tools files" - -# Wait for CA key exists in vault. -- name: Wait for CA key exists in vault. - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/setup" - vars: - vault_field: "{{ component_name }}-CA.key" - vault_path: "{{ vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/{{ component_type }}Organizations/{{ component_name }}/ca" - check: "crypto_materials" - -# Wait for admin tls exists in vault. -- name: Wait for admin tls exists in vault. - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/setup" - vars: - vault_field: "client.key" - vault_path: "{{ vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/{{ component_type }}Organizations/{{ component_name }}/users/admin/tls" - check: "crypto_materials" - -# Wait for orderers tls exists in vault. -- name: Wait for peers tls exists in vault. - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/setup" - vars: - vault_field: "server.key" - vault_path: "{{ vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/{{ component_type }}Organizations/{{ component_name }}/peers/{{ peer.name }}.{{ component_name }}/tls" - check: "crypto_materials" - loop: "{{ item.services.peers }}" - loop_control: - loop_var: peer - -# Wait for users tls exists in vault. -- name: Wait for users tls exists in vault. 
- include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/setup" - vars: - vault_field: "client.key" - vault_path: "{{ vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/users/{{ user.identity }}/tls" - check: "crypto_materials" - loop: "{{ item.users }}" - loop_control: - loop_var: user - when: item.users is defined - -# Copy msp cacerts to given path -- name: "Copy msp cacerts to given path" - include_tasks: nested_endorsers.yaml - vars: - org_name: "{{ item.name |lower }}" - approvers: "{{ channel.endorsers }}" - loop: "{{ network.channels }}" - loop_control: - loop_var: channel - -# Fetch msp admincerts from vault -- name: Fetch the msp admincerts from vault - shell: | - vault kv get -field=admincerts {{ vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/users/admin/msp > Admin@{{ component_name }}-cert.pem - mkdir -p ./build/crypto-config/peerOrganizations/{{ component_name }}/msp/admincerts/ - mv Admin@{{ component_name }}-cert.pem ./build/crypto-config/peerOrganizations/{{ component_name }}/msp/admincerts/ - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - -# Fetch msp cacerts from vault -- name: Fetch the msp cacerts from vault - shell: | - vault kv get -field=cacerts {{ vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/users/admin/msp > ca-{{ component_name }}-{{ item.external_url_suffix }}.pem - mkdir -p ./build/crypto-config/peerOrganizations/{{ component_name }}/msp/cacerts/ - mv ca-{{ component_name }}-{{ item.external_url_suffix }}.pem ./build/crypto-config/peerOrganizations/{{ component_name }}/msp/cacerts/ - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: network.env.proxy != 'none' - -# Fetch msp tlscacerts from vault -- name: Fetch the msp tlscacerts from vault - shell: | - vault kv get -field=tlscacerts {{ vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/users/admin/msp > ca-{{ component_name }}-{{ item.external_url_suffix }}.pem - mkdir -p ./build/crypto-config/peerOrganizations/{{ component_name }}/msp/tlscacerts/ - mv ca-{{ component_name }}-{{ item.external_url_suffix }}.pem ./build/crypto-config/peerOrganizations/{{ component_name }}/msp/tlscacerts/ - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: network.env.proxy != 'none' - -# Fetch msp cacerts from vault when proxy is none -- name: Fetch the msp cacerts from vault - shell: | - vault kv get -field=cacerts {{ vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/users/admin/msp > ca-{{ component_name }}-{{ item.services.ca.grpc.port }}.pem - mkdir -p ./build/crypto-config/peerOrganizations/{{ component_name }}/msp/cacerts/ - mv ca-{{ component_name }}-{{ item.services.ca.grpc.port }}.pem ./build/crypto-config/peerOrganizations/{{ component_name }}/msp/cacerts/ - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: network.env.proxy == 'none' - -# Fetch msp tlscacerts from vault when proxy is none -- name: Fetch the msp tlscacerts from vault - shell: | - vault kv get -field=tlscacerts {{ vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/users/admin/msp > ca-{{ component_name }}-{{ 
item.services.ca.grpc.port }}.pem - mkdir -p ./build/crypto-config/peerOrganizations/{{ component_name }}/msp/tlscacerts/ - mv ca-{{ component_name }}-{{ item.services.ca.grpc.port }}.pem ./build/crypto-config/peerOrganizations/{{ component_name }}/msp/tlscacerts/ - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: network.env.proxy == 'none' - -# Fetch msp config.yaml file from vault -- name: Fetch msp config.yaml file from vault - shell: | - vault kv get -field=configfile {{ vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/msp/config > config.yaml - mv config.yaml ./build/crypto-config/peerOrganizations/{{ component_name }}/msp/ - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" diff --git a/platforms/hyperledger-fabric/configuration/roles/create/ca_tools/peer/tasks/nested_endorsers.yaml b/platforms/hyperledger-fabric/configuration/roles/create/ca_tools/peer/tasks/nested_endorsers.yaml deleted file mode 100644 index fcd36c9975c..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/create/ca_tools/peer/tasks/nested_endorsers.yaml +++ /dev/null @@ -1,17 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -# Get endorsers data -- name: Get endorsers data - include_tasks: nested_peers.yaml - vars: - org_peers: "{{ item.services.peers }}" - endorsers_peers: "{{ endorser.peers }}" - loop: "{{ approvers }}" - loop_control: - loop_var: endorser - when: org_name == endorser.name diff --git a/platforms/hyperledger-fabric/configuration/roles/create/ca_tools/peer/tasks/nested_orderers.yaml b/platforms/hyperledger-fabric/configuration/roles/create/ca_tools/peer/tasks/nested_orderers.yaml deleted file mode 100644 index b64c55d312a..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/create/ca_tools/peer/tasks/nested_orderers.yaml +++ /dev/null @@ -1,39 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -# Check orderer-certificate file exists -- name: Check that orderer-certificate file exists - stat: - path: "{{ orderer.certificate }}" - register: orderer_file_result - failed_when: add_new_org == 'true' and not orderer_file_result.stat.exists # Fail the task if new_org is added and orderer-cert does not exist - tags: - - notest - -# Check if Orderer certs exists in vault. 
If yes, get the certificate -- name: Check if Orderer certs exist in Vault - shell: | - vault kv get -field=ca.crt {{ vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/orderer/tls - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: orderer_certs_result - ignore_errors: yes - changed_when: false - when: - - not orderer_file_result.stat.exists - tags: - - notest - -# Save Orderer certs if not in Vault -- name: Save Orderer certs if not in Vault - local_action: copy content="{{ orderer_certs_result.results[0].stdout }}" dest="{{ orderer.certificate }}" - when: - - not orderer_file_result.stat.exists - - orderer_certs_result.results[0].failed == False - tags: - - notest diff --git a/platforms/hyperledger-fabric/configuration/roles/create/ca_tools/peer/tasks/nested_peers.yaml b/platforms/hyperledger-fabric/configuration/roles/create/ca_tools/peer/tasks/nested_peers.yaml deleted file mode 100644 index 23771ceee3e..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/create/ca_tools/peer/tasks/nested_peers.yaml +++ /dev/null @@ -1,32 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -# Set a variable -- set_fact: - endorsers_peers_data: [] - -# Get peers from the endorser -- name: Get endorsers peers - set_fact: - endorsers_peers_data={{ endorsers_peers_data + [ {'name':peer.name | lower, 'certificate':peer.certificate } ] }} - loop: "{{ endorsers_peers }}" - loop_control: - loop_var: peer - when: peer.certificate is defined - -# Copy the certificates in the path provided in the network.yaml -- name: "Copy certificates" - include_tasks: write.yaml - vars: - endorsers_peers: "{{ endorsers_peers_data }}" - loop: "{{ org_peers }}" - loop_control: - loop_var: org_peer - when: - - org_name == endorser.name - - endorsers_peers_data is defined - - endorsers_peers_data|length > 0 diff --git a/platforms/hyperledger-fabric/configuration/roles/create/ca_tools/peer/tasks/write.yaml b/platforms/hyperledger-fabric/configuration/roles/create/ca_tools/peer/tasks/write.yaml deleted file mode 100644 index 80a3ed56388..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/create/ca_tools/peer/tasks/write.yaml +++ /dev/null @@ -1,34 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -# Create the certs directory if it does not exist -- name: Create the certs directory if it does not exist - file: - path: "{{ peer.certificate | dirname }}" - state: directory - loop: "{{ endorsers_peers }}" - loop_control: - loop_var: peer - when: - - item.org_status == 'new' - - org_peer.peerstatus is not defined or org_peer.peerstatus == 'new' - - org_peer.name == peer.name - -# Get msp config.yaml file from vault -- name: Get msp cacerts - shell: | - vault kv get -field=cacerts {{ vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/peers/{{ peer.name }}.{{ component_name }}/msp > {{ peer.certificate }} - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - loop: "{{ endorsers_peers }}" - loop_control: - loop_var: peer - when: - - item.org_status == 'new' - - org_peer.peerstatus is not defined or org_peer.peerstatus == 'new' - - org_peer.name == peer.name diff --git a/platforms/hyperledger-fabric/configuration/roles/create/channel_artifacts/tasks/fetch_orderers_certs.yaml b/platforms/hyperledger-fabric/configuration/roles/create/channel_artifacts/tasks/fetch_orderers_certs.yaml deleted file mode 100644 index 1129576aec2..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/create/channel_artifacts/tasks/fetch_orderers_certs.yaml +++ /dev/null @@ -1,43 +0,0 @@ - -# Fetch msp files from Vault -- name: Check if orderers tls certs already created - shell: | - vault kv get -field=server.crt {{ organization.vault.secret_path | default('secretsv2') }}/{{ organization.name | lower }}/ordererOrganizations/{{ organization.name | lower }}-net/orderers/{{ orderer.name }}.{{ organization.name | lower }}-net/tls > server.crt - mkdir -p ./build/crypto-config/ordererOrganizations/{{ organization.name | lower }}-net/orderers/{{ orderer.name }}.{{ organization.name | lower }}-net/tls - mv server.crt ./build/crypto-config/ordererOrganizations/{{ organization.name | lower }}-net/orderers/{{ orderer.name }}.{{ organization.name | lower }}-net/tls - environment: - VAULT_ADDR: "{{ organization.vault.url }}" - VAULT_TOKEN: "{{ organization.vault.root_token }}" - loop: "{{ orderers }}" - loop_control: - loop_var: orderer - -# Fetch msp files from Vault -- name: Check if msp admincerts already created - shell: | - vault kv get -field=admincerts {{ organization.vault.secret_path | default('secretsv2') }}/{{ organization.name | lower }}/ordererOrganizations/{{ organization.name | lower }}-net/users/admin/msp > Admin@{{ organization.name | lower }}-net-cert.pem - mkdir -p ./build/crypto-config/ordererOrganizations/{{ organization.name | lower }}-net/msp/admincerts/ - mv Admin@{{ organization.name | lower }}-net-cert.pem ./build/crypto-config/ordererOrganizations/{{ organization.name | lower }}-net/msp/admincerts/ - environment: - VAULT_ADDR: "{{ organization.vault.url }}" - VAULT_TOKEN: "{{ organization.vault.root_token }}" - -# Fetch msp files from Vault -- name: Check if msp cacerts already created - shell: | - vault kv get -field=cacerts {{ organization.vault.secret_path | default('secretsv2') }}/{{ organization.name | lower }}/ordererOrganizations/{{ organization.name | lower }}-net/users/admin/msp > ca-{{ organization.name | lower }}-net-{{ organization.services.ca.grpc.port }}.pem - mkdir -p ./build/crypto-config/ordererOrganizations/{{ organization.name | lower 
}}-net/msp/cacerts/ - mv ca-{{ organization.name | lower }}-net-{{ organization.services.ca.grpc.port }}.pem ./build/crypto-config/ordererOrganizations/{{ organization.name | lower }}-net/msp/cacerts/ - environment: - VAULT_ADDR: "{{ organization.vault.url }}" - VAULT_TOKEN: "{{ organization.vault.root_token }}" - -# Fetch msp files from Vault -- name: Check if msp tlscacerts already created - shell: | - vault kv get -field=tlscacerts {{ organization.vault.secret_path | default('secretsv2') }}/{{ organization.name | lower }}/ordererOrganizations/{{ organization.name | lower }}-net/users/admin/msp > ca-{{ organization.name | lower }}-net-{{ organization.services.ca.grpc.port }}.pem - mkdir -p ./build/crypto-config/ordererOrganizations/{{ organization.name | lower }}-net/msp/tlscacerts/ - mv ca-{{ organization.name | lower }}-net-{{ organization.services.ca.grpc.port }}.pem ./build/crypto-config/ordererOrganizations/{{ organization.name | lower }}-net/msp/tlscacerts/ - environment: - VAULT_ADDR: "{{ organization.vault.url }}" - VAULT_TOKEN: "{{ organization.vault.root_token }}" diff --git a/platforms/hyperledger-fabric/configuration/roles/create/channel_artifacts/tasks/main.yaml b/platforms/hyperledger-fabric/configuration/roles/create/channel_artifacts/tasks/main.yaml deleted file mode 100644 index da5a05fa56f..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/create/channel_artifacts/tasks/main.yaml +++ /dev/null @@ -1,169 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -############################################################################################## -# This task ensures that the directory exists, and creates it, if it does not exist -############################################################################################## - -# Create the build directory if it does not exist -- name: Create build directory if it does not exist - file: - path: "./build" - state: directory - -# Check configtxgen -- name: Check configtxgen - stat: - path: "{{ build_path }}/configtxgen" - register: config_stat_result - -# Register temporary directory -- name: Register temporary directory - tempfile: - state: directory - register: tmp_directory - -# Fetch orderers files from Vault -- name: "Check if orderers certs already created" - include_tasks: fetch_orderers_certs.yaml - vars: - orderers: "{{ organization.services.orderers }}" - loop: "{{ network['organizations'] }}" - loop_control: - loop_var: organization - when: - - fetch_certs == 'true' and '2.5.' 
in network.version - - organization.name == item.osn_creator_org.name - -# Fetch msp files from Vault -- name: Check if msp admincerts already created - vars: - query: "organizations[?name=='{{organization.name}}'].services.ca.grpc.port" - query_vault_url: "organizations[?name=='{{organization.name}}'].vault.url" - query_vault_token: "organizations[?name=='{{organization.name}}'].vault.root_token" - query_vault_secretPath: "organizations[?name=='{{organization.name}}'].vault.secret_path" - shell: | - vault kv get -field=admincerts {{ network | json_query(query_vault_secretPath) | first | default('secretsv2') }}/{{ organization.name | lower }}/peerOrganizations/{{ organization.name | lower }}-net/users/admin/msp > Admin@{{ organization.name | lower }}-net-cert.pem - mkdir -p ./build/crypto-config/peerOrganizations/{{ organization.name | lower }}-net/msp/admincerts/ - mv Admin@{{ organization.name | lower }}-net-cert.pem ./build/crypto-config/peerOrganizations/{{ organization.name | lower }}-net/msp/admincerts/ - environment: - VAULT_ADDR: "{{ network | json_query(query_vault_url) | first }}" - VAULT_TOKEN: "{{ network | json_query(query_vault_token) | first }}" - loop: "{{ item['participants'] }}" - loop_control: - loop_var: organization - when: fetch_certs == 'true' - -# Fetch msp files from Vault -- name: Check if msp cacerts already created - vars: - query: "organizations[?name=='{{organization.name}}'].services.ca.grpc.port" - query_vault_url: "organizations[?name=='{{organization.name}}'].vault.url" - query_vault_token: "organizations[?name=='{{organization.name}}'].vault.root_token" - query_vault_secretPath: "organizations[?name=='{{organization.name}}'].vault.secret_path" - shell: | - vault kv get -field=cacerts {{ network | json_query(query_vault_secretPath) | first | default('secretsv2') }}/{{ organization.name | lower }}/peerOrganizations/{{ organization.name | lower }}-net/users/admin/msp > ca-{{ organization.name | lower }}-net-{{ network | json_query(query) | first }}.pem - mkdir -p ./build/crypto-config/peerOrganizations/{{ organization.name | lower }}-net/msp/cacerts/ - mv ca-{{ organization.name | lower }}-net-{{ network | json_query(query) | first }}.pem ./build/crypto-config/peerOrganizations/{{ organization.name | lower }}-net/msp/cacerts/ - environment: - VAULT_ADDR: "{{ network | json_query(query_vault_url) | first }}" - VAULT_TOKEN: "{{ network | json_query(query_vault_token) | first }}" - loop: "{{ item['participants'] }}" - loop_control: - loop_var: organization - when: fetch_certs == 'true' - -# Fetch msp files from Vault -- name: Check if msp tlscacerts already created - vars: - query: "organizations[?name=='{{organization.name}}'].services.ca.grpc.port" - query_vault_url: "organizations[?name=='{{organization.name}}'].vault.url" - query_vault_token: "organizations[?name=='{{organization.name}}'].vault.root_token" - query_vault_secretPath: "organizations[?name=='{{organization.name}}'].vault.secret_path" - shell: | - vault kv get -field=tlscacerts {{ network | json_query(query_vault_secretPath) | first | default('secretsv2') }}/{{ organization.name | lower }}/peerOrganizations/{{ organization.name | lower }}-net/users/admin/msp > ca-{{ organization.name | lower }}-net-{{ network | json_query(query) | first }}.pem - mkdir -p ./build/crypto-config/peerOrganizations/{{ organization.name | lower }}-net/msp/tlscacerts/ - mv ca-{{ organization.name | lower }}-net-{{ network | json_query(query) | first }}.pem ./build/crypto-config/peerOrganizations/{{ organization.name | 
lower }}-net/msp/tlscacerts/ - environment: - VAULT_ADDR: "{{ network | json_query(query_vault_url) | first }}" - VAULT_TOKEN: "{{ network | json_query(query_vault_token) | first }}" - loop: "{{ item['participants'] }}" - loop_control: - loop_var: organization - when: fetch_certs == 'true' - -############################################################################################ -# Fetch the configtx gen tar file from the mentioned URL -- name: "Getting the configtxgen binary tar" - get_url: - url: https://github.com/hyperledger/fabric/releases/download/v{{network.version}}/hyperledger-fabric-{{install_os}}-{{install_arch}}-{{network.version}}.tar.gz - dest: "{{ tmp_directory.path }}" - when: config_stat_result.stat.exists == False - - -############################################################################################ -# Unzip the above downloaded tar file -- name: "Unziping the downloaded file" - unarchive: - src: "{{ tmp_directory.path }}/hyperledger-fabric-{{install_os}}-{{install_arch}}-{{network.version}}.tar.gz" - dest: "{{ tmp_directory.path }}" - when: config_stat_result.stat.exists == False - -############################################################################################ -# Extract the configtxgen binary and place it at appropriate path -- name: "Moving the configtxgen from the extracted folder and place in it path" - copy: - src: "{{ tmp_directory.path }}/bin/configtxgen" - dest: "{{ build_path }}/configtxgen" - mode: 0755 - when: config_stat_result.stat.exists == False - -############################################################################################ -# Create the channel-artifacts folder -- name: "Creating channel-artifacts folder" - file: - path: "{{ build_path }}/channel-artifacts" - state: directory - -# Remove old channel block -- name: Remove old channel block - file: - path: "{{ build_path }}/channel-artifacts/{{channel_name}}.tx" - state: absent - when: add_new_org == 'false' - -############################################################################################ -# Create channel by consuming the configtx.yaml file -- name: "Creating channels" - shell: | - cd {{ build_path }} - ./configtxgen -profile {{ profile_name }} -outputCreateChannelTx ./channel-artifacts/{{channel_name}}.tx -channelID {{channel_name}} - cat ./channel-artifacts/{{channel_name}}.tx | base64 > ./channel-artifacts/{{channel_name}}.tx.base64 - when: add_new_org == 'false' - -############################################################################################ -# Create the anchortx files -- name: "Creating Anchor artifacts" - shell: | - cd {{ build_path }} - ./configtxgen -profile {{ profile_name }} -outputAnchorPeersUpdate ./channel-artifacts/{{channel_name}}{{participant.name}}MSPAnchor.tx -channelID {{channel_name}} -asOrg {{participant.name}}MSP -configPath ./ - cat ./channel-artifacts/{{channel_name}}{{participant.name}}MSPAnchor.tx | base64 > ./channel-artifacts/{{channel_name}}{{participant.name}}MSPAnchor.tx.base64 - loop: "{{ item.participants }}" - loop_control: - loop_var: participant - when: add_new_org == 'false' - -# Create the channel by consuming the configtx.yaml file -- name: "Creating JSON configuration for new organization" - shell: | - cd {{ build_path }} - export FABRIC_CFG_PATH=$PWD - ./configtxgen -printOrg {{ participant.name }}MSP > ./channel-artifacts/{{ channel_name | lower }}.json - loop: "{{ item.participants }}" - loop_control: - loop_var: participant - register: result - when: participant.org_status == 'new' and 
add_new_org == 'true' diff --git a/platforms/hyperledger-fabric/configuration/roles/create/channels/tasks/peercheck.yaml b/platforms/hyperledger-fabric/configuration/roles/create/channels/tasks/peercheck.yaml index 1fd57b7d56b..21c738e9dc8 100644 --- a/platforms/hyperledger-fabric/configuration/roles/create/channels/tasks/peercheck.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/create/channels/tasks/peercheck.yaml @@ -4,6 +4,11 @@ # SPDX-License-Identifier: Apache-2.0 ############################################################################################## +# Set Variable charts_dir +- name: "Set Variable charts_dir" + set_fact: + charts_dir: "{{ org.gitops.chart_source }}" + # Waiting for the creation of peer pod - name: "Waiting for peer pod {{ peer_name }} in {{ org.name | lower }}-net" include_role: diff --git a/platforms/hyperledger-fabric/configuration/roles/create/channels/tasks/valuefile.yaml b/platforms/hyperledger-fabric/configuration/roles/create/channels/tasks/valuefile.yaml index 4eabac444a9..a51483a20b8 100644 --- a/platforms/hyperledger-fabric/configuration/roles/create/channels/tasks/valuefile.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/create/channels/tasks/valuefile.yaml @@ -19,7 +19,7 @@ loop: "{{ network['organizations'] }}" loop_control: loop_var: ordererorg - when: ordererorg.type == 'orderer' + when: ordererorg.services.orderers is defined and ordererorg.services.orderers | length > 0 # Check or wait for the peer - name: "Check peer pod is up" @@ -31,10 +31,38 @@ loop_control: loop_var: org +# Get the channeltx file from the orderer's config map (the orderer data is fetched in a separate task, then used below) +- name: Get channeltx file from config map + kubernetes.core.k8s_info: + kubeconfig: "{{ orderer_kubeconfig }}" + kind: ConfigMap + name: "{{ channel_name }}-channeltx" + namespace: "{{ orderer_namespace}}" + loop: "{{ network['organizations'] }}" + loop_control: + loop_var: ordererorg + vars: + orderer_namespace: "{{ ordererorg.name | lower }}-net" + orderer_kubeconfig: "{{ ordererorg.k8s.config_file }}" + orderer_context: "{{ ordererorg.k8s.context }}" + register: channeltx_data + +# Create the files directory if it does not exist +- name: Create the certs directory if it does not exist + file: + path: "{{playbook_dir}}/../../../{{ charts_dir }}/fabric-channel-create/files" + state: directory + +- name: Save channeltx file locally for {{ channel_name }} + shell: | + echo '{{ channeltx_data.results[0].resources[0].data[channel_name ~ "-channeltx_base64"] | to_nice_json }}' > {{ files_loc }}/channeltx.json + vars: + files_loc: "{{playbook_dir}}/../../../{{ charts_dir }}/fabric-channel-create/files" + # Create the value file for creator Organization - name: "Create Create_Channel value file" include_role: - name: helm_component + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/job_component" loop: "{{ network | json_query(org_query) }}" loop_control: loop_var: org @@ -43,16 +71,18 @@ name: "{{ org.name | lower }}" type: "create_channel_job" component_name: "{{item.channel_name|lower}}" + channel_name: "{{item.channel_name}}" component_ns: "{{ peer.name | lower}}-net" peer_name: "{{ peer | json_query('peers[*].name') | first }}" + peer_adress: "{{ peer | json_query('peers[*].peerAddress') | first }}" git_protocol: "{{ org.gitops.git_protocol }}" git_url: "{{ org.gitops.git_url }}" git_branch: "{{ org.gitops.branch }}" charts_dir: "{{ org.gitops.chart_source }}" - vault: "{{ org.vault }}" - k8s: "{{ org.k8s }}" - channeltx:
"{{ lookup('file', '{{ build_path }}/channel-artifacts/{{item.channel_name|lower}}.tx.base64') }}" values_dir: "{{playbook_dir}}/../../../{{org.gitops.release_dir}}/{{ org.name | lower }}" + provider: "{{ org.cloud_provider }}" + vault: "{{ org.vault }}" + kubernetes: "{{ org.k8s }}" # Git Push : Push the above generated files to git directory - name: Git Push diff --git a/platforms/hyperledger-fabric/configuration/roles/create/channels_join/tasks/check.yaml b/platforms/hyperledger-fabric/configuration/roles/create/channels_join/tasks/check.yaml index 88cfa4e5326..b73cc89987e 100644 --- a/platforms/hyperledger-fabric/configuration/roles/create/channels_join/tasks/check.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/create/channels_join/tasks/check.yaml @@ -15,8 +15,7 @@ vars: component_type: "Job" namespace: "{{ participant.name | lower }}-net" - component_name: "joinchannel-{{ peer.name }}-{{ channel_name }}" - kubernetes: "{{ org.k8s }}" + component_name: "{{ channel_name }}-{{ participant.name }}-{{ peer.name }}" loop: "{{ participant.peers }}" loop_control: loop_var: peer @@ -29,7 +28,6 @@ vars: namespace: "{{ participant.name | lower }}-net" pod_name: "{{ peer.name }}-0" - kubernetes: "{{ org.k8s }}" loop: "{{ participant.peers }}" loop_control: loop_var: peer diff --git a/platforms/hyperledger-fabric/configuration/roles/create/channels_join/tasks/main.yaml b/platforms/hyperledger-fabric/configuration/roles/create/channels_join/tasks/main.yaml index e9f6eced5ee..0fb48627fe6 100644 --- a/platforms/hyperledger-fabric/configuration/roles/create/channels_join/tasks/main.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/create/channels_join/tasks/main.yaml @@ -11,6 +11,7 @@ channel_name: "{{ item.channel_name | lower }}" org_query: "organizations[?name=='{{participant.name}}']" org: "{{ network | json_query(org_query) | first }}" + kubernetes: "{{ org.k8s }}" loop: "{{ participants }}" loop_control: loop_var: participant @@ -24,6 +25,7 @@ channel_name: "{{ item.channel_name | lower }}" org_query: "organizations[?name=='{{participant.name}}']" org: "{{ network | json_query(org_query) | first }}" + kubernetes: "{{ org.k8s }}" loop: "{{ participants }}" loop_control: loop_var: participant diff --git a/platforms/hyperledger-fabric/configuration/roles/create/channels_join/tasks/nested_channel_join.yaml b/platforms/hyperledger-fabric/configuration/roles/create/channels_join/tasks/nested_channel_join.yaml index 326ea32d05b..e318dffba2c 100644 --- a/platforms/hyperledger-fabric/configuration/roles/create/channels_join/tasks/nested_channel_join.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/create/channels_join/tasks/nested_channel_join.yaml @@ -14,21 +14,57 @@ vars: component_type: "Job" namespace: "{{ participant.name | lower}}-net" - component_name: "createchannel-{{ channel_name }}" + component_name: "{{ channel_name }}" kubernetes: "{{ org.k8s }}" when: - participant.type == 'creator' and ('2.2.' in network.version or '1.4.' 
in network.version) - participant.org_status is not defined or participant.org_status == 'new' +# Get the anchortx file from the orderer's config map (the orderer data is fetched in a separate task, then used below) +- name: Get anchortx file from config map + kubernetes.core.k8s_info: + kubeconfig: "{{ orderer_kubeconfig }}" + kind: ConfigMap + name: "{{ channel_name }}-{{ participant.name | lower }}-anchortx" + namespace: "{{ orderer_namespace}}" + loop: "{{ network['organizations'] }}" + loop_control: + loop_var: ordererorg + vars: + orderer_namespace: "{{ ordererorg.name | lower }}-net" + orderer_kubeconfig: "{{ ordererorg.k8s.config_file }}" + orderer_context: "{{ ordererorg.k8s.context }}" + register: anchortx_data + when: + - (participant.org_status is not defined or participant.org_status == 'new') and ('2.2.' in network.version or '1.4.' in network.version) + +# Create the files directory if it does not exist +- name: Create the certs directory if it does not exist + file: + path: "{{playbook_dir}}/../../../{{ charts_dir }}/fabric-channel-join/files" + state: directory + when: + - (participant.org_status is not defined or participant.org_status == 'new') and ('2.2.' in network.version or '1.4.' in network.version) + +- name: Save anchortx file locally for {{ channel_name }} + shell: | + echo '{{ anchortx_data.results[0].resources[0].data[field_name ~ "-anchortx_base64"] | to_nice_json }}' > {{ files_loc }}/anchortx.json + vars: + files_loc: "{{playbook_dir}}/../../../{{ charts_dir }}/fabric-channel-join/files" + field_name: "{{ channel_name }}-{{ participant.name | lower}}" + when: + - (participant.org_status is not defined or participant.org_status == 'new') and ('2.2.' in network.version or '1.4.' in network.version) + # Create the join channel value file for each participating peer - name: "join channel {{ channel_name }}" include_role: - name: helm_component + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/job_component" vars: name: "{{ participant.name }}" type: "join_channel_job" - component_name: "join-{{ channel_name }}-{{ participant.name }}-{{ peer.name }}" + component_name: "{{ channel_name }}-{{ participant.name }}-{{ peer.name }}" peer_name: "{{ peer.name }}" + peer_type: "{{ peer.type }}" component_ns: "{{ participant.name | lower}}-net" git_protocol: "{{ org.gitops.git_protocol }}" git_url: "{{ org.gitops.git_url }}" git_branch: "{{ org.gitops.branch }}" diff --git a/platforms/hyperledger-fabric/configuration/roles/create/cli_pod/tasks/main.yaml b/platforms/hyperledger-fabric/configuration/roles/create/cli_pod/tasks/main.yaml deleted file mode 100644 index bac5f7132ff..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/create/cli_pod/tasks/main.yaml +++ /dev/null @@ -1,69 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved.
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -############################################################################################# -# This role creates value file for Cli pods -############################################################################################ -# CREATE CLI POD # -############################################################################################ - -# Reset peers pods -- name: "Reset peers pods" - include_role: - name: create/refresh_certs/reset_pod - vars: - pod_name: "cli" - file_path: "{{ values_dir }}/{{ pod_name }}/{{ peer.name | lower}}-{{ org.name | lower }}-cli.yaml" - gitops_value: "{{ org.gitops }}" - component_ns: "{{ org.name | lower}}-net" - kubernetes: "{{ org.k8s }}" - hr_name: "{{ peer.name | lower}}-{{ org.name | lower }}-cli" - loop: "{{ peers }}" - loop_control: - loop_var: peer - when: - - refresh_cert is defined and refresh_cert == 'true' - - peer.cli is defined - - peer.cli == "enabled" - -# Create the value file for the cli pod as per requirements mentioned in network.yaml -- name: "Create Value file for CLI Pod" - include_role: - name: helm_component - vars: - name: "cli" - component_name: "{{ peer.name | lower}}-{{ org.name | lower }}-cli" - orderer: "{{ network.orderers | first }}" - component_ns: "{{ org.name | lower}}-net" - git_protocol: "{{ org.gitops.git_protocol }}" - git_url: "{{ org.gitops.git_url }}" - git_branch: "{{ org.gitops.branch }}" - charts_dir: "{{ org.gitops.chart_source }}" - vault: "{{ org.vault }}" - sc_name: "{{ org.name | lower }}-bevel-storageclass" - values_dir: "{{playbook_dir}}/../../../{{org.gitops.release_dir}}/{{ org.name | lower }}" - type: "cli" - external_url_suffix: "{{ org.external_url_suffix }}" - loop: "{{ peers }}" - loop_control: - loop_var: peer - when: - - peer.peerstatus is not defined or peer.peerstatus == 'new' - - peer.cli is defined - - peer.cli == "enabled" - -# Git Push : Push the above generated files to git directory -- name: Git Push - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" - vars: - GIT_DIR: "{{ playbook_dir }}/../../../" - gitops: "{{ org.gitops }}" - msg: "[ci skip] Pushing CLI value files" - loop: "{{ peers }}" - loop_control: - loop_var: peer - when: peer.cli is defined and peer.cli == "enabled" diff --git a/platforms/hyperledger-fabric/configuration/roles/create/configtx/tasks/main.yaml b/platforms/hyperledger-fabric/configuration/roles/create/configtx/tasks/main.yaml deleted file mode 100644 index a6d99caa40c..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/create/configtx/tasks/main.yaml +++ /dev/null @@ -1,125 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -############################################################################################## -# This role creates configtx.yaml file which is consumed by configtxgen tool -############################################################################################## - -############################################################################################## -# Create the configtx.yaml file as the requirements mentioned in network.yaml -# file. 
The configtx.yaml file is consumed by the configtxgen binary to generate the -# genesis block and channels. -############################################################################################## - -# Create the build directory if it does not exist -- name: Create build directory if it does not exist - file: - path: "./build" - state: directory - -# Remove old configtx file -- name: "Remove old configtx file" - file: - path: "{{ config_file }}" - state: absent - -# The tasks add the required data patch by patch to the configtx.yaml file to generate it. -- name: "create configtx.yaml file" - file: - path: "{{ config_file }}" - state: touch - -# Copy custom files if present -- name: "Copy custom files if present" - copy: - src: "{{ network.configtx.folder_path }}" - dest: ./roles/create/configtx/templates - when: network.configtx is defined and network.configtx.custom == true - -# Checking if custom init patch to configtx.yaml exists -- name: "Checking if custom init patch to configtx.yaml exists" - stat: - path: "{{ network.configtx.folder_path }}configtxinit_custom.tpl" - register: cfinit - when: network.configtx is defined and network.configtx.custom == true - -# Add init patch to configtx.yaml -- name: "Adding init patch to configtx.yaml" - blockinfile: - dest: "{{ config_file }}" - block: "{{ lookup('template', filename) }}" - marker: "#" - vars: - consensus: "{{ org.services.consensus }}" - org_query: "organizations[?type=='orderer']" - org: "{{ network | json_query(org_query) | first }}" - filename: configtxinit_{{ 'custom' if network.configtx is defined and network.configtx.custom == true and cfinit.stat.exists else 'default' }}.tpl - -# Check if custom init patch to configtx.yaml exists -- name: "Checking if custom init patch to configtx.yaml exists" - stat: - path: "{{ network.configtx.folder_path }}configtxOrg_custom.tpl" - register: cforg - when: network.configtx is defined and network.configtx.custom == true - -# Add organization patch to configtx.yaml -- name: "Adding organization patch to configtx.yaml" - blockinfile: - dest: "{{ config_file }}" - block: "{{ lookup('template', filename) }}" - marker: "#" - vars: - component_name: "{{ item.name }}" - component_ns: "{{ item.name | lower }}-net" - component_type: "{{ item.type | lower }}" - orderers: "{{ network.orderers }}" - provider: "{{ network.env.proxy }}" - filename: configtxOrg_{{ 'custom' if network.configtx is defined and network.configtx.custom == true and cforg.stat.exists else 'default' }}.tpl - loop: "{{ network['organizations'] }}" - -# Check if custom init patch to configtx.yaml exists -- name: "Checking if custom init patch to configtx.yaml exists" - stat: - path: "{{ network.configtx.folder_path }}configtxOrderer_custom.tpl" - register: cford - when: network.configtx is defined and network.configtx.custom == true - -# Add orderer patch to configtx.yaml -- name: "Adding orderer patch to configtx.yaml" - blockinfile: - dest: "{{ config_file }}" - block: "{{ lookup('template', filename) }}" - marker: "#" - vars: - orderers: "{{ network.orderers }}" - consensus: "{{ network.consensus }}" - provider: "{{ network.env.proxy }}" - filename: configtxOrderer_{{ 'custom' if network.configtx is defined and network.configtx.custom == true and cford.stat.exists else 'default' }}.tpl - -# Check if custom init patch to configtx.yaml exists -- name: "Checking if custom init patch to configtx.yaml exists" - stat: - path: "{{ network.configtx.folder_path }}configtxProfile_custom.tpl" - register: cfprofile - when: 
network.configtx is defined and network.configtx.custom == true - -# Add profile patch to configtx.yaml -- name: "Adding profile patch to configtx.yaml" - blockinfile: - dest: "{{ config_file }}" - block: "{{ lookup('template', filename) }}" - marker: "#" - vars: - orderers: "{{ network.orderers }}" - consensus: "{{ network.consensus }}" - provider: "{{ network.env.proxy }}" - filename: configtxProfile_{{ 'custom' if network.configtx is defined and network.configtx.custom == true and cfprofile.stat.exists else 'default' }}.tpl - when: network.channels is defined - -# Display configtx file contents -- name: Display configtx file contents - debug: - msg: "The configtx file is: {{ lookup('file', './build/configtx.yaml') }}" diff --git a/platforms/hyperledger-fabric/configuration/roles/create/configtx/templates/configtxOrderer_default.tpl b/platforms/hyperledger-fabric/configuration/roles/create/configtx/templates/configtxOrderer_default.tpl deleted file mode 100644 index 18245f54937..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/create/configtx/templates/configtxOrderer_default.tpl +++ /dev/null @@ -1,63 +0,0 @@ -Orderer: &OrdererDefaults -{% if consensus.name == 'raft' %} - OrdererType: etcdraft -{% else %} - OrdererType: {{ consensus.name }} -{% endif %} - Addresses: -{% for orderer in orderers %} -{% if provider == 'none' %} - - {{ orderer.name }}.{{ orderer.org_name | lower }}-net:7050 -{% else %} - - {{ orderer.uri }} -{% endif %} -{% endfor %} - BatchTimeout: 2s - BatchSize: - MaxMessageCount: 10 - AbsoluteMaxBytes: 98 MB - PreferredMaxBytes: 1024 KB -{% if consensus.name == 'kafka' %} - Kafka: - Brokers: -{% for org in network.organizations %} -{% if org.services.orderers is defined and org.services.orderers|length > 0 %} -{% for i in range(consensus.replicas) %} - - {{ consensus.name }}-{{ i }}.{{ consensus.type }}.{{ org.name |lower }}-net.svc.cluster.local:{{ consensus.grpc.port }} -{% endfor %} -{% endif %} -{% endfor %} -{% endif %} -{% if consensus.name == 'raft' %} - EtcdRaft: - Consenters: -{% for orderer in orderers %} -{% set component_ns = orderer.org_name.lower() + '-net' %} -{% if provider == 'none' %} - - Host: {{orderer.name}}.{{ component_ns }} - Port: 7050 -{% else %} -{% set path = orderer.uri.split(':') %} - - Host: {{ path[0] }} - Port: {{ path[1] }} -{% endif %} - ClientTLSCert: ./crypto-config/ordererOrganizations/{{ component_ns }}/orderers/{{ orderer.name }}.{{ component_ns }}/tls/server.crt - ServerTLSCert: ./crypto-config/ordererOrganizations/{{ component_ns }}/orderers/{{ orderer.name }}.{{ component_ns }}/tls/server.crt -{% endfor %} -{% endif %} - Organizations: - Policies: - Readers: - Type: ImplicitMeta - Rule: "ANY Readers" - Writers: - Type: ImplicitMeta - Rule: "ANY Writers" - Admins: - Type: ImplicitMeta - Rule: "MAJORITY Admins" - BlockValidation: - Type: ImplicitMeta - Rule: "ANY Writers" - Capabilities: - <<: *OrdererCapabilities diff --git a/platforms/hyperledger-fabric/configuration/roles/create/configtx/templates/configtxOrg_default.tpl b/platforms/hyperledger-fabric/configuration/roles/create/configtx/templates/configtxOrg_default.tpl deleted file mode 100644 index ffc192806db..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/create/configtx/templates/configtxOrg_default.tpl +++ /dev/null @@ -1,45 +0,0 @@ - - &{{ component_name }}Org - Name: {{ component_name }}MSP - ID: {{ component_name }}MSP - MSPDir: ./crypto-config/{{ component_type }}Organizations/{{ component_ns }}/msp - Policies: - Readers: - Type: 
Signature - Rule: "OR('{{ component_name }}MSP.member')" - Writers: - Type: Signature - Rule: "OR('{{ component_name }}MSP.member')" - Admins: - Type: Signature - Rule: "OR('{{ component_name }}MSP.admin')" - Endorsement: - Type: Signature - Rule: "OR('{{ component_name }}MSP.member')" -{% if component_type == 'peer' and '2.5' not in network.version %} - AnchorPeers: - # AnchorPeers defines the location of peers which can be used - # for cross org gossip communication. Note, this value is only - # encoded in the genesis block in the Application section context -{% for peer in item.services.peers %} -{% if peer.type == 'anchor' %} -{% if provider == 'none' %} - - Host: {{ peer.name }}.{{ component_ns }} - Port: 7051 -{% else %} -{% set path = peer.peerAddress.split(':') %} - - Host: {{ path[0] }} - Port: {{ path[1] }} -{% endif %} -{% endif %} -{% endfor %} -{% endif %} -{% if component_type == 'orderer' %} - OrdererEndpoints: -{% for orderer in orderers %} -{% if provider == 'none' %} - - {{ orderer.name }}.{{ orderer.org_name | lower }}-net:7050 -{% else %} - - {{ orderer.uri }} -{% endif %} -{% endfor %} -{% endif %} diff --git a/platforms/hyperledger-fabric/configuration/roles/create/configtx/templates/configtxProfile_default.tpl b/platforms/hyperledger-fabric/configuration/roles/create/configtx/templates/configtxProfile_default.tpl deleted file mode 100644 index bccfbba8670..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/create/configtx/templates/configtxProfile_default.tpl +++ /dev/null @@ -1,51 +0,0 @@ -Profiles: -{% for channel in network.channels %} - {{channel.genesis.name}}: - <<: *ChannelDefaults - Orderer: - <<: *OrdererDefaults -{% if consensus.name == 'raft' %} - OrdererType: etcdraft - EtcdRaft: - Consenters: -{% for orderer in orderers %} -{% set component_ns = orderer.org_name.lower() + '-net' %} -{% if provider == 'none' %} - - Host: {{orderer.name}}.{{ component_ns }} - Port: 7050 -{% else %} -{% set path = orderer.uri.split(':') %} - - Host: {{ path[0] }} - Port: {{ path[1] }} -{% endif %} - ClientTLSCert: ./crypto-config/ordererOrganizations/{{ component_ns }}/orderers/{{ orderer.name }}.{{ component_ns }}/tls/server.crt - ServerTLSCert: ./crypto-config/ordererOrganizations/{{ component_ns }}/orderers/{{ orderer.name }}.{{ component_ns }}/tls/server.crt -{% endfor %} -{% endif %} - Organizations: -{% for orderer in channel.orderers %} - - *{{ orderer }}Org -{% endfor %} -{% if '2.5' not in network.version %} - Consortiums: - {{channel.consortium}}: - Organizations: -{% for org in network.organizations %} -{% if org.type != 'orderer' %} - - *{{org.name}}Org -{% endif %} -{% endfor %} - {{channel.channel_name}}: - <<: *ChannelDefaults - Consortium: {{channel.consortium}} -{% endif %} - Application: - <<: *ApplicationDefaults - Organizations: -{% for org in channel.participants %} - - *{{org.name}}Org -{% endfor %} -{% if '2.5' in network.version %} - Capabilities: *ApplicationCapabilities -{% endif %} -{% endfor %} diff --git a/platforms/hyperledger-fabric/configuration/roles/create/configtx/templates/configtxinit_default.tpl b/platforms/hyperledger-fabric/configuration/roles/create/configtx/templates/configtxinit_default.tpl deleted file mode 100644 index 9d3948012f4..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/create/configtx/templates/configtxinit_default.tpl +++ /dev/null @@ -1,73 +0,0 @@ ---- -Capabilities: -{% if '2.' 
in network.version %} - Channel: &ChannelCapabilities - V2_0: true - Orderer: &OrdererCapabilities - V2_0: true - Application: &ApplicationCapabilities -{% if '2.5' in network.version %} - V2_5: true -{% else %} - V2_0: true -{% endif %} -{% endif %} -{% if '1.4' in network.version %} -{% if consensus.name == 'kafka' %} - Global: &ChannelCapabilities - V1_1: true - Orderer: &OrdererCapabilities - V1_1: true - Application: &ApplicationCapabilities - V1_1: true -{% endif %} -{% if consensus.name == 'raft' %} - Global: &ChannelCapabilities - V1_4_3: true - Orderer: &OrdererCapabilities - V1_4_2: true - Application: &ApplicationCapabilities - V1_4_2: true -{% endif %} -{% endif %} - -Application: &ApplicationDefaults - Organizations: -{% if '2.' in network.version %} - Policies: &ApplicationDefaultPolicies - LifecycleEndorsement: - Type: ImplicitMeta - Rule: "MAJORITY Endorsement" - Endorsement: - Type: ImplicitMeta - Rule: "MAJORITY Endorsement" - Readers: - Type: ImplicitMeta - Rule: "ANY Readers" - Writers: - Type: ImplicitMeta - Rule: "ANY Writers" - Admins: - Type: ImplicitMeta - Rule: "MAJORITY Admins" -{% endif %} - Capabilities: - <<: *ApplicationCapabilities - -Channel: &ChannelDefaults -{% if '2.' in network.version %} - Policies: - Readers: - Type: ImplicitMeta - Rule: "ANY Readers" - Writers: - Type: ImplicitMeta - Rule: "ANY Writers" - Admins: - Type: ImplicitMeta - Rule: "MAJORITY Admins" -{% endif %} - Capabilities: - <<: *ChannelCapabilities - -Organizations: diff --git a/platforms/hyperledger-fabric/configuration/roles/create/console_assets/tasks/main.yaml b/platforms/hyperledger-fabric/configuration/roles/create/console_assets/tasks/main.yaml index 7de25f29f49..c76b64154d0 100644 --- a/platforms/hyperledger-fabric/configuration/roles/create/console_assets/tasks/main.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/create/console_assets/tasks/main.yaml @@ -18,7 +18,7 @@ # Get CA info from public url - name: Get CA data info uri: - url: "https://{{ item.ca_data.url }}" + url: "https://{{ item.ca_data.url }}/cainfo" validate_certs: no return_content: yes register: url_output diff --git a/platforms/hyperledger-fabric/configuration/roles/create/crypto_script/tasks/main.yaml b/platforms/hyperledger-fabric/configuration/roles/create/crypto_script/tasks/main.yaml deleted file mode 100644 index 31595836185..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/create/crypto_script/tasks/main.yaml +++ /dev/null @@ -1,68 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -############################################################################################# -# This role creates the generate_crypto.sh script for orderers and organizations. 
-############################################################################################# - -# Create the build directory if it does not exist -- name: Create build directory if it does not exist - file: - path: "./build" - state: directory - -# Create the generate_crypto.sh file for orderers -- name: Create generate_crypto script file for orderers - template: - src: "orderer_script.tpl" - dest: "./build/generate-crypto-{{ component_name }}-{{ peer_name }}.sh" - vars: - component_name: "{{ item.name | lower }}" - component_ns: "{{ item.name | lower }}-net" - component_country: "{{ item.country }}" - component_subject: "{{ item.subject }}" - component_state: "{{ item.state }}" - component_location: "{{ item.location }}" - ca_url: "{{ item.ca_data.url }}" - peer_name: "{{ orderer.name }}" - proxy: "{{ network.env.proxy }}" - loop: "{{ orderers }}" - loop_control: - loop_var: orderer - when: component_type == 'orderer' - -# Create the generate-crypto-{{ component_name }}.sh file for orderer organizations -- name: Create generate_crypto script file for orderer organisation - template: - src: "orderer_organisation_script.tpl" - dest: "./build/generate-crypto-{{ component_name }}.sh" - vars: - component_name: "{{ item.name | lower }}" - component_ns: "{{ item.name | lower }}-net" - component_subject: "{{ item.subject }}" - component_country: "{{ item.country }}" - component_state: "{{ item.state }}" - component_location: "{{ item.location }}" - ca_url: "{{ item.ca_data.url }}" - proxy: "{{ network.env.proxy }}" - when: component_type == 'orderer' - -# Create the generate_crypto.sh file for organizations -- name: Create generate_crypto script file for organisations - template: - src: "organisation_script.tpl" - dest: "./build/generate-crypto-{{ component_name }}.sh" - vars: - component_name: "{{ item.name | lower }}" - component_ns: "{{ item.name | lower }}-net" - component_subject: "{{ item.subject }}" - component_country: "{{ item.country }}" - component_state: "{{ item.state }}" - component_location: "{{ item.location }}" - ca_url: "{{ item.ca_data.url }}" - peer_count: "{{ item.services.peers | length }}" - proxy: "{{ network.env.proxy }}" - when: component_type == 'peer' diff --git a/platforms/hyperledger-fabric/configuration/roles/create/crypto_script/templates/orderer_organisation_script.tpl b/platforms/hyperledger-fabric/configuration/roles/create/crypto_script/templates/orderer_organisation_script.tpl deleted file mode 100644 index 31ad7014d99..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/create/crypto_script/templates/orderer_organisation_script.tpl +++ /dev/null @@ -1,65 +0,0 @@ -#!/bin/bash - -set -x - -CURRENT_DIR=${PWD} -FULLY_QUALIFIED_ORG_NAME="{{ component_ns }}" -EXTERNAL_URL_SUFFIX="{{ item.external_url_suffix }}" -ALTERNATIVE_ORG_NAMES=("{{ item.external_url_suffix }}") -ORG_NAME="{{ component_name }}" -SUBJECT="C={{ component_country }},ST={{ component_state }},L={{ component_location }},O={{ component_name }}" -SUBJECT_PEER="{{ component_subject }}" -CA="{{ ca_url }}" -CA_ADMIN_USER="${ORG_NAME}-admin" -CA_ADMIN_PASS="${ORG_NAME}-adminpw" - -ORG_ADMIN_USER="Admin@${FULLY_QUALIFIED_ORG_NAME}" -ORG_ADMIN_PASS="Admin@${FULLY_QUALIFIED_ORG_NAME}-pw" - -ORG_CYPTO_FOLDER="/crypto-config/ordererOrganizations/${FULLY_QUALIFIED_ORG_NAME}" - -ROOT_TLS_CERT="/crypto-config/ordererOrganizations/${FULLY_QUALIFIED_ORG_NAME}/ca/ca.${FULLY_QUALIFIED_ORG_NAME}-cert.pem" - -CAS_FOLDER="${HOME}/ca-tools/cas/ca-${ORG_NAME}" -ORG_HOME="${HOME}/ca-tools/${ORG_NAME}" - 
-## Enroll CA administrator for Org. This user will be used to create other identities -fabric-ca-client enroll -d -u https://${CA_ADMIN_USER}:${CA_ADMIN_PASS}@${CA} --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} --csr.names "${SUBJECT_PEER}" - -## Get the CA cert and store in Org MSP folder -fabric-ca-client getcacert -d -u https://${CA} --tls.certfiles ${ROOT_TLS_CERT} -M ${ORG_CYPTO_FOLDER}/msp - -if [ "{{ proxy }}" != "none" ]; then - mv ${ORG_CYPTO_FOLDER}/msp/cacerts/*.pem ${ORG_CYPTO_FOLDER}/msp/cacerts/ca-${FULLY_QUALIFIED_ORG_NAME}-${EXTERNAL_URL_SUFFIX}.pem -fi -mkdir ${ORG_CYPTO_FOLDER}/msp/tlscacerts -cp ${ORG_CYPTO_FOLDER}/msp/cacerts/* ${ORG_CYPTO_FOLDER}/msp/tlscacerts - -## Register and enroll admin for Org and populate admincerts for MSP -fabric-ca-client register -d --id.name ${ORG_ADMIN_USER} --id.secret ${ORG_ADMIN_PASS} --id.type admin --csr.names "${SUBJECT_PEER}" --id.attrs "hf.Registrar.Roles=client,hf.Registrar.Attributes=*,hf.Revoker=true,hf.AffiliationMgr=true,hf.GenCRL=true,admin=true:ecert,abac.init=true:ecert" --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} - -fabric-ca-client enroll -d -u https://${ORG_ADMIN_USER}:${ORG_ADMIN_PASS}@${CA} --tls.certfiles ${ROOT_TLS_CERT} --home ${ORG_HOME}/admin --csr.names "${SUBJECT_PEER}" - -mkdir -p ${ORG_CYPTO_FOLDER}/msp/admincerts -cp ${ORG_HOME}/admin/msp/signcerts/* ${ORG_CYPTO_FOLDER}/msp/admincerts/${ORG_ADMIN_USER}-cert.pem - -mkdir ${ORG_HOME}/admin/msp/admincerts -cp ${ORG_HOME}/admin/msp/signcerts/* ${ORG_HOME}/admin/msp/admincerts/${ORG_ADMIN_USER}-cert.pem - -mkdir -p ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER} -cp -R ${ORG_HOME}/admin/msp ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER} - -if [ "{{ proxy }}" != "none" ]; then - mv ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/msp/cacerts/*.pem ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/msp/cacerts/ca-${FULLY_QUALIFIED_ORG_NAME}-${EXTERNAL_URL_SUFFIX}.pem -fi - -# Get TLS cert for admin and copy to appropriate location -fabric-ca-client enroll -d --enrollment.profile tls -u https://${ORG_ADMIN_USER}:${ORG_ADMIN_PASS}@${CA} -M ${ORG_HOME}/admin/tls --tls.certfiles ${ROOT_TLS_CERT} --csr.names "${SUBJECT_PEER}" - -# Copy the TLS key and cert to the appropriate place -mkdir -p ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls -cp ${ORG_HOME}/admin/tls/keystore/* ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls/client.key -cp ${ORG_HOME}/admin/tls/signcerts/* ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls/client.crt -cp ${ORG_HOME}/admin/tls/tlscacerts/* ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls/ca.crt - -cd ${CURRENT_DIR} diff --git a/platforms/hyperledger-fabric/configuration/roles/create/crypto_script/templates/orderer_script.tpl b/platforms/hyperledger-fabric/configuration/roles/create/crypto_script/templates/orderer_script.tpl deleted file mode 100644 index 90be7a7afed..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/create/crypto_script/templates/orderer_script.tpl +++ /dev/null @@ -1,67 +0,0 @@ -#!/bin/bash - -set -x - -CURRENT_DIR=${PWD} -FULLY_QUALIFIED_ORG_NAME="{{ component_ns }}" -EXTERNAL_URL_SUFFIX="{{ item.external_url_suffix }}" -ALTERNATIVE_ORG_NAMES=("{{ item.external_url_suffix }}") -ORG_NAME="{{ component_name }}" -SUBJECT="C={{ component_country }},ST={{ component_state }},L={{ component_location }},O={{ component_name }}" -SUBJECT_PEER="{{ component_subject }}" -CA="{{ ca_url }}" -CA_ADMIN_USER="${ORG_NAME}-admin" -CA_ADMIN_PASS="${ORG_NAME}-adminpw" - -ORG_ADMIN_USER="Admin@${FULLY_QUALIFIED_ORG_NAME}" 
-ORG_ADMIN_PASS="Admin@${FULLY_QUALIFIED_ORG_NAME}-pw" - -ORG_CYPTO_FOLDER="/crypto-config/ordererOrganizations/${FULLY_QUALIFIED_ORG_NAME}" - -ROOT_TLS_CERT="/crypto-config/ordererOrganizations/${FULLY_QUALIFIED_ORG_NAME}/ca/ca.${FULLY_QUALIFIED_ORG_NAME}-cert.pem" - -CAS_FOLDER="${HOME}/ca-tools/cas/ca-${ORG_NAME}" -ORG_HOME="${HOME}/ca-tools/${ORG_NAME}" - -## Register and enroll node and populate its MSP folder -PEER="{{ peer_name }}.${FULLY_QUALIFIED_ORG_NAME}" -CSR_HOSTS=${PEER} -for i in "${ALTERNATIVE_ORG_NAMES[@]}" -do - CSR_HOSTS="${CSR_HOSTS},{{ peer_name }}.${i}" -done -echo "Registering and enrolling $PEER with csr hosts ${CSR_HOSTS}" - - -# Register the peer -fabric-ca-client register -d --id.name ${PEER} --id.secret ${PEER}-pw --id.type orderer --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} - -# Enroll to get peers TLS cert -fabric-ca-client enroll -d --enrollment.profile tls -u https://${PEER}:${PEER}-pw@${CA} -M ${ORG_HOME}/cas/orderers/tls --csr.hosts "${CSR_HOSTS}" --tls.certfiles ${ROOT_TLS_CERT} --csr.names "${SUBJECT_PEER}" - -# Copy the TLS key and cert to the appropriate place -mkdir -p ${ORG_CYPTO_FOLDER}/orderers/${PEER}/tls -cp ${ORG_HOME}/cas/orderers/tls/keystore/* ${ORG_CYPTO_FOLDER}/orderers/${PEER}/tls/server.key -cp ${ORG_HOME}/cas/orderers/tls/signcerts/* ${ORG_CYPTO_FOLDER}/orderers/${PEER}/tls/server.crt -cp ${ORG_HOME}/cas/orderers/tls/tlscacerts/* ${ORG_CYPTO_FOLDER}/orderers/${PEER}/tls/ca.crt - -rm -rf ${ORG_HOME}/cas/orderers/tls - -# Enroll again to get the peer's enrollment certificate (default profile) -fabric-ca-client enroll -d -u https://${PEER}:${PEER}-pw@${CA} -M ${ORG_CYPTO_FOLDER}/orderers/${PEER}/msp --tls.certfiles ${ROOT_TLS_CERT} --csr.names "${SUBJECT_PEER}" - - -# Create the TLS CA directories of the MSP folder if they don't exist. 
-mkdir ${ORG_CYPTO_FOLDER}/orderers/${PEER}/msp/tlscacerts - -if [ "{{ proxy }}" != "none" ]; then - mv ${ORG_CYPTO_FOLDER}/orderers/${PEER}/msp/cacerts/*.pem ${ORG_CYPTO_FOLDER}/orderers/${PEER}/msp/cacerts/ca-${FULLY_QUALIFIED_ORG_NAME}-${EXTERNAL_URL_SUFFIX}.pem -fi -cp ${ORG_CYPTO_FOLDER}/orderers/${PEER}/msp/cacerts/* ${ORG_CYPTO_FOLDER}/orderers/${PEER}/msp/tlscacerts - -# Copy the peer org's admin cert into target MSP directory -mkdir -p ${ORG_CYPTO_FOLDER}/orderers/${PEER}/msp/admincerts - -cp ${ORG_CYPTO_FOLDER}/msp/admincerts/${ORG_ADMIN_USER}-cert.pem ${ORG_CYPTO_FOLDER}/orderers/${PEER}/msp/admincerts - -cd ${CURRENT_DIR} \ No newline at end of file diff --git a/platforms/hyperledger-fabric/configuration/roles/create/crypto_script/templates/organisation_script.tpl b/platforms/hyperledger-fabric/configuration/roles/create/crypto_script/templates/organisation_script.tpl deleted file mode 100644 index 1d7d0ecd89b..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/create/crypto_script/templates/organisation_script.tpl +++ /dev/null @@ -1,112 +0,0 @@ -#!/bin/bash - -set -x - -CURRENT_DIR=${PWD} -FULLY_QUALIFIED_ORG_NAME="{{ component_ns }}" -ALTERNATIVE_ORG_NAMES=("{{ component_ns }}.svc.cluster.local" "{{ component_name }}.net" "{{ component_ns }}.{{ item.external_url_suffix }}") -ORG_NAME="{{ component_name }}" -EXTERNAL_URL_SUFFIX="{{ item.external_url_suffix }}" -AFFILIATION="{{ component_name }}" -SUBJECT="C={{ component_country }},ST={{ component_state }},L={{ component_location }},O={{ component_name }}" -SUBJECT_PEER="{{ component_subject }}" -CA="{{ ca_url }}" -CA_ADMIN_USER="${ORG_NAME}-admin" -CA_ADMIN_PASS="${ORG_NAME}-adminpw" - -ORG_ADMIN_USER="Admin@${FULLY_QUALIFIED_ORG_NAME}" -ORG_ADMIN_PASS="Admin@${FULLY_QUALIFIED_ORG_NAME}-pw" - -ORG_CYPTO_FOLDER="/crypto-config/peerOrganizations/${FULLY_QUALIFIED_ORG_NAME}" - -ROOT_TLS_CERT="/crypto-config/peerOrganizations/${FULLY_QUALIFIED_ORG_NAME}/ca/ca.${FULLY_QUALIFIED_ORG_NAME}-cert.pem" - -CAS_FOLDER="${HOME}/ca-tools/cas/ca-${ORG_NAME}" -ORG_HOME="${HOME}/ca-tools/${ORG_NAME}" - -NO_OF_PEERS={{ peer_count | e }} - -## Enroll CA administrator for Org. 
This user will be used to create other identities -fabric-ca-client enroll -d -u https://${CA_ADMIN_USER}:${CA_ADMIN_PASS}@${CA} --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} --csr.names "${SUBJECT_PEER}" - -## Get the CA cert and store in Org MSP folder -fabric-ca-client getcacert -d -u https://${CA} --tls.certfiles ${ROOT_TLS_CERT} -M ${ORG_CYPTO_FOLDER}/msp - -if [ "{{ proxy }}" != "none" ]; then - mv ${ORG_CYPTO_FOLDER}/msp/cacerts/*.pem ${ORG_CYPTO_FOLDER}/msp/cacerts/ca-${FULLY_QUALIFIED_ORG_NAME}-${EXTERNAL_URL_SUFFIX}.pem -fi -mkdir ${ORG_CYPTO_FOLDER}/msp/tlscacerts -cp ${ORG_CYPTO_FOLDER}/msp/cacerts/* ${ORG_CYPTO_FOLDER}/msp/tlscacerts - -# Add affiliation for organisation -fabric-ca-client affiliation add ${AFFILIATION} -u https://${CA_ADMIN_USER}:${CA_ADMIN_PASS}@${CA} --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} -## Register and enroll admin for Org and populate admincerts for MSP -fabric-ca-client register -d --id.name ${ORG_ADMIN_USER} --id.secret ${ORG_ADMIN_PASS} --id.type admin --csr.names "${SUBJECT_PEER}" --id.affiliation ${AFFILIATION} --id.attrs "hf.Registrar.Roles=client,hf.Registrar.Attributes=*,hf.Revoker=true,hf.AffiliationMgr=true,hf.GenCRL=true,admin=true:ecert,abac.init=true:ecert" --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} - -fabric-ca-client enroll -d -u https://${ORG_ADMIN_USER}:${ORG_ADMIN_PASS}@${CA} --id.affiliation ${AFFILIATION} --tls.certfiles ${ROOT_TLS_CERT} --home ${ORG_HOME}/admin --csr.names "${SUBJECT_PEER}" - -mkdir -p ${ORG_CYPTO_FOLDER}/msp/admincerts -cp ${ORG_HOME}/admin/msp/signcerts/* ${ORG_CYPTO_FOLDER}/msp/admincerts/${ORG_ADMIN_USER}-cert.pem - -mkdir ${ORG_HOME}/admin/msp/admincerts -cp ${ORG_HOME}/admin/msp/signcerts/* ${ORG_HOME}/admin/msp/admincerts/${ORG_ADMIN_USER}-cert.pem - -mkdir -p ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER} -cp -R ${ORG_HOME}/admin/msp ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER} - -if [ "{{ proxy }}" != "none" ]; then - mv ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/msp/cacerts/*.pem ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/msp/cacerts/ca-${FULLY_QUALIFIED_ORG_NAME}-${EXTERNAL_URL_SUFFIX}.pem -fi - -# Get TLS cert for admin and copy to appropriate location -fabric-ca-client enroll -d --enrollment.profile tls -u https://${ORG_ADMIN_USER}:${ORG_ADMIN_PASS}@${CA} -M ${ORG_HOME}/admin/tls --tls.certfiles ${ROOT_TLS_CERT} --csr.names "${SUBJECT_PEER}" - -# Copy the TLS key and cert to the appropriate place -mkdir -p ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls -cp ${ORG_HOME}/admin/tls/keystore/* ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls/client.key -cp ${ORG_HOME}/admin/tls/signcerts/* ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls/client.crt -cp ${ORG_HOME}/admin/tls/tlscacerts/* ${ORG_CYPTO_FOLDER}/users/${ORG_ADMIN_USER}/tls/ca.crt - -## Register and enroll peers and populate their MSP folder -COUNTER=0 -while [ ${COUNTER} -lt ${NO_OF_PEERS} ]; do - PEER="peer${COUNTER}.${FULLY_QUALIFIED_ORG_NAME}" - CSR_HOSTS=${PEER} - for i in "${ALTERNATIVE_ORG_NAMES[@]}" - do - CSR_HOSTS="${CSR_HOSTS},peer${COUNTER}.${i}" - done - echo "Registering and enrolling $PEER with csr hosts ${CSR_HOSTS}" - - # Register the peer - fabric-ca-client register -d --id.name ${PEER} --id.secret ${PEER}-pw --id.type peer --tls.certfiles ${ROOT_TLS_CERT} --home ${CAS_FOLDER} - - # Enroll to get peers TLS cert - fabric-ca-client enroll -d --enrollment.profile tls -u https://${PEER}:${PEER}-pw@${CA} -M ${ORG_HOME}/cas/peers/tls --csr.hosts "${CSR_HOSTS}" --tls.certfiles ${ROOT_TLS_CERT} --csr.names 
"${SUBJECT_PEER}" - - # Copy the TLS key and cert to the appropriate place - mkdir -p ${ORG_CYPTO_FOLDER}/peers/${PEER}/tls - cp ${ORG_HOME}/cas/peers/tls/keystore/* ${ORG_CYPTO_FOLDER}/peers/${PEER}/tls/server.key - cp ${ORG_HOME}/cas/peers/tls/signcerts/* ${ORG_CYPTO_FOLDER}/peers/${PEER}/tls/server.crt - cp ${ORG_HOME}/cas/peers/tls/tlscacerts/* ${ORG_CYPTO_FOLDER}/peers/${PEER}/tls/ca.crt - - rm -rf ${ORG_HOME}/cas/peers/tls - - # Enroll again to get the peer's enrollment certificate (default profile) - fabric-ca-client enroll -d -u https://${PEER}:${PEER}-pw@${CA} -M ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp --tls.certfiles ${ROOT_TLS_CERT} --csr.names "${SUBJECT_PEER}" - - # Create the TLS CA directories of the MSP folder if they don't exist. - mkdir -p ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/tlscacerts - - # Copy the peer org's admin cert into target MSP directory - mkdir -p ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/admincerts - if [ "{{ proxy }}" != "none" ]; then - mv ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/cacerts/*.pem ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/cacerts/ca-${FULLY_QUALIFIED_ORG_NAME}-${EXTERNAL_URL_SUFFIX}.pem - fi - cp ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/cacerts/* ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/tlscacerts - cp ${ORG_CYPTO_FOLDER}/msp/admincerts/${ORG_ADMIN_USER}-cert.pem ${ORG_CYPTO_FOLDER}/peers/${PEER}/msp/admincerts - - let COUNTER=COUNTER+1 -done - -cd ${CURRENT_DIR} diff --git a/platforms/hyperledger-fabric/configuration/roles/create/genesis/tasks/get_certificates.yaml b/platforms/hyperledger-fabric/configuration/roles/create/genesis/tasks/get_certificates.yaml new file mode 100644 index 00000000000..37468b01d95 --- /dev/null +++ b/platforms/hyperledger-fabric/configuration/roles/create/genesis/tasks/get_certificates.yaml @@ -0,0 +1,24 @@ +# Fetch peer msp config +- name: Fetch peer msp config + include_tasks: get_peer_msp_config.yaml + vars: + peer_name: "{{ peer.name }}" + loop: "{{ peers }}" + loop_control: + loop_var: peer + +# Get admin msp certificates from secret +- name: Get admin msp certificates from secret + kubernetes.core.k8s_info: + kubeconfig: "{{ org.k8s.config_file }}" + kind: Secret + name: "admin-msp" + namespace: "{{ org_name }}-net" + register: msp_cert_data + +- name: Save admin-msp cert locally for genesis + copy: + content: "{{ msp_cert_data.resources[0] | to_nice_json }}" + dest: "{{ files_loc }}/{{ org_name }}.json" + vars: + files_loc: "{{ playbook_dir }}/../../../{{ charts_dir }}/fabric-genesis/files" diff --git a/platforms/hyperledger-fabric/configuration/roles/create/genesis/tasks/get_channel.yaml b/platforms/hyperledger-fabric/configuration/roles/create/genesis/tasks/get_channel.yaml new file mode 100644 index 00000000000..8047ee8bf28 --- /dev/null +++ b/platforms/hyperledger-fabric/configuration/roles/create/genesis/tasks/get_channel.yaml @@ -0,0 +1,13 @@ +# Set Variable channel_name_value +- name: Set Variable channel_name_value + include_tasks: valuefile.yaml + loop: "{{ channel.orderers }}" + loop_control: + loop_var: ord_org + when: + - ord_org == org.name and ('2.2.' in network.version or '1.4.' in network.version) + +# Set Variable channel_name_value +- name: Set Variable channel_name_value + include_tasks: valuefile.yaml + when: channel.osn_creator_org.name == org.name and '2.5.' 
in network.version diff --git a/platforms/hyperledger-fabric/configuration/roles/create/genesis/tasks/get_channel_creator.yaml b/platforms/hyperledger-fabric/configuration/roles/create/genesis/tasks/get_channel_creator.yaml new file mode 100644 index 00000000000..8047ee8bf28 --- /dev/null +++ b/platforms/hyperledger-fabric/configuration/roles/create/genesis/tasks/get_channel_creator.yaml @@ -0,0 +1,13 @@ +# Set Variable channel_name_value +- name: Set Variable channel_name_value + include_tasks: valuefile.yaml + loop: "{{ channel.orderers }}" + loop_control: + loop_var: ord_org + when: + - ord_org == org.name and ('2.2.' in network.version or '1.4.' in network.version) + +# Set Variable channel_name_value +- name: Set Variable channel_name_value + include_tasks: valuefile.yaml + when: channel.osn_creator_org.name == org.name and '2.5.' in network.version diff --git a/platforms/hyperledger-fabric/configuration/roles/create/genesis/tasks/get_peer_msp_config.yaml b/platforms/hyperledger-fabric/configuration/roles/create/genesis/tasks/get_peer_msp_config.yaml new file mode 100644 index 00000000000..89d2af742e6 --- /dev/null +++ b/platforms/hyperledger-fabric/configuration/roles/create/genesis/tasks/get_peer_msp_config.yaml @@ -0,0 +1,35 @@ +# Get config file from configmap +- name: Get config file from config map + kubernetes.core.k8s_info: + kubeconfig: "{{ org.k8s.config_file }}" + kind: ConfigMap + name: "{{ peer_name }}-msp-config" + namespace: "{{ org_name }}-net" + register: config_file_data + +# Create the certs directory if it does not exist +- name: Create the certs directory if it does not exist + file: + path: "{{ playbook_dir }}/../../../{{ charts_dir }}/fabric-genesis/files" + state: directory + +# Find old .json files +- name: Find .json files + find: + paths: "{{ playbook_dir }}/../../../{{ charts_dir }}/fabric-genesis/files" + patterns: "*.json" + register: json_files + +# Delete old .json files +- name: Delete .json files + file: + path: "{{ item.path }}" + state: absent + loop: "{{ json_files.files }}" + +- name: Save config peer msp config locally for genesis + copy: + content: "{{ config_file_data.resources[0] | to_nice_json }}" + dest: "{{ files_loc }}/{{ org_name }}-config-file.json" + vars: + files_loc: "{{ playbook_dir }}/../../../{{ charts_dir }}/fabric-genesis/files" diff --git a/platforms/hyperledger-fabric/configuration/roles/create/genesis/tasks/main.yaml b/platforms/hyperledger-fabric/configuration/roles/create/genesis/tasks/main.yaml index 32b19b74838..12349da003f 100644 --- a/platforms/hyperledger-fabric/configuration/roles/create/genesis/tasks/main.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/create/genesis/tasks/main.yaml @@ -5,40 +5,45 @@ ############################################################################################## ############################################################################################ -# Create the channel-artifacts folder -- name: "Creating channel-artifacts folder" - file: - path: "{{ build_path }}/channel-artifacts" - state: directory -# Remove old genesis block -- name: Remove old genesis block - file: - path: "{{ build_path }}/channel-artifacts/{{ channel_name }}.genesis.block" - state: absent -# Create the genesis block by consuming the configtx.yaml file -- name: "Create genesis block" - shell: | - cd {{ build_path }} - {% if '2.5' in network.version %} - ./configtxgen -profile {{ genesis.name }} -channelID {{ channel_name }} -outputBlock ./channel-artifacts/{{ channel_name }}.genesis.block - {% 
elif '2.2' in network.version %} - ./configtxgen -profile {{ genesis.name }} -channelID syschannel -outputBlock ./channel-artifacts/{{ channel_name }}.genesis.block - {% else %} - ./configtxgen -profile {{ genesis.name }} -channelID syschannel -outputBlock ./channel-artifacts/{{ channel_name }}.genesis.block - {% endif %} - cat ./channel-artifacts/{{ channel_name }}.genesis.block | base64 > ./channel-artifacts/{{ channel_name }}.genesis.block.base64 - when: add_new_org == 'false' +# Remove all Helm releases of organization except genesis +- name: Delete Helm releases + kubernetes.core.helm: + kubeconfig: "{{ kubernetes.config_file }}" + name: "genesis" + release_namespace: "{{ org.name | lower }}-net" + state: absent + when: genererate_configtx is defined and genererate_configtx == 'true' -# Write genesis block to Vault -- name: "Write genesis block to Vault" - shell: | - vault kv put {{ org.vault.secret_path | default('secretsv2') }}/{{ org.name | lower }}/ordererOrganizations/{{ org.name }}-net/{{ channel_name }} {{ network.env.type }}GenesisBlock=@{{build_path}}/channel-artifacts/{{ channel_name }}.genesis.block.base64 - environment: - VAULT_ADDR: "{{ org.vault.url }}" - VAULT_TOKEN: "{{ org.vault.root_token }}" +# Fetch peers certificates +- name: Fetch peers certificates + include_tasks: get_certificates.yaml + vars: + charts_dir: "{{ organization.gitops.chart_source }}" + org_name: "{{ organization.name | lower }}" + peers: "{{ organization.services.peers }}" loop: "{{ network['organizations'] }}" loop_control: - loop_var: org - when: add_new_org == 'false' and org.type == "orderer" + loop_var: organization + when: + - organization.services.peers is defined and organization.services.peers | length > 0 + - organization.name != org.name + +# Get channel_name +- name: Get channel_name + include_tasks: get_channel_creator.yaml + vars: + channel_orgs: "{{ channel.orderers}}" + loop: "{{ network['channels'] }}" + loop_control: + loop_var: channel + +# Check or wait for the genesis job to complete +- name: "waiting for genesis" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" + vars: + component_type: "Job" + namespace: "{{ org.name | lower }}-net" + component_name: "genesis" diff --git a/platforms/hyperledger-fabric/configuration/roles/create/genesis/tasks/valuefile.yaml b/platforms/hyperledger-fabric/configuration/roles/create/genesis/tasks/valuefile.yaml new file mode 100644 index 00000000000..5c0cf4c9380 --- /dev/null +++ b/platforms/hyperledger-fabric/configuration/roles/create/genesis/tasks/valuefile.yaml @@ -0,0 +1,24 @@ +# Create Value files for Genesis +- name: Create Value files for Genesis + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/job_component" + vars: + name: "{{ org.name | lower }}" + component_ns: "{{ org.name | lower }}-net" + component_name: "genesis" + consensus: "{{ network.consensus }}" + type: "fabric_genesis" + kubernetes: "{{ org.k8s }}" + vault: "{{ org.vault }}" + charts_dir: "{{ org.gitops.chart_source }}" + values_dir: "{{playbook_dir}}/../../../{{org.gitops.release_dir}}/{{ org.name | lower }}" + generateGenisisBLock: "{{ generateGenisis }}" + +# Git Push: Push the above generated files to git directory +- name: Git Push + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" + vars: + GIT_DIR: "{{ playbook_dir }}/../../../" + gitops: "{{ org.gitops }}" + msg: "[ci skip] Pushing Genesis files" diff --git
a/platforms/hyperledger-fabric/configuration/roles/create/k8s_secrets/tasks/main.yaml b/platforms/hyperledger-fabric/configuration/roles/create/k8s_secrets/tasks/main.yaml deleted file mode 100644 index 6d843717a6c..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/create/k8s_secrets/tasks/main.yaml +++ /dev/null @@ -1,106 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -############################################################################################# -# This role creates the secrets and docker credentials -############################################################################################# - -# Check if root token secret exist of every organization in their namespace -- name: Check if root token exists in the namespace - k8s_info: - kind: Secret - namespace: "{{ namespace }}" - name: "roottoken" - kubeconfig: "{{ kubernetes.config_file }}" - context: "{{ kubernetes.context }}" - register: root_token_secret - when: check == "token_secret" - -# Put root token of every organization in their namespace -- name: Put root token of every organization - k8s: - definition: - apiVersion: v1 - kind: Secret - metadata: - name: "roottoken" - namespace: "{{ namespace }}" - stringData: - config.yaml: |- - token: "{{ vault.root_token }}" - state: present - kubeconfig: "{{ kubernetes.config_file }}" - context: "{{ kubernetes.context }}" - when: check == "token_secret" and root_token_secret.resources|length == 0 - -# Check if Docker credentials exist already -- name: "Checking if the docker credentials already exists" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/setup" - vars: - check: "docker_credentials" - register: get_regcred - when: check == "docker_credentials" - -# Set a variable -- set_fact: - auth: "{{network.docker.username}}:{{network.docker.password}}" - when: check == "docker_credentials" and get_regcred.resources|length == 0 - -# Set a variable -- set_fact: - auth_64: "{{auth | b64encode}}" - when: check == "docker_credentials" and get_regcred.resources|length == 0 - -# Set a variable -- set_fact: - dockerconfigjson: "{\"auths\":{\"{{network.docker.url}}\":{\"username\":\"{{network.docker.username}}\",\"password\":\"{{network.docker.password}}\",\"email\":\"test@abc.mail\",\"auth\":\"{{auth_64}}\"}}}" - when: check == "docker_credentials" and get_regcred.resources|length == 0 - -# Create the docker pull credentials for image registry -- name: Create the docker pull credentials - k8s: - definition: - apiVersion: v1 - kind: Secret - metadata: - name: "regcred" - namespace: "{{ namespace }}" - type: kubernetes.io/dockerconfigjson - data: - .dockerconfigjson: "{{ dockerconfigjson | to_json | b64encode }}" - state: present - kubeconfig: "{{ kubernetes.config_file }}" - context: "{{ kubernetes.context }}" - when: check == "docker_credentials" and get_regcred.resources|length == 0 - -# Check if endorser certs secret exists in the namespace -- name: Check if endorser certs secret exists in the namespace - k8s_info: - kind: Secret - namespace: "{{ namespace }}" - name: "{{ org_name }}-endorser-cert" - kubeconfig: "{{ kubernetes.config_file }}" - context: "{{ kubernetes.context }}" - register: endorser_certs_secret - when: check == "endorser_certs" - -# Create endorser certs secret exists in 
the namespace -- name: Create endorser certs secret exists in the namespace - k8s: - definition: - apiVersion: v1 - kind: Secret - metadata: - name: "{{ org_name }}-endorser-cert" - namespace: "{{ namespace }}" - stringData: - config.yaml: |- - certificate: "{{ cert }}" - state: present - kubeconfig: "{{ kubernetes.config_file }}" - context: "{{ kubernetes.context }}" - when: check == "endorser_certs" and endorser_certs_secret.resources|length == 0 diff --git a/platforms/hyperledger-fabric/configuration/roles/create/namespace/tasks/main.yaml b/platforms/hyperledger-fabric/configuration/roles/create/namespace/tasks/main.yaml index aa8e6565909..3f4a15a6dfe 100644 --- a/platforms/hyperledger-fabric/configuration/roles/create/namespace/tasks/main.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/create/namespace/tasks/main.yaml @@ -9,12 +9,12 @@ ############################################################################################# # Check if namespace created -- name: "Checking if the namespace {{ item.name | lower }}-net already exists" +- name: "Checking if the namespace {{ org.name | lower }}-net already exists" include_role: name: "{{ playbook_dir }}/../../shared/configuration/roles/check/k8_component" vars: component_type: "Namespace" - component_name: "{{ item.name | lower }}-net" + component_name: "{{ org.name | lower }}-net" type: "no_retry" register: get_namespace tags: @@ -39,7 +39,7 @@ name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" vars: GIT_DIR: "{{ playbook_dir }}/../../../" - gitops: "{{ item.gitops }}" + gitops: "{{ org.gitops }}" msg: "[ci skip] Pushing deployment files for namespace, service accounts and clusterrolebinding" tags: - notest diff --git a/platforms/hyperledger-fabric/configuration/roles/create/new_organization/orderer_org/appchannel/templates/update_channel_script.tpl b/platforms/hyperledger-fabric/configuration/roles/create/new_organization/orderer_org/appchannel/templates/update_channel_script.tpl index 1fe4b633293..db28cfd371b 100644 --- a/platforms/hyperledger-fabric/configuration/roles/create/new_organization/orderer_org/appchannel/templates/update_channel_script.tpl +++ b/platforms/hyperledger-fabric/configuration/roles/create/new_organization/orderer_org/appchannel/templates/update_channel_script.tpl @@ -3,9 +3,19 @@ set -x CURRENT_DIR=${PWD} +NETWORK_VERSION="{{ version }}" -echo "installing jq " -apt-get install -y jq +if [ "$NETWORK_VERSION" != "2.5.4" ]; then + echo "installing jq " + . /scripts/package-manager.sh + packages_to_install="jq" + install_packages "$packages_to_install" +else + echo "installing jq and wget" + . 
/scripts/package-manager.sh + packages_to_install="jq wget" + install_packages "$packages_to_install" +fi echo "installing configtxlator" mkdir temp cd temp/ @@ -17,11 +27,16 @@ rm -r temp configtxlator proto_decode --input {{ channel_name }}_config_block.pb --type common.Block | jq .data.data[0].payload.data.config > {{ channel_name }}_config_block.json -jq -s '.[0] * {"channel_group":{"groups":{"Orderer":{"groups": {"{{ component_name }}MSP":.[1]}}}}}' {{ channel_name }}_config_block.json ./config.json > config1.json -jq -s '.[0] * {"channel_group":{"groups":{"Application":{"groups": {"{{ component_name }}MSP":.[1]}}}}}' config1.json ./config.json > config2.json -cat config2.json | jq '.channel_group.groups.Orderer.values.ConsensusType.value.metadata.consenters += ['$(cat ./orderer-tls)']' > config3.json -cat config3.json | jq '.channel_group.values.OrdererAddresses.value.addresses += ['$(cat ./orderer)'] ' > {{ channel_name }}_modified_config.json - +if [ "$NETWORK_VERSION" != "2.5.4" ]; then + jq -s '.[0] * {"channel_group":{"groups":{"Orderer":{"groups": {"{{ component_name }}MSP":.[1]}}}}}' {{ channel_name }}_config_block.json ./config.json > config1.json + jq -s '.[0] * {"channel_group":{"groups":{"Application":{"groups": {"{{ component_name }}MSP":.[1]}}}}}' config1.json ./config.json > config2.json + cat config2.json | jq '.channel_group.groups.Orderer.values.ConsensusType.value.metadata.consenters += ['$(cat ./orderer-tls)']' > config3.json + cat config3.json | jq '.channel_group.values.OrdererAddresses.value.addresses += ['$(cat ./orderer)'] ' > {{ channel_name }}_modified_config.json +else + jq -s '.[0] * {"channel_group":{"groups":{"Orderer":{"groups": {"{{ component_name }}MSP":.[1]}}}}}' {{ channel_name }}_config_block.json ./config.json > config1.json + cat config1.json | jq '.channel_group.groups.Orderer.values.ConsensusType.value.metadata.consenters += ['$(cat ./orderer-tls)']' > config2.json + cat config2.json | jq '.channel_group.values.OrdererAddresses.value.addresses += ['$(cat ./orderer)'] ' > {{ channel_name }}_modified_config.json +fi echo "converting the channel_config.json and channel_modified_config.json to .pb files" configtxlator proto_encode --input {{ channel_name }}_config_block.json --type common.Config --output {{ channel_name }}_config.pb configtxlator proto_encode --input {{ channel_name }}_modified_config.json --type common.Config --output {{ channel_name }}_modified_config.pb diff --git a/platforms/hyperledger-fabric/configuration/roles/create/new_organization/orderer_org/syschannel/tasks/create_orderer.yaml b/platforms/hyperledger-fabric/configuration/roles/create/new_organization/orderer_org/syschannel/tasks/create_orderer.yaml index 7fc1adb6ac7..011f7bac5e5 100644 --- a/platforms/hyperledger-fabric/configuration/roles/create/new_organization/orderer_org/syschannel/tasks/create_orderer.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/create/new_organization/orderer_org/syschannel/tasks/create_orderer.yaml @@ -77,6 +77,7 @@ kubernetes: "{{ org.k8s }}" ordererAddress: "{{ orderer.ordererAddress }}" when: network.env.proxy != 'none' + - name: fetch, modify, sign and copy the configuration block from the blockchain for proxy none shell: | export PEER_CLI=$(KUBECONFIG={{ org.k8s.config_file }} kubectl get po -n {{ component_ns }} | grep "cli" | head -n 1 | awk '{print $1}') diff --git a/platforms/hyperledger-fabric/configuration/roles/create/new_organization/orderer_org/syschannel/tasks/main.yaml 
b/platforms/hyperledger-fabric/configuration/roles/create/new_organization/orderer_org/syschannel/tasks/main.yaml index 5f148a453b4..cc4713e8f49 100644 --- a/platforms/hyperledger-fabric/configuration/roles/create/new_organization/orderer_org/syschannel/tasks/main.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/create/new_organization/orderer_org/syschannel/tasks/main.yaml @@ -5,7 +5,7 @@ ############################################################################################## ############################################################################################ -## Adding the New Orderer to the json file that will be used inside the orderer cli +# Adding the New Orderer to the json file that will be used inside the orderer cli - name: "Creating JSON configuration for new organization" shell: | cd {{ build_path }} diff --git a/platforms/hyperledger-fabric/configuration/roles/create/new_organization/orderer_org/syschannel/tasks/nested_main.yaml b/platforms/hyperledger-fabric/configuration/roles/create/new_organization/orderer_org/syschannel/tasks/nested_main.yaml index c8ec09595a7..0d13c92bc41 100644 --- a/platforms/hyperledger-fabric/configuration/roles/create/new_organization/orderer_org/syschannel/tasks/nested_main.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/create/new_organization/orderer_org/syschannel/tasks/nested_main.yaml @@ -45,7 +45,7 @@ then echo -n "{\"client_tls_cert\":\"$(cat {{ build_path }}/crypto-config/ordererOrganizations/{{ component_ns }}/orderers/{{ orderer.name }}.{{ component_ns }}/tls/server.crt | base64 -w 0)\",\"host\":\"{{ orderer.name }}.{{ component_ns }}\",\"port\":{{ orderer.grpc.port }},\"server_tls_cert\":\"$(cat {{ build_path }}/crypto-config/ordererOrganizations/{{ component_ns }}/orderers/{{ orderer.name }}.{{ component_ns }}/tls/server.crt | base64 -w 0)\"}" >> {{ build_path }}/channel-artifacts/{{ channel_name }}-consenter else - echo -n "{\"client_tls_cert\":\"$(cat {{ build_path }}/crypto-config/ordererOrganizations/{{ component_ns }}/orderers/{{ orderer.name|lower }}.{{ component_ns }}/tls/server.crt | base64 -w 0)\",\"host\":\"{{ orderer.ordererAddress.split(":")[0] | to_json }}\",\"port\":\"{{ orderer.ordererAddress.split(":")[1] | to_json }}\",\"server_tls_cert\":\"$(cat {{ build_path }}/crypto-config/ordererOrganizations/{{ component_ns }}/orderers/{{ orderer.name|lower }}.{{ component_ns }}/tls/server.crt | base64 -w 0)\"}" >> {{ build_path }}/channel-artifacts/{{ channel_name | lower}}-orderer-tls + echo -n "{\"client_tls_cert\":\"$(cat {{ build_path }}/crypto-config/ordererOrganizations/{{ component_ns }}/orderers/{{ orderer.name|lower }}.{{ component_ns }}/tls/server.crt | base64 -w 0)\",\"host\":\"{{ orderer.ordererAddress.split(":")[0] | to_json }}\",\"port\":{{ orderer.ordererAddress.split(":")[1] | to_json }},\"server_tls_cert\":\"$(cat {{ build_path }}/crypto-config/ordererOrganizations/{{ component_ns }}/orderers/{{ orderer.name|lower }}.{{ component_ns }}/tls/server.crt | base64 -w 0)\"}" >> {{ build_path }}/channel-artifacts/{{ channel_name }}-consenter fi when: update_type == "tls" diff --git a/platforms/hyperledger-fabric/configuration/roles/create/new_organization/orderer_org/syschannel/templates/orderer_org.tpl b/platforms/hyperledger-fabric/configuration/roles/create/new_organization/orderer_org/syschannel/templates/orderer_org.tpl index f2ee66a9191..7b7c2bdbee3 100644 --- 
a/platforms/hyperledger-fabric/configuration/roles/create/new_organization/orderer_org/syschannel/templates/orderer_org.tpl +++ b/platforms/hyperledger-fabric/configuration/roles/create/new_organization/orderer_org/syschannel/templates/orderer_org.tpl @@ -5,7 +5,10 @@ set -x CURRENT_DIR=${PWD} echo "installing jq " -apt-get install -y jq +. /scripts/package-manager.sh +packages_to_install="jq" +install_packages "$packages_to_install" + echo "installing configtxlator" mkdir temp cd temp/ diff --git a/platforms/hyperledger-fabric/configuration/roles/create/new_organization/orderer_org/syschannel/templates/syschannel_update_address_script.tpl b/platforms/hyperledger-fabric/configuration/roles/create/new_organization/orderer_org/syschannel/templates/syschannel_update_address_script.tpl index 19f504e1620..d0ca0c456e1 100644 --- a/platforms/hyperledger-fabric/configuration/roles/create/new_organization/orderer_org/syschannel/templates/syschannel_update_address_script.tpl +++ b/platforms/hyperledger-fabric/configuration/roles/create/new_organization/orderer_org/syschannel/templates/syschannel_update_address_script.tpl @@ -5,7 +5,10 @@ set -x CURRENT_DIR=${PWD} echo "installing jq " -apt-get install -y jq +. /scripts/package-manager.sh +packages_to_install="jq" +install_packages "$packages_to_install" + echo "installing configtxlator" mkdir temp cd temp/ diff --git a/platforms/hyperledger-fabric/configuration/roles/create/orderers/tasks/get_channel.yaml b/platforms/hyperledger-fabric/configuration/roles/create/orderers/tasks/get_channel.yaml new file mode 100644 index 00000000000..e0de6087d02 --- /dev/null +++ b/platforms/hyperledger-fabric/configuration/roles/create/orderers/tasks/get_channel.yaml @@ -0,0 +1,8 @@ +# Set Variable channel_name_value +- name: Set Variable channel_name_value + set_fact: + channel_name_value: "{{ channel.channel_name | lower }}" + loop: "{{ channel.orderers }}" + loop_control: + loop_var: ord_org + when: ord_org == org.name diff --git a/platforms/hyperledger-fabric/configuration/roles/create/orderers/tasks/main.yaml b/platforms/hyperledger-fabric/configuration/roles/create/orderers/tasks/main.yaml index ed1c13d4e7b..ffd9e9455ed 100644 --- a/platforms/hyperledger-fabric/configuration/roles/create/orderers/tasks/main.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/create/orderers/tasks/main.yaml @@ -8,24 +8,22 @@ # This role creates value file for zkKafka and orderer ############################################################################################# +# Check if CA server is available +- name: "waiting for the CA server to be created in {{ org.name | lower }}-net" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" + vars: + component_type: "Pod" + namespace: "{{ org.name | lower }}-net" + component_name: "{{ component_services.ca.name }}" + label_selectors: + - app = {{ component_name }} + when: add_peer is not defined or add_peer != 'true' - -# Set Variable channel_name -- name: "Set Variable channel_name" +# Set Variable first_orderer +- name: "Set Variable first_orderer" set_fact: - channel_name: "{{ network['channels'] | map(attribute='channel_name') | first | lower }}" - when: item.type == 'orderer' and ('2.2.' in network.version or '1.4.' in network.version or '2.5.' 
in network.version) - -# Fetch the genesis block from vault to the build directory -- name: Fetch the genesis block from vault - shell: | - vault kv get -field={{ network.env.type }}GenesisBlock {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/ordererOrganizations/{{ item.name | lower }}-net > {{ channel_name }}.genesis.block.base64 - mkdir -p ./build/channel-artifacts - mv {{ channel_name}}.genesis.block.base64 ./build/channel-artifacts/ - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: refresh_cert is defined and refresh_cert == 'true' + first_orderer: "{{ component_services.orderers | map(attribute='name') | first }}" # Reset peers pods - name: "Reset peers pods" @@ -33,12 +31,12 @@ name: create/refresh_certs/reset_pod vars: pod_name: "{{ orderer.name | lower }}" - name: "{{ item.name | lower }}" - file_path: "{{ values_dir }}/orderer/{{ orderer.name | lower }}-{{ item.name | lower }}.yaml" - gitops_value: "{{ item.gitops }}" + name: "{{ org.name | lower }}" + file_path: "{{ values_dir }}/orderer/{{ orderer.name | lower }}-{{ org.name | lower }}.yaml" + gitops_value: "{{ org.gitops }}" component_ns: "{{ namespace }}" - kubernetes: "{{ item.k8s }}" - hr_name: "{{ item.name | lower }}-{{ orderer.name }}" + kubernetes: "{{ org.k8s }}" + hr_name: "{{ org.name | lower }}-{{ orderer.name }}" loop: "{{ component_services.orderers }}" loop_control: loop_var: orderer @@ -50,7 +48,7 @@ name: helm_component vars: name: "orderer" - org_name: "{{ item.name | lower }}" + org_name: "{{ org.name | lower }}" sc_name: "{{ org_name }}-bevel-storageclass" component_name: "zkkafka" type: "zkkafka" @@ -63,36 +61,18 @@ name: helm_component vars: name: "orderer" - org_name: "{{ item.name | lower }}" - sc_name: "{{ org_name }}-bevel-storageclass" - component_name: "{{ orderer.name }}-{{ org_name }}" - type: "orderers" - consensus: "{{component_services.consensus}}" - genesis: "{{ lookup('file', '{{ build_path }}/channel-artifacts/{{ channel_name}}.genesis.block.base64') }}" - loop: "{{ component_services.orderers }}" - loop_control: - loop_var: orderer - when: - - component_services.orderers is defined and component_services.consensus is defined - - (orderer.status is not defined or orderer.status == 'new') and '2.5.' not in network.version - -# Create the value file for the Orderers as per requirements mentioned in network.yaml -- name: "create orderers" - include_role: - name: helm_component - vars: - name: "orderer" - org_name: "{{ item.name | lower }}" - sc_name: "{{ org_name }}-bevel-storageclass" - component_name: "{{ orderer.name }}-{{ org_name }}" + org_name: "{{ org.name | lower }}" + component_name: "{{ orderer.name | lower }}" type: "orderers" consensus: "{{component_services.consensus}}" + component_subject: "{{ org.subject | quote }}" + create_configmaps: "{{ true if first_orderer == orderer.name else false }}" loop: "{{ component_services.orderers }}" loop_control: loop_var: orderer when: - component_services.orderers is defined and component_services.consensus is defined - - (orderer.status is not defined or orderer.status == 'new') and '2.5.' 
in network.version + - orderer.status is not defined or orderer.status == 'new' # Git Push: Push the above generated files to git directory - name: Git Push @@ -100,7 +80,37 @@ name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" vars: GIT_DIR: "{{ playbook_dir }}/../../../" - gitops: "{{ item.gitops }}" + gitops: "{{ org.gitops }}" msg: "[ci skip] Pushing Orderer files" tags: - notest + +# Wait for the CA key to exist in Vault +- name: Wait for CA key to exist in Vault + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/setup" + vars: + vault_field: "rootca_key" + vault_path: "{{ vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/ca" + check: "crypto_materials" + +# Wait for the admin TLS cert to exist in Vault +- name: Wait for admin TLS cert to exist in Vault + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/setup" + vars: + vault_field: "client_key" + vault_path: "{{ vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/users/admin-tls" + check: "crypto_materials" + +# Wait for the orderer TLS certs to exist in Vault +- name: Wait for orderer TLS cert to exist in Vault + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/setup" + vars: + vault_field: "server_key" + vault_path: "{{ vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/orderers/{{ orderer.name }}-tls" + check: "crypto_materials" + loop: "{{ org.services.orderers }}" + loop_control: + loop_var: orderer diff --git a/platforms/hyperledger-fabric/configuration/roles/create/osnchannels/tasks/main.yaml b/platforms/hyperledger-fabric/configuration/roles/create/osnchannels/tasks/main.yaml index 714f89a8a2b..6c9fde965c3 100644 --- a/platforms/hyperledger-fabric/configuration/roles/create/osnchannels/tasks/main.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/create/osnchannels/tasks/main.yaml @@ -17,7 +17,8 @@ vars: org_creator_channels: "{{ item.osn_creator_org.name }}" channel_name: "{{ item.channel_name | lower }}" + kubernetes: "{{ org.k8s }}" loop: "{{ network['organizations'] }}" loop_control: loop_var: org - when: org.type == 'orderer' and org_creator_channels == org.name + when: (org.services.orderers is defined and org.services.orderers | length > 0) and org_creator_channels == org.name diff --git a/platforms/hyperledger-fabric/configuration/roles/create/osnchannels/tasks/valuefile.yaml b/platforms/hyperledger-fabric/configuration/roles/create/osnchannels/tasks/valuefile.yaml index d146f724195..3f51ae92dc3 100644 --- a/platforms/hyperledger-fabric/configuration/roles/create/osnchannels/tasks/valuefile.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/create/osnchannels/tasks/valuefile.yaml @@ -33,7 +33,7 @@ # Create the value file for creator Organization - name: "Create Create_Channel value file osnadmin" include_role: - name: helm_component + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/job_component" vars: name: "{{ org.name | lower }}" type: "osn_create_channel_job" @@ -46,7 +46,6 @@ vault: "{{ org.vault }}" k8s: "{{ org.k8s }}" orderers_list: "{{ org.services.orderers }}" - genesis: "{{ lookup('file', '{{ build_path }}/channel-artifacts/{{ channel_name }}.genesis.block.base64') }}" values_dir: "{{playbook_dir}}/../../../{{org.gitops.release_dir}}/{{ org.name | lower }}" add_orderer_value: "{{ add_orderer | default('false') }}" when: add_orderer is not defined or add_orderer == false @@ -54,7 +53,7 @@ # Create the value
file for creator Organization - name: "Create Create_Channel value file osnadmin" include_role: - name: helm_component + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/job_component" vars: name: "{{ org.name | lower }}" type: "osn_create_channel_job" @@ -82,6 +81,7 @@ gitops: "{{ org.gitops }}" msg: "[ci skip] Pushing channel-create files" + # Check or wait for the create channel job to complete - name: "waiting for {{ org.name }} to create channel {{ channel_name }}" include_role: @@ -89,5 +89,5 @@ vars: component_type: "Job" namespace: "{{ org.name | lower}}-net" - component_name: "osn-createchannel-{{ channel_name }}" + component_name: "{{ channel_name }}" kubernetes: "{{ org.k8s }}" diff --git a/platforms/hyperledger-fabric/configuration/roles/create/peers/tasks/main.yaml b/platforms/hyperledger-fabric/configuration/roles/create/peers/tasks/main.yaml index 70916bc512e..9cafa57600a 100644 --- a/platforms/hyperledger-fabric/configuration/roles/create/peers/tasks/main.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/create/peers/tasks/main.yaml @@ -22,17 +22,22 @@ name: create/refresh_certs/reset_pod vars: pod_name: "{{ peer.name | lower }}" - name: "{{ item.name | lower }}" + name: "{{ org.name | lower }}" file_path: "{{ values_dir }}/{{ name }}/values-{{ peer.name }}.yaml" - gitops_value: "{{ item.gitops }}" + gitops_value: "{{ org.gitops }}" component_ns: "{{ namespace }}" - kubernetes: "{{ item.k8s }}" - hr_name: "{{ item.name | lower }}-{{ peer.name }}" + kubernetes: "{{ org.k8s }}" + hr_name: "{{ org.name | lower }}-{{ peer.name }}" loop: "{{ component_services.peers }}" loop_control: loop_var: peer when: (refresh_cert is defined and refresh_cert == 'true') or peer.configpath is defined +# Set Variable first_orderer +- name: "Set Variable first_peer" + set_fact: + first_peer: "{{ component_services.peers | map(attribute='name') | first }}" + # Create the value file for peers of organisations - name: This role creates the value file for peers of organisations include_tasks: nested_main.yaml @@ -46,45 +51,21 @@ name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" vars: GIT_DIR: "{{ playbook_dir }}/../../../" - gitops: "{{ item.gitops }}" + gitops: "{{ org.gitops }}" msg: "[ci skip] Pushing Peer files" - tags: - - notest - -# Wait for the peer HelmRelease -- name: 'Wait for {{ peer.name }} HelmRelease in {{ namespace }}' - k8s_info: - api_version: "helm.toolkit.fluxcd.io/v2beta1" - kind: "HelmRelease" - namespace: "{{ item.name | lower }}-net" - kubeconfig: "{{ item.k8s.config_file }}" - context: "{{ item.k8s.context }}" - name: "{{ item.name | lower }}-{{ peer.name }}" - field_selectors: - - status.conditions=Ready - register: component_data - retries: "{{ network.env.retry_count}}" - delay: 30 - until: component_data.resources|length > 0 - loop: "{{ component_services.peers }}" - loop_control: - loop_var: peer - when: (refresh_cert is defined and refresh_cert == 'true') or peer.configpath is defined # Wait for peer pods to be in the state of running -- name: "Waiting for peer pod {{ peer.name }} in {{ item.name | lower }}-net" +- name: "Waiting for peer pod {{ peer.name }} in {{ org.name | lower }}-net" include_role: name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" vars: component_type: "Pod" - namespace: "{{ item.name | lower }}-net" + namespace: "{{ org.name | lower }}-net" component_name: "{{ peer.name }}" - kubernetes: "{{ item.k8s }}" + kubernetes: "{{ org.k8s }}" label_selectors: - app = {{ 
component_name }} loop: "{{ component_services.peers }}" loop_control: loop_var: peer when: peer.peerstatus is not defined or peer.peerstatus == 'new' - tags: - - notest diff --git a/platforms/hyperledger-fabric/configuration/roles/create/peers/tasks/nested_main.yaml b/platforms/hyperledger-fabric/configuration/roles/create/peers/tasks/nested_main.yaml index e72388ae9b8..daa8d3198dc 100644 --- a/platforms/hyperledger-fabric/configuration/roles/create/peers/tasks/nested_main.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/create/peers/tasks/nested_main.yaml @@ -1,47 +1,83 @@ -# Remove existing value file for peer -- change to not use shell +# Remove existing value file for peer - name: Remove existing value file for {{ peer.name }} shell: | - rm -f {{ values_dir }}/{{ item.name | lower }}/values-{{ peer.name }}.yaml + rm -f {{ values_dir }}/{{ org.name | lower }}/values-{{ peer.name }}.yaml when: - add_peer is not defined or add_peer == 'false' +# Create the certs directory if it does not exist +- name: Create the certs directory if it does not exist + file: + path: "{{playbook_dir}}/../../../{{ charts_dir }}/fabric-peernode/files" + state: directory + # Fetch the custom core.yaml - name: Fetch the custom core.yaml shell: | - cat {{ peer.configpath }} | base64 > {{ playbook_dir }}/build/{{ peer.name }}_{{ item.name | lower }}_core.yaml.base64 + cat {{ peer.configpath }} > {{playbook_dir}}/../../../{{ charts_dir }}/fabric-peernode/conf/default_core.yaml register: core_yaml_file when: - peer.configpath is defined +# Get orderer tls cacert from configmap +- name: check crypto scripts already exists + kubernetes.core.k8s_info: + kubeconfig: "{{ kubernetes.config_file }}" + kind: ConfigMap + name: "crypto-scripts-cm" + namespace: "{{ org.name | lower }}-net" + register: crypto_scripts_data + +# Get orderer tls cacert from configmap +- name: Get orderer tls cacert from config map + kubernetes.core.k8s_info: + kubeconfig: "{{ kubernetes.config_file }}" + kind: ConfigMap + name: "orderer-tls-cacert" + namespace: "{{ org.orderer_org | lower }}-net" + register: tls_cert_data + when: org.orderer_org != org.name + +- name: Create new ConfigMap with tls_cert_data + kubernetes.core.k8s: + kubeconfig: "{{ kubernetes.config_file }}" + definition: + apiVersion: v1 + kind: ConfigMap + metadata: + name: "orderer-tls-cacert" + namespace: "{{ org.name | lower }}-net" + data: + cacert: "{{ tls_cert_data.resources[0].data['cacert'] }}" + when: org.orderer_org != org.name + +# Set Variable sc_name +- name: "Set Variable sc_name" + set_fact: + sc_name: "storage-{{ peer.name }}" + +- name: Get information about StorageClasses + kubernetes.core.k8s_info: + kind: StorageClass + api_version: storage.k8s.io/v1 + namespace: default + register: storage_classes_info + # Create Value files for Organization Peers - name: Create Value files for Organization Peers include_role: name: helm_component vars: - name: "{{ item.name | lower }}" - sc_name: "{{ name }}-bevel-storageclass" + name: "{{ org.name | lower }}" type: "value_peer" - component_name: values-{{ peer.name }} - peer_name: "{{ peer.name }}" - peer_ns: "{{ namespace }}" - provider: "{{ network.env.proxy }}" + component_name: "{{ peer.name }}" + component_subject: "{{ org.subject }}" + component_ns: "{{ namespace }}" + provider: "{{ org.cloud_provider }}" + orderer: "{{ network.orderers | first }}" + user_list: "{{ org.users | default('') }}" + enabled_cli: "{{ true if peer.cli == 'enabled' else false }}" + sc_enabled: "{{ false if 
storage_classes_info.resources | selectattr('metadata.name', 'equalto', sc_name) | list else true }}" + create_configmaps: "{{ true if (first_peer == peer.name) and (crypto_scripts_data.resources | length == 0) else false }}" when: - peer.peerstatus is not defined or peer.peerstatus == 'new' - - peer.configpath is not defined - -# Create Value files for Organization Peers - external -- name: Create Value files for Organization Peers - external - include_role: - name: helm_component - vars: - name: "{{ item.name | lower }}" - sc_name: "{{ name }}-bevel-storageclass" - type: "value_peer" - component_name: values-{{ peer.name }} - peer_name: "{{ peer.name }}" - peer_ns: "{{ namespace }}" - provider: "{{ network.env.proxy }}" - core_file: "{{ lookup('file', '{{ playbook_dir }}/build/{{ peer.name }}_{{ item.name | lower }}_core.yaml.base64') }}" - when: - - peer.peerstatus is not defined or peer.peerstatus == 'new' - - peer.configpath is defined diff --git a/platforms/hyperledger-fabric/configuration/roles/create/refresh_certs/create_channel_block/tasks/main.yaml b/platforms/hyperledger-fabric/configuration/roles/create/refresh_certs/create_channel_block/tasks/main.yaml index 039b80cd55b..6261d71565e 100644 --- a/platforms/hyperledger-fabric/configuration/roles/create/refresh_certs/create_channel_block/tasks/main.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/create/refresh_certs/create_channel_block/tasks/main.yaml @@ -19,7 +19,6 @@ channel_name: "{{ sys_channel_name }}" orderer: "{{ org.services.orderers | first }}" check: "latest_block" - when: add_new_org == 'false' and ('2.2.' in network.version or '1.4.' in network.version) # Call get_update_block to get the latest from appchannel channel block. - name: Call get_update_block to get latest appchannel block @@ -38,23 +37,10 @@ path: "{{ build_path }}/channel-artifacts" state: directory -# Create the genesis block by consuming the latest config block for 2.5.x fabric versions -- name: "Create genesis block" - shell: | - cat {{ build_path }}/{{ channel.channel_name | lower }}_config_block.pb | base64 > {{ build_path }}/channel-artifacts/{{ channel.channel_name | lower }}.genesis.block.base64 - loop: "{{ network.channels }}" - loop_control: - loop_var: channel - when: add_new_org == 'false' and ('2.5.' in network.version) - # Create the genesis block by consuming the latest config block - name: "Create genesis block" shell: | - cat {{ build_path }}/{{ sys_channel_name | lower }}_config_block.pb | base64 > {{ build_path }}/channel-artifacts/{{ channel.channel_name | lower }}.genesis.block.base64 - loop: "{{ network.channels }}" - loop_control: - loop_var: channel - when: add_new_org == 'false' and ('2.2.' in network.version or '1.4.' in network.version) + cat {{ build_path }}/{{ sys_channel_name }}_config_block.pb | base64 > {{ build_path }}/channel-artifacts/{{ channel.channel_name | lower }}.genesis.block.base64 # Add new genesis block to the vault - name: "Write genesis block to Vault" @@ -63,10 +49,6 @@ environment: VAULT_ADDR: "{{ org.vault.url }}" VAULT_TOKEN: "{{ org.vault.root_token }}" - loop: "{{ network.channels }}" - loop_control: - loop_var: channel - when: add_new_org == 'false' and ('2.5.' 
in network.version) # Delete the orderer cli - name: "Delete all temp {{ orderer.name }}-{{ org.name }}-cli" diff --git a/platforms/hyperledger-fabric/configuration/roles/create/refresh_certs/create_channel_block/tasks/nested_create_cli.yaml b/platforms/hyperledger-fabric/configuration/roles/create/refresh_certs/create_channel_block/tasks/nested_create_cli.yaml index 431afcf6788..93073adc660 100644 --- a/platforms/hyperledger-fabric/configuration/roles/create/refresh_certs/create_channel_block/tasks/nested_create_cli.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/create/refresh_certs/create_channel_block/tasks/nested_create_cli.yaml @@ -52,7 +52,6 @@ script: "syschannel-update-script.sh" channel_name: "{{ sys_channel_name }}" check: "fetch_block" - when: add_new_org == 'false' and ('1.4.' in network.version or '2.2.' in network.version) # Call get_update_block to fetch the appchannel channels block - name: Call get_update_block to fetch the {{ channel_name }} channel block @@ -72,7 +71,6 @@ script: "syschannel-update-script.sh" channel_name: "{{ sys_channel_name }}" check: "update_block" - when: add_new_org == 'false' and ('1.4.' in network.version or '2.2.' in network.version) # Call get_update_block to fetch the appchannel channels block - name: Call get_update_block to update the {{ channel_name }} channel block diff --git a/platforms/hyperledger-fabric/configuration/roles/create/secrets/tasks/main.yaml b/platforms/hyperledger-fabric/configuration/roles/create/secrets/tasks/main.yaml new file mode 100644 index 00000000000..cc31dd73c32 --- /dev/null +++ b/platforms/hyperledger-fabric/configuration/roles/create/secrets/tasks/main.yaml @@ -0,0 +1,32 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
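# For reference: a minimal sketch (not part of the patch) of the Vault round-trip used for channel genesis/config blocks in the tasks above: the block is base64-encoded, stored with `vault kv put`, and read back with `vault kv get -field=...`. The mount, paths, field name and channel name are illustrative placeholders; VAULT_ADDR and VAULT_TOKEN are assumed to be exported.
CHANNEL=allchannel
BUILD=./build
mkdir -p ${BUILD}/channel-artifacts
# Base64-encode the fetched block so it can be stored as a KV field
cat ${BUILD}/${CHANNEL}_config_block.pb | base64 > ${BUILD}/channel-artifacts/${CHANNEL}.genesis.block.base64
# Store it under the org's secret path (KV v2 mount named secretsv2 here)
vault kv put secretsv2/org1/ordererOrganizations/org1-net/${CHANNEL} devGenesisBlock=@${BUILD}/channel-artifacts/${CHANNEL}.genesis.block.base64
# Read the field back and decode it when a node needs the raw block
vault kv get -field=devGenesisBlock secretsv2/org1/ordererOrganizations/org1-net/${CHANNEL} | base64 -d > ${BUILD}/channel-artifacts/${CHANNEL}.genesis.block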
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Wait for namespace to be created by flux +- name: "Wait for the namespace {{ component_ns }} to be created" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/k8_component" + vars: + component_type: "Namespace" + component_name: "{{ component_ns }}" + type: "retry" + +# Create the vault roottoken secret +- name: "Create vault token secret" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/shared_k8s_secrets" + vars: + namespace: "{{ component_ns }}" + check: "token_secret" + +# Create the docker pull credentials for image registry +- name: "Create docker credentials secret" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/shared_k8s_secrets" + vars: + namespace: "{{ component_ns }}" + check: "docker_credentials" + when: + - network.docker.username is defined diff --git a/platforms/hyperledger-fabric/configuration/roles/delete/genesis/tasks/main.yaml b/platforms/hyperledger-fabric/configuration/roles/delete/genesis/tasks/main.yaml index 467bea7b330..68abdb3bfda 100644 --- a/platforms/hyperledger-fabric/configuration/roles/delete/genesis/tasks/main.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/delete/genesis/tasks/main.yaml @@ -9,10 +9,29 @@ ############################################################################################# # Delete BASE 64 encoded genesis blocks for all channels -- name: Delete genesis block from Vault +- name: Delete genesis block from Vault for syschannel shell: | - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/ordererOrganizations/{{ component_name }} + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ item.name | lower }}/channel-artifacts/syschannel-genesis + target_path={{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ item.name | lower }}/channel-artifacts; + for key in $(vault kv list -format=json "$target_path" | jq -r '.[]'); do + vault kv delete "$target_path/$key"; + done environment: VAULT_ADDR: "{{ item.vault.url }}" VAULT_TOKEN: "{{ item.vault.root_token }}" ignore_errors: yes + +# Delete genesis block from Vault +- name: "Delete genesis block from Vault for app channel" + shell: | + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ item.name | lower }}/channel-artifacts/{{ channel.channel_name | lower }}-genesis + target_path={{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ item.name | lower }}/channel-artifacts; + for key in $(vault kv list -format=json "$target_path" | jq -r '.[]'); do + vault kv delete "$target_path/$key"; + done + environment: + VAULT_ADDR: "{{ item.vault.url }}" + VAULT_TOKEN: "{{ item.vault.root_token }}" + loop: "{{ network['channels'] }}" + loop_control: + loop_var: channel diff --git a/platforms/hyperledger-fabric/configuration/roles/delete/operator/tasks/delete_channel.yaml b/platforms/hyperledger-fabric/configuration/roles/delete/operator/tasks/delete_channel.yaml index 1bf5d582457..587f76f42f3 100644 --- a/platforms/hyperledger-fabric/configuration/roles/delete/operator/tasks/delete_channel.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/delete/operator/tasks/delete_channel.yaml @@ -13,7 +13,7 @@ state: absent kubeconfig: "{{ kubernetes.config_file }}" context: "{{
kubernetes.context }}" - ignore_errors: yes + ignore_errors: true # Delete main channel - name: Delete main channel @@ -24,4 +24,4 @@ state: absent kubeconfig: "{{ kubernetes.config_file }}" context: "{{ kubernetes.context }}" - ignore_errors: yes + ignore_errors: true diff --git a/platforms/hyperledger-fabric/configuration/roles/delete/vault_secrets/tasks/main.yaml b/platforms/hyperledger-fabric/configuration/roles/delete/vault_secrets/tasks/main.yaml index ecfa9600ef7..284c784eeeb 100644 --- a/platforms/hyperledger-fabric/configuration/roles/delete/vault_secrets/tasks/main.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/delete/vault_secrets/tasks/main.yaml @@ -17,7 +17,7 @@ state: absent kubeconfig: "{{ kubernetes.config_file }}" context: "{{ kubernetes.context }}" - ignore_errors: yes + ignore_errors: true # Delete vault auth - name: Delete vault-auth path @@ -39,19 +39,18 @@ # Delete crypto materials from vault - name: Delete Crypto for orderers shell: | - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/ordererOrganizations/{{ component_name }}/ca - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/ordererOrganizations/{{ component_name }}/orderers/{{orderer.name}}.{{ component_name }}/tls - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/ordererOrganizations/{{ component_name }}/orderers/{{orderer.name}}.{{ component_name }}/msp - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/ordererOrganizations/{{ component_name }}/users/admin/tls - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/ordererOrganizations/{{ component_name }}/users/admin/msp - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/credentials/{{ component_name }}/ca/{{ org_name }} + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/ca + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/orderers/{{ orderer.name | lower }}-tls + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/orderers/{{ orderer.name | lower }}-msp + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/users/admin-tls + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/users/admin-msp loop: "{{ services.orderers }}" loop_control: loop_var: orderer environment: VAULT_ADDR: "{{ item.vault.url }}" VAULT_TOKEN: "{{ item.vault.root_token }}" - when: component_type == 'orderer' + when: item.services.orderers is defined and item.services.orderers | length > 0 # Delete crypto materials from extternalchaincode - name: Delete Crypto for peers @@ -69,33 +68,26 @@ environment: VAULT_ADDR: "{{ item.vault.url }}" VAULT_TOKEN: "{{ item.vault.root_token }}" - when: component_type == 'peer' + when: + - item.services.peers is defined and item.services.peers | length > 0 + - peer.chaincodes is defined # Delete crypto materials from vault - name: Delete Crypto for peers shell: | - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/ca - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name 
}}/users/admin/tls - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/users/admin/msp - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/orderer/tls - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/msp/config + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/ca + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/users/admin-tls + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/users/admin-msp {% for peer in peers %} - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/peers/{{peer.name}}.{{ component_name }}/tls - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/peers/{{peer.name}}.{{ component_name }}/msp + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/peers/{{peer.name}}-tls + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/peers/{{peer.name}}-msp {% endfor %} - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/credentials/{{ component_name }}/ca/{{ org_name }} - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/credentials/{{ component_name }}/couchdb/{{ org_name }} - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/credentials/{{ component_name }}/git - target_path={{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/endorsers/{{ org_name }}/users; - for key in $(vault kv list -format=json "$target_path" | jq -r '.[]'); do - vault kv delete "$target_path/$key/msp"; - done vars: peers: "{{ services.peers }}" environment: VAULT_ADDR: "{{ item.vault.url }}" VAULT_TOKEN: "{{ item.vault.root_token }}" - when: component_type == 'peer' + when: item.services.peers is defined and item.services.peers | length > 0 # Remove all endorsers - name: Remove all endorsers @@ -109,37 +101,23 @@ environment: VAULT_ADDR: "{{ item.vault.url }}" VAULT_TOKEN: "{{ item.vault.root_token }}" - ignore_errors: yes - -# Delete genesis block to Vault -- name: "Delete genesis block to Vault" - shell: | - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/ordererOrganizations/{{ component_name }}/{{ channel.channel_name | lower }} - environment: - VAULT_ADDR: "{{ item.vault.url }}" - VAULT_TOKEN: "{{ item.vault.root_token }}" - loop: "{{ network['channels'] }}" - loop_control: - loop_var: channel + ignore_errors: true # Delete crypto materials from vault - name: Delete Crypto for users shell: | {% for user in users %} - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/users/{{ user.identity }}/tls - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/users/{{ user.identity }}/msp + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{
network.env.type }}{{ org_name }}/users/{{ user.identity }}-tls + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/users/{{ user.identity }}-msp {% endfor %} - target_path={{ item.vault.secret_path | default('secret') }}/{{ item.name | lower }}/peerOrganizations/{{ component_name }}/users; - for key in $(vault kv list -format=json "$target_path" | jq -r '.[]'); do - vault kv delete "$target_path/$key/msp"; - vault kv delete "$target_path/$key/tls"; - done vars: users: "{{ item.users }}" environment: VAULT_ADDR: "{{ item.vault.url }}" VAULT_TOKEN: "{{ item.vault.root_token }}" - when: component_type == 'peer' and item.users is defined + when: + - item.services.peers is defined and item.services.peers | length > 0 + - item.users is defined # Delete policy - name: Delete policy @@ -148,4 +126,4 @@ environment: VAULT_ADDR: "{{ item.vault.url }}" VAULT_TOKEN: "{{ item.vault.root_token }}" - ignore_errors: yes + ignore_errors: true diff --git a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/anchorpeer_job.tpl b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/anchorpeer_job.tpl deleted file mode 100644 index daf8a46e8c6..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/anchorpeer_job.tpl +++ /dev/null @@ -1,59 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: {{ component_name }} - namespace: {{ component_ns }} - annotations: - fluxcd.io/automated: "false" -spec: - interval: 1m - releaseName: {{ component_name }} - chart: - spec: - interval: 1m - sourceRef: - kind: GitRepository - name: flux-{{ network.env.type }} - namespace: flux-{{ network.env.type }} - chart: {{ charts_dir }}/fabric-anchorpeer - values: - metadata: - namespace: {{ component_ns }} - network: - version: {{ network.version }} - images: - fabrictools: {{ docker_url }}/{{ fabric_tools_image[network.version] }} - alpineutils: {{ docker_url }}/{{ alpine_image }} - - peer: - name: {{ peer_name }} -{% if network.env.proxy == 'none' %} - address: {{ peer.name }}.{{ component_ns }}:7051 -{% else %} - address: {{ peer.peerAddress }} -{% endif %} - localmspid: {{ org.name | lower}}MSP - loglevel: debug - tlsstatus: true - - vault: - role: vault-role - address: {{ vault.url }} - authpath: {{ k8s.cluster_id | default('')}}{{ network.env.type }}{{ org.name | lower }} - adminsecretprefix: {{ vault.secret_path | default('secretsv2') }}/data/{{ org.name | lower }}/peerOrganizations/{{ component_ns }}/users/admin - orderersecretprefix: {{ vault.secret_path | default('secretsv2') }}/data/{{ org.name | lower }}/peerOrganizations/{{ component_ns }}/orderer - serviceaccountname: vault-auth - type: {{ vault.type | default("hashicorp") }} -{% if network.docker.username is defined and network.docker.password is defined %} - imagesecretname: regcred -{% else %} - imagesecretname: "" -{% endif %} - - channel: - name: {{channel_name}} - orderer: - address: {{ participant.ordererAddress }} - anchorstx: |- -{{ anchorstx | indent(width=6, first=True) }} - diff --git a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/approve_chaincode_job.tpl b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/approve_chaincode_job.tpl index b410289091e..6fcaf8b3e6c 100644 --- a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/approve_chaincode_job.tpl +++ 
b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/approve_chaincode_job.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/ca-orderer.tpl b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/ca-orderer.tpl deleted file mode 100644 index 953cd401b1b..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/ca-orderer.tpl +++ /dev/null @@ -1,82 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: {{ component_name }}-ca - namespace: {{ component_name }} - annotations: - fluxcd.io/automated: "false" -spec: - interval: 1m - releaseName: {{ component_name }}-ca - chart: - spec: - interval: 1m - sourceRef: - kind: GitRepository - name: flux-{{ network.env.type }} - namespace: flux-{{ network.env.type }} - chart: {{ charts_dir }}/fabric-ca-server - values: -{% if network.env.annotations is defined %} - deployment: - annotations: -{% for item in network.env.annotations.deployment %} -{% for key, value in item.items() %} - - {{ key }}: {{ value | quote }} -{% endfor %} -{% endfor %} - annotations: - service: -{% for item in network.env.annotations.service %} -{% for key, value in item.items() %} - - {{ key }}: {{ value | quote }} -{% endfor %} -{% endfor %} - pvc: -{% for item in network.env.annotations.pvc %} -{% for key, value in item.items() %} - - {{ key }}: {{ value | quote }} -{% endfor %} -{% endfor %} -{% endif %} - metadata: - namespace: {{ component_name | e }} - images: - alpineutils: {{ docker_url }}/{{ alpine_image }} - ca: {{ docker_url }}/{{ ca_image[network.version] }} - server: - name: {{ component_services.ca.name }} - tlsstatus: true - admin: {{ component }}-admin -{% if component_services.ca.configpath is defined %} - configpath: conf/fabric-ca-server-config-{{ component }}.yaml -{% endif %} - storage: - storageclassname: {{ sc_name }} - storagesize: 512Mi - vault: - role: vault-role - address: {{ vault.url }} - authpath: {{ item.k8s.cluster_id | default('')}}{{ network.env.type }}{{ item.name | lower }} - secretcert: {{ vault.secret_path | default('secretsv2') }}/data/{{ item.name | lower }}/ordererOrganizations/{{ component_name | e }}/ca?ca.{{ component_name | e }}-cert.pem - secretkey: {{ vault.secret_path | default('secretsv2') }}/data/{{ item.name | lower }}/ordererOrganizations/{{ component_name | e }}/ca?{{ component_name | e }}-CA.key - secretadminpass: {{ vault.secret_path | default('secretsv2') }}/data/{{ item.name | lower }}/credentials/{{ component_name | e }}/ca/{{ component }}?user - serviceaccountname: vault-auth - type: {{ vault.type | default("hashicorp") }} -{% if network.docker.username is defined and network.docker.password is defined %} - imagesecretname: regcred -{% else %} - imagesecretname: "" -{% endif %} - service: - servicetype: ClusterIP - ports: - tcp: - clusteripport: {{ component_services.ca.grpc.port }} -{% if component_services.ca.grpc.nodePort is defined %} - nodeport: {{ component_services.ca.grpc.nodePort }} -{% endif %} - proxy: - provider: {{ network.env.proxy }} - type: orderer - external_url_suffix: {{ external_url_suffix }} diff --git a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/ca-peer.tpl b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/ca-peer.tpl deleted file mode 
100644 index 24ee0b9965d..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/ca-peer.tpl +++ /dev/null @@ -1,82 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: {{ component_name }}-ca - namespace: {{ component_name }} - annotations: - fluxcd.io/automated: "false" -spec: - interval: 1m - releaseName: {{ component_name }}-ca - chart: - spec: - interval: 1m - sourceRef: - kind: GitRepository - name: flux-{{ network.env.type }} - namespace: flux-{{ network.env.type }} - chart: {{ charts_dir }}/fabric-ca-server - values: - metadata: - namespace: {{ component_name | e }} - images: - alpineutils: {{ docker_url }}/{{ alpine_image }} - ca: {{ docker_url }}/{{ ca_image[network.version] }} -{% if network.env.annotations is defined %} - deployment: - annotations: -{% for item in network.env.annotations.deployment %} -{% for key, value in item.items() %} - - {{ key }}: {{ value | quote }} -{% endfor %} -{% endfor %} - annotations: - service: -{% for item in network.env.annotations.service %} -{% for key, value in item.items() %} - - {{ key }}: {{ value | quote }} -{% endfor %} -{% endfor %} - pvc: -{% for item in network.env.annotations.pvc %} -{% for key, value in item.items() %} - - {{ key }}: {{ value | quote }} -{% endfor %} -{% endfor %} -{% endif %} - server: - name: {{ component_services.ca.name }} - tlsstatus: true - admin: {{ component }}-admin -{% if component_services.ca.configpath is defined %} - configpath: conf/fabric-ca-server-config-{{ component }}.yaml -{% endif %} - storage: - storageclassname: {{ sc_name }} - storagesize: 512Mi - vault: - role: vault-role - address: {{ vault.url }} - authpath: {{ item.k8s.cluster_id | default('')}}{{ network.env.type }}{{ item.name | lower }} - secretcert: {{ vault.secret_path | default('secretsv2') }}/data/{{ item.name | lower }}/peerOrganizations/{{ component_name | e }}/ca?ca.{{ component_name | e }}-cert.pem - secretkey: {{ vault.secret_path | default('secretsv2') }}/data/{{ item.name | lower }}/peerOrganizations/{{ component_name | e }}/ca?{{ component_name | e }}-CA.key - secretadminpass: {{ vault.secret_path | default('secretsv2') }}/data/{{ item.name | lower }}/credentials/{{ component_name | e }}/ca/{{ component }}?user - serviceaccountname: vault-auth - type: {{ vault.type | default("hashicorp") }} -{% if network.docker.username is defined and network.docker.password is defined %} - imagesecretname: regcred -{% else %} - imagesecretname: "" -{% endif %} - service: - servicetype: ClusterIP - ports: - tcp: - clusteripport: {{ component_services.ca.grpc.port }} -{% if component_services.ca.grpc.nodePort is defined %} - nodeport: {{ component_services.ca.grpc.nodePort }} -{% endif %} - proxy: - provider: {{ network.env.proxy }} - type: peer - external_url_suffix: {{ external_url_suffix }} diff --git a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/ca-server.tpl b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/ca-server.tpl new file mode 100644 index 00000000000..c21506f4ce5 --- /dev/null +++ b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/ca-server.tpl @@ -0,0 +1,89 @@ +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: {{ component_name | replace('_','-') }} + namespace: {{ component_ns }} + annotations: + fluxcd.io/automated: "false" +spec: + interval: 1m + releaseName: {{ component_name | replace('_','-') }} + chart: + spec: + interval: 1m + sourceRef: + 
kind: GitRepository + name: flux-{{ network.env.type }} + namespace: flux-{{ network.env.type }} + chart: {{ charts_dir }}/fabric-ca-server + values: + global: + serviceAccountName: vault-auth + cluster: + provider: {{ org.cloud_provider }} + cloudNativeServices: false + kubernetesUrl: {{ kubernetes_url }} + vault: + type: hashicorp + network: fabric + address: {{ vault.url }} + authPath: {{ network.env.type }}{{ component }} + secretEngine: {{ vault.secret_path | default("secretsv2") }} + secretPrefix: "data/{{ network.env.type }}{{ component }}" + role: vault-role + tls: false + proxy: + provider: {{ network.env.proxy | quote }} + externalUrlSuffix: {{ org.external_url_suffix }} + + storage: + size: 512Mi + reclaimPolicy: "Delete" + volumeBindingMode: Immediate + allowedTopologies: + enabled: false + + image: + alpineUtils: {{ docker_url }}/bevel-alpine:{{ bevel_alpine_version }} + ca: {{ docker_url }}/{{ ca_image[network.version] }} +{% if network.docker.username is defined and network.docker.password is defined %} + pullSecret: regcred +{% else %} + pullSecret: "" +{% endif %} + + server: + removeCertsOnDelete: true + tlsStatus: true + adminUsername: {{ component }}-admin + adminPassword: {{ component }}-adminpw + subject: "{{ subject | quote }}" +{% if component_services.ca.configpath is defined %} + configPath: conf/fabric-ca-server-config-{{ component }}.yaml +{% endif %} +{% if component_services.ca.grpc.nodePort is defined %} + nodePort: {{ component_services.ca.grpc.nodePort }} +{% endif %} + clusterIpPort: {{ component_services.ca.grpc.port }} + +{% if network.env.labels is defined %} + labels: +{% if network.env.labels.service is defined %} + service: +{% for key in network.env.labels.service.keys() %} + - {{ key }}: {{ network.env.labels.service[key] | quote }} +{% endfor %} +{% endif %} +{% if network.env.labels.pvc is defined %} + pvc: +{% for key in network.env.labels.pvc.keys() %} + - {{ key }}: {{ network.env.labels.pvc[key] | quote }} +{% endfor %} +{% endif %} +{% if network.env.labels.deployment is defined %} + deployment: +{% for key in network.env.labels.deployment.keys() %} + - {{ key }}: {{ network.env.labels.deployment[key] | quote }} +{% endfor %} +{% endif %} +{% endif %} diff --git a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/ca-tools.tpl b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/ca-tools.tpl deleted file mode 100644 index e8426fda057..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/ca-tools.tpl +++ /dev/null @@ -1,119 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: {{ component_name }}-ca-tools - namespace: {{ component_name }} - annotations: - fluxcd.io/automated: "false" -spec: - interval: 1m - releaseName: {{ component_name }}-ca-tools - chart: - spec: - interval: 1m - sourceRef: - kind: GitRepository - name: flux-{{ network.env.type }} - namespace: flux-{{ network.env.type }} - chart: {{ charts_dir }}/fabric-catools - values: - metadata: - namespace: {{ component_name }} - name: ca-tools - component_type: {{ component_type }} - org_name: {{ org_name }} - proxy: {{ proxy }} -{% if network.env.annotations is defined %} - annotations: - service: -{% for item in network.env.annotations.service %} -{% for key, value in item.items() %} - - {{ key }}: {{ value | quote }} -{% endfor %} -{% endfor %} - pvc: -{% for item in network.env.annotations.pvc %} -{% for key, value in item.items() %} - - {{ key }}: {{ value | 
quote }} -{% endfor %} -{% endfor %} - deployment: -{% for item in network.env.annotations.deployment %} -{% for key, value in item.items() %} - - {{ key }}: {{ value | quote }} -{% endfor %} -{% endfor %} -{% endif %} - replicaCount: 1 - - image: - alpineutils: {{ docker_url }}/{{ alpine_image }} - catools: {{ docker_url }}/{{ ca_tools_image }} - pullPolicy: IfNotPresent - - storage: - storageclassname: {{ sc_name }} - storagesize: 512Mi - - vault: - role: vault-role - address: {{ vault.url }} - authpath: {{ item.k8s.cluster_id | default('')}}{{ network.env.type }}{{ item.name | lower }} - secretusers: {{ vault.secret_path | default('secretsv2') }}/data/{{ item.name | lower }}/{{ component_type }}Organizations/{{ component_name }}/users - secretorderer: {{ vault.secret_path | default('secretsv2') }}/data/{{ item.name | lower }}/{{ component_type }}Organizations/{{ component_name }}/orderers - secretpeer: {{ vault.secret_path | default('secretsv2') }}/data/{{ item.name | lower }}/{{ component_type }}Organizations/{{ component_name }}/peers - secretpeerorderertls: {{ vault.secret_path | default('secretsv2') }}/data/{{ item.name | lower }}/{{ component_type }}Organizations/{{ component_name }}/orderer/tls - secretcert: {{ vault.secret_path | default('secretsv2') }}/data/{{ item.name | lower }}/{{ component_type }}Organizations/{{ component_name | e }}/ca?ca.{{ component_name | e }}-cert.pem - secretkey: {{ vault.secret_path | default('secretsv2') }}/data/{{ item.name | lower }}/{{ component_type }}Organizations/{{ component_name | e }}/ca?{{ component_name | e }}-CA.key - secretcouchdb: {{ vault.secret_path | default('secretsv2') }}/data/{{ item.name | lower }}/credentials/{{ component_name }}/couchdb/{{ org_name }} - secretconfigfile: {{ vault.secret_path | default('secretsv2') }}/data/{{ item.name | lower }}/{{ component_type }}Organizations/{{ component_name | e }}/msp/config - serviceaccountname: vault-auth - type: {{ vault.type | default("hashicorp") }} -{% if network.docker.username is defined and network.docker.password is defined %} - imagesecretname: regcred -{% else %} - imagesecretname: "" -{% endif %} - - healthcheck: - retries: 10 - sleepTimeAfterError: 2 - - - org_data: - external_url_suffix: {{ external_url_suffix }} - component_subject: {{ component_subject }} - cert_subject: {{ cert_subject }} - component_country: {{ component_country }} - component_state: {{ component_state }} - component_location: {{ component_location }} - ca_url: {{ ca_url }} - - orderers: - name: {% for orderer in orderers_list %}{% for key, value in orderer.items() %}{% if key == 'name' %}{{ value }}-{% endif %}{% endfor %}{% endfor %} - -{% if item.type == 'peer' %} - orderers_info: -{% for orderer in orderers_list %} - - name: {{ orderer.name }} - path: "{{ lookup('file', orderer.certificate) | b64encode }}" -{% endfor %} - - peers: - name: {% for peer in peers_list %}{% for key, value in peer.items() %}{% if key == 'name' %}{{ value }},{% endif %}{% if key == 'peerstatus' %}{{ value }}{% endif %}{% endfor %}-{% endfor %} - - peer_count: "{{ peer_count }}" -{% if item.users is defined %} - users: - users_list: "{{ user_list | b64encode }}" - users_identities: {% for user in user_list %}{% for key, value in user.items() %}{% if key == 'identity' %}{{ value }}{% endif %}{% endfor %}-{% endfor %} -{% endif %} - -{% if add_peer_value == 'true' %} - new_peer_count: "{{ new_peer_count }}" -{% endif %} - checks: - refresh_cert_value: {{ refresh_cert_value }} - refresh_user_cert_value: {{ 
refresh_user_cert_value }} - add_peer_value: {{ add_peer_value }} -{% endif %} diff --git a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/cacerts_job.tpl b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/cacerts_job.tpl deleted file mode 100644 index b6d0b4caad5..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/cacerts_job.tpl +++ /dev/null @@ -1,41 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: {{ name }}-cacerts-job - namespace: {{ component_ns }} - annotations: - fluxcd.io/automated: "false" -spec: - interval: 1m - releaseName: {{ name }}-cacerts-job - chart: - spec: - interval: 1m - sourceRef: - kind: GitRepository - name: flux-{{ network.env.type }} - namespace: flux-{{ network.env.type }} - chart: {{ charts_dir }}/fabric-cacerts-gen - values: - metadata: - name: {{ component }} - component_name: {{ component }}-net - namespace: {{ component_ns }} - images: - alpineutils: {{ docker_url }}/{{ alpine_image }} - vault: - role: vault-role - address: {{ vault.url }} - authpath: {{ item.k8s.cluster_id | default('')}}{{ network.env.type }}{{ item.name | lower }} - secretcryptoprefix: {{ vault.secret_path | default('secretsv2') }}/data/{{ item.name | lower }}/{{ component_type }}Organizations/{{ component }}-net/ca - secretcredentialsprefix: {{ vault.secret_path | default('secretsv2') }}/data/{{ item.name | lower }}/credentials/{{ component }}-net/ca/{{ component }} - serviceaccountname: vault-auth - type: {{ vault.type | default("hashicorp") }} -{% if network.docker.username is defined and network.docker.password is defined %} - imagesecretname: regcred -{% else %} - imagesecretname: "" -{% endif %} - - ca: - subject: {{ subject }} diff --git a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/cli.tpl b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/cli.tpl index 98cfaf2273a..fe18c283712 100644 --- a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/cli.tpl +++ b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/cli.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} @@ -50,3 +50,24 @@ spec: {% endif %} orderer: address: {{ orderer.uri }} +{% if network.env.labels is defined %} + labels: +{% if network.env.labels.service is defined %} + service: +{% for key in network.env.labels.service.keys() %} + - {{ key }}: {{ network.env.labels.service[key] | quote }} +{% endfor %} +{% endif %} +{% if network.env.labels.pvc is defined %} + pvc: +{% for key in network.env.labels.pvc.keys() %} + - {{ key }}: {{ network.env.labels.pvc[key] | quote }} +{% endfor %} +{% endif %} +{% if network.env.labels.deployment is defined %} + deployment: +{% for key in network.env.labels.deployment.keys() %} + - {{ key }}: {{ network.env.labels.deployment[key] | quote }} +{% endfor %} +{% endif %} +{% endif %} diff --git a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/commit_chaincode_job.tpl b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/commit_chaincode_job.tpl index a2258413cf8..45059ead02b 100644 --- a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/commit_chaincode_job.tpl +++ b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/commit_chaincode_job.tpl @@ -1,4 +1,4 @@ 
-apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/create_channel_job.tpl b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/create_channel_job.tpl deleted file mode 100644 index 835ac592b18..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/create_channel_job.tpl +++ /dev/null @@ -1,54 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: channel-{{ org.name | lower }}-{{ component_name }} - namespace: {{ component_ns }} - annotations: - fluxcd.io/automated: "false" -spec: - interval: 1m - releaseName: channel-{{ org.name | lower }}-{{ component_name }} - chart: - spec: - interval: 1m - sourceRef: - kind: GitRepository - name: flux-{{ network.env.type }} - namespace: flux-{{ network.env.type }} - chart: {{ charts_dir }}/fabric-channel-create - values: - metadata: - namespace: {{ component_ns }} - network: - version: {{ network.version }} - images: - fabrictools: {{ docker_url }}/{{ fabric_tools_image[network.version] }} - alpineutils: {{ docker_url }}/{{ alpine_image }} - - peer: - name: {{ peer_name }} - address: {{ peer_name }}.{{ component_ns }}:7051 - localmspid: {{ org.name | lower }}MSP - loglevel: debug - tlsstatus: true - - vault: - role: vault-role - address: {{ vault.url }} - authpath: {{ k8s.cluster_id | default('')}}{{ network.env.type }}{{ org.name | lower }} - adminsecretprefix: {{ vault.secret_path | default('secretsv2') }}/data/{{ org.name | lower }}/peerOrganizations/{{ component_ns }}/users/admin - orderersecretprefix: {{ vault.secret_path | default('secretsv2') }}/data/{{ org.name | lower }}/peerOrganizations/{{ component_ns }}/orderer - serviceaccountname: vault-auth - type: {{ vault.type | default("hashicorp") }} -{% if network.docker.username is defined and network.docker.password is defined %} - imagesecretname: regcred -{% else %} - imagesecretname: "" -{% endif %} - - channel: - name: {{ component_name }} - orderer: - address: {{ peer.ordererAddress }} - channeltx: |- -{{ channeltx | indent(width=6, first=True) }} diff --git a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/external_chaincode.tpl b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/external_chaincode.tpl index bab4e450720..9a34b2e1bf6 100644 --- a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/external_chaincode.tpl +++ b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/external_chaincode.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: cc-{{ chaincode_name }} @@ -49,3 +49,25 @@ spec: {% endif %} service: servicetype: ClusterIP + +{% if network.env.labels is defined %} + labels: +{% if network.env.labels.service is defined %} + service: +{% for key in network.env.labels.service.keys() %} + - {{ key }}: {{ network.env.labels.service[key] | quote }} +{% endfor %} +{% endif %} +{% if network.env.labels.pvc is defined %} + pvc: +{% for key in network.env.labels.pvc.keys() %} + - {{ key }}: {{ network.env.labels.pvc[key] | quote }} +{% endfor %} +{% endif %} +{% if network.env.labels.deployment is defined %} + deployment: +{% for key in network.env.labels.deployment.keys() %} + - {{ key }}: {{ network.env.labels.deployment[key] | quote }} +{% 
endfor %} +{% endif %} +{% endif %} diff --git a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/install_chaincode_job.tpl b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/install_chaincode_job.tpl index fabb64e4c6c..5990fdef549 100644 --- a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/install_chaincode_job.tpl +++ b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/install_chaincode_job.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/install_external_chaincode_job.tpl b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/install_external_chaincode_job.tpl index 0e289f78c0f..df53012860b 100644 --- a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/install_external_chaincode_job.tpl +++ b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/install_external_chaincode_job.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/instantiate_chaincode_job.tpl b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/instantiate_chaincode_job.tpl index 3de754f2580..b9446545c65 100644 --- a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/instantiate_chaincode_job.tpl +++ b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/instantiate_chaincode_job.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/invoke_chaincode_job.tpl b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/invoke_chaincode_job.tpl index 6d7c7fa21be..ff8f0d3cdf3 100644 --- a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/invoke_chaincode_job.tpl +++ b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/invoke_chaincode_job.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/join_channel_job.tpl b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/join_channel_job.tpl deleted file mode 100644 index dd2cbd920dd..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/join_channel_job.tpl +++ /dev/null @@ -1,54 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: joinchannel-{{ peer.name }}-{{ component_name }} - namespace: {{ component_ns }} - annotations: - fluxcd.io/automated: "false" -spec: - interval: 1m - releaseName: joinchannel-{{ peer.name }}-{{ component_name }} - chart: - spec: - interval: 1m - sourceRef: - kind: GitRepository - name: flux-{{ network.env.type }} - namespace: flux-{{ network.env.type }} - chart: {{ charts_dir }}/fabric-channel-join - values: - metadata: - namespace: {{ component_ns }} - images: - fabrictools: {{ docker_url }}/{{ 
fabric_tools_image[network.version] }} - alpineutils: {{ docker_url }}/{{ alpine_image }} - - peer: - name: {{ peer_name }} -{% if network.env.proxy == 'none' %} - address: {{ peer.name }}.{{ component_ns }}:7051 -{% else %} - address: {{ peer.peerAddress }} -{% endif %} - localmspid: {{ org.name | lower}}MSP - loglevel: debug - tlsstatus: true - - vault: - role: vault-role - address: {{ vault.url }} - authpath: {{ org.k8s.cluster_id | default('')}}{{ network.env.type }}{{ org.name | lower }} - adminsecretprefix: {{ vault.secret_path | default('secretsv2') }}/data/{{ org.name | lower }}/peerOrganizations/{{ component_ns }}/users/admin - orderersecretprefix: {{ vault.secret_path | default('secretsv2') }}/data/{{ org.name | lower }}/peerOrganizations/{{ component_ns }}/orderer - serviceaccountname: vault-auth - type: {{ vault.type | default("hashicorp") }} -{% if network.docker.username is defined and network.docker.password is defined %} - imagesecretname: regcred -{% else %} - imagesecretname: "" -{% endif %} - - channel: - name: {{channel_name}} - orderer: - address: {{ participant.ordererAddress }} diff --git a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/operations_console.tpl b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/operations_console.tpl index c38c91d7ff1..57b2bf3f46c 100644 --- a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/operations_console.tpl +++ b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/operations_console.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ name }}-operations-console @@ -45,3 +45,24 @@ spec: proxy: provider: "{{ network.env.proxy }}" external_url_suffix: {{ item.external_url_suffix }} +{% if network.env.labels is defined %} + labels: +{% if network.env.labels.service is defined %} + service: +{% for key in network.env.labels.service.keys() %} + - {{ key }}: {{ network.env.labels.service[key] | quote }} +{% endfor %} +{% endif %} +{% if network.env.labels.pvc is defined %} + pvc: +{% for key in network.env.labels.pvc.keys() %} + - {{ key }}: {{ network.env.labels.pvc[key] | quote }} +{% endfor %} +{% endif %} +{% if network.env.labels.deployment is defined %} + deployment: +{% for key in network.env.labels.deployment.keys() %} + - {{ key }}: {{ network.env.labels.deployment[key] | quote }} +{% endfor %} +{% endif %} +{% endif %} diff --git a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/orderernode.tpl b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/orderernode.tpl index a21ad5e280f..d1dffb9f42e 100644 --- a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/orderernode.tpl +++ b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/orderernode.tpl @@ -1,13 +1,13 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: - name: {{ org_name }}-{{ orderer.name }} + name: {{ component_name | replace('_','-') }} namespace: {{ namespace }} annotations: fluxcd.io/automated: "false" spec: interval: 1m - releaseName: {{ org_name }}-{{ orderer.name }} + releaseName: {{ component_name | replace('_','-') }} chart: spec: interval: 1m @@ -17,97 +17,118 @@ spec: namespace: flux-{{ network.env.type }} chart: {{ charts_dir }}/fabric-orderernode values: - metadata: - namespace: {{ namespace }} - network: - version: {{ 
network.version }} - images: - orderer: {{ docker_url }}/{{ orderer_image[network.version] }} - alpineutils: {{ docker_url }}/{{ alpine_image }} -{% if network.env.annotations is defined %} - annotations: - service: -{% for item in network.env.annotations.service %} -{% for key, value in item.items() %} - - {{ key }}: {{ value | quote }} -{% endfor %} -{% endfor %} - pvc: -{% for item in network.env.annotations.pvc %} -{% for key, value in item.items() %} - - {{ key }}: {{ value | quote }} -{% endfor %} -{% endfor %} - deployment: -{% for item in network.env.annotations.deployment %} -{% for key, value in item.items() %} - - {{ key }}: {{ value | quote }} -{% endfor %} -{% endfor %} + global: + version: {{ network.version }} + serviceAccountName: vault-auth + cluster: + provider: {{ org.cloud_provider }} + cloudNativeServices: false + vault: + type: hashicorp + network: fabric + address: {{ vault.url }} + authPath: {{ network.env.type }}{{ org_name }} + secretEngine: {{ vault.secret_path | default("secretsv2") }} + secretPrefix: "data/{{ network.env.type }}{{ org_name }}" + role: vault-role + tls: false + proxy: + provider: {{ network.env.proxy | quote }} + externalUrlSuffix: {{ org.external_url_suffix }} + + storage: + size: 512Mi + reclaimPolicy: "Delete" + volumeBindingMode: + allowedTopologies: + enabled: false + + certs: + generateCertificates: true + orgData: +{% if network.env.proxy == 'none' %} + caAddress: ca.{{ namespace }}:7054 +{% else %} + caAddress: ca.{{ namespace }}.{{ org.external_url_suffix }} {% endif %} - orderer: - name: {{ orderer.name }} - loglevel: info - localmspid: {{ org_name }}MSP - tlsstatus: true - keepaliveserverinterval: 10s - ordererAddress: {{ orderer.ordererAddress }} + caAdminUser: {{ org_name }}-admin + caAdminPassword: {{ org_name }}-adminpw + orgName: {{ org_name }} + type: orderer + componentSubject: "{{ component_subject | quote }}" - consensus: - name: {{ orderer.consensus }} + settings: + createConfigMaps: {{ create_configmaps }} + refreshCertValue: false + addPeerValue: false + removeCertsOnDelete: true + removeOrdererTlsOnDelete: true - storage: - storageclassname: {{ sc_name }} - storagesize: 512Mi + image: + orderer: {{ docker_url }}/{{ orderer_image }} + alpineUtils: {{ docker_url }}/bevel-alpine:{{ bevel_alpine_version }} +{% if network.docker.username is defined and network.docker.password is defined %} + pullSecret: regcred +{% else %} + pullSecret: "" +{% endif %} - service: - servicetype: ClusterIP + orderer: + consensus: {{ orderer.consensus }} + logLevel: info + localMspId: {{ org_name }}MSP + tlsStatus: true + keepAliveServerInterval: 10s + serviceType: ClusterIP ports: grpc: - clusteripport: {{ orderer.grpc.port }} + clusterIpPort: {{ orderer.grpc.port }} {% if orderer.grpc.nodePort is defined %} nodeport: {{ orderer.grpc.nodePort }} {% endif %} metrics: enabled: {{ orderer.metrics.enabled | default(false) }} - clusteripport: {{ orderer.metrics.port | default(9443) }} + clusterIpPort: {{ orderer.metrics.port | default(9443) }} + resources: + limits: + memory: 512M + cpu: 1 + requests: + memory: 512M + cpu: 0.25 - vault: - address: {{ vault.url }} - role: vault-role - authpath: {{ item.k8s.cluster_id | default('')}}{{ network.env.type }}{{ item.name | lower }} - type: {{ vault.type | default("hashicorp") }} - secretprefix: {{ vault.secret_path | default('secretsv2') }}/data/{{ item.name | lower }}/ordererOrganizations/{{ namespace }}/orderers/{{ orderer.name }}.{{ namespace }} -{% if network.docker.username is defined and 
network.docker.password is defined %} - imagesecretname: regcred -{% else %} - imagesecretname: "" -{% endif %} - serviceaccountname: vault-auth {% if orderer.consensus == 'kafka' %} kafka: - readinesscheckinterval: 10 - readinessthreshold: 10 + readinessCheckInterval: 10 + readinessThresHold: 10 brokers: {% for i in range(consensus.replicas) %} - {{ consensus.name }}-{{ i }}.{{ consensus.type }}.{{ namespace }}.svc.cluster.local:{{ consensus.grpc.port }} {% endfor %} {% endif %} - proxy: - provider: {{ network.env.proxy }} - external_url_suffix: {{ item.external_url_suffix }} -{% if '2.5' not in network.version %} - genesis: |- -{{ genesis | indent(width=6, first=True) }} -{% endif %} + healthCheck: + retries: 10 + sleepTimeAfterError: 15 - config: - pod: - resources: - limits: - memory: 512M - cpu: 1 - requests: - memory: 512M - cpu: 0.25 +{% if network.env.labels is defined %} + labels: +{% if network.env.labels.service is defined %} + service: +{% for key in network.env.labels.service.keys() %} + - {{ key }}: {{ network.env.labels.service[key] | quote }} +{% endfor %} +{% endif %} +{% if network.env.labels.pvc is defined %} + pvc: +{% for key in network.env.labels.pvc.keys() %} + - {{ key }}: {{ network.env.labels.pvc[key] | quote }} +{% endfor %} +{% endif %} +{% if network.env.labels.deployment is defined %} + deployment: +{% for key in network.env.labels.deployment.keys() %} + - {{ key }}: {{ network.env.labels.deployment[key] | quote }} +{% endfor %} +{% endif %} +{% endif %} diff --git a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/osn_create_channel_job.tpl b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/osn_create_channel_job.tpl deleted file mode 100644 index 6fc24964044..00000000000 --- a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/osn_create_channel_job.tpl +++ /dev/null @@ -1,57 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: channel-{{ org.name | lower }}-{{ component_name }} - namespace: {{ component_ns }} - annotations: - fluxcd.io/automated: "false" -spec: - interval: 1m - releaseName: osn-channel-{{ org.name | lower }}-{{ component_name }} - chart: - spec: - interval: 1m - sourceRef: - kind: GitRepository - name: flux-{{ network.env.type }} - namespace: flux-{{ network.env.type }} - chart: {{ charts_dir }}/fabric-osnadmin-channel-create - values: - metadata: - namespace: {{ component_ns }} - network: - version: {{ network.version }} - images: - fabrictools: {{ docker_url }}/{{ fabric_tools_image[network.version] }} - alpineutils: {{ docker_url }}/{{ alpine_image }} - - vault: - role: vault-role - address: {{ vault.url }} - authpath: {{ org.k8s.cluster_id | default('')}}{{ network.env.type }}{{ org.name | lower }} - adminsecretprefix: {{ vault.secret_path | default('secretsv2') }}/data/{{ org.name | lower }}/ordererOrganizations/{{ component_ns }}/users/admin - orderersecretprefix: {{ vault.secret_path | default('secretsv2') }}/data/{{ org.name | lower }}/ordererOrganizations/{{ component_ns }}/orderers - serviceaccountname: vault-auth - type: {{ vault.type | default("hashicorp") }} -{% if network.docker.username is defined and network.docker.password is defined %} - imagesecretname: regcred -{% else %} - imagesecretname: "" -{% endif %} - - channel: - name: {{ component_name }} - orderers: - orderer_info: {% for orderer in orderers_list %}{% for key, value in orderer.items() %}{% if key == 'name' %}{{ value }}{% endif %}{% endfor %}*{% endfor 
%} - - add_orderer: {{ add_orderer_value }} - -{% if add_orderer is not defined or add_orderer is sameas false %} - genesis: |- -{{ genesis | indent(width=6, first=True) }} -{% else %} - orderer: - name: {{ first_orderer.name }} - localmspid: {{ org.name | lower}}MSP - address: {{ first_orderer.ordererAddress }} -{% endif %} diff --git a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/upgrade_chaincode_job.tpl b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/upgrade_chaincode_job.tpl index 169557c1733..350354ebc64 100644 --- a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/upgrade_chaincode_job.tpl +++ b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/upgrade_chaincode_job.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/value_peer.tpl b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/value_peer.tpl index ecb99df43e1..7aaab41423b 100755 --- a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/value_peer.tpl +++ b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/value_peer.tpl @@ -1,13 +1,13 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: - name: {{ name }}-{{ peer_name }} - namespace: {{ peer_ns }} + name: {{ component_name | replace('_','-') }} + namespace: {{ component_ns }} annotations: fluxcd.io/automated: "false" spec: interval: 1m - releaseName: {{ name }}-{{ peer_name }} + releaseName: {{ component_name | replace('_','-') }} chart: spec: interval: 1m @@ -17,110 +17,142 @@ spec: namespace: flux-{{ network.env.type }} chart: {{ charts_dir }}/fabric-peernode values: -{% if network.upgrade is defined %} - upgrade: {{ network.upgrade }} -{% endif %} - metadata: - namespace: {{ peer_ns }} - images: - couchdb: {{ docker_url }}/{{ couchdb_image[network.version] }} - peer: {{ docker_url }}/{{ peer_image[network.version] }} - alpineutils: {{ docker_url }}/{{ alpine_image }} + global: + version: {{ network.version }} + serviceAccountName: vault-auth + cluster: + provider: {{ org.cloud_provider }} + cloudNativeServices: false + vault: + type: hashicorp + network: fabric + address: {{ vault.url }} + authPath: {{ network.env.type }}{{ name }} + secretEngine: {{ vault.secret_path | default("secretsv2") }} + secretPrefix: "data/{{ network.env.type }}{{ name }}" + role: vault-role + tls: false + proxy: + provider: {{ network.env.proxy | quote }} + externalUrlSuffix: {{ org.external_url_suffix }} -{% if network.env.annotations is defined %} - annotations: - service: -{% for item in network.env.annotations.service %} -{% for key, value in item.items() %} - - {{ key }}: {{ value | quote }} -{% endfor %} -{% endfor %} - pvc: -{% for item in network.env.annotations.pvc %} -{% for key, value in item.items() %} - - {{ key }}: {{ value | quote }} -{% endfor %} -{% endfor %} - deployment: -{% for item in network.env.annotations.deployment %} -{% for key, value in item.items() %} - - {{ key }}: {{ value | quote }} -{% endfor %} -{% endfor %} -{% endif %} - peer: - name: {{ peer_name }} - gossippeeraddress: {{ peer.gossippeeraddress }} -{% if provider == 'none' %} - gossipexternalendpoint: {{ peer_name }}.{{ peer_ns }}:7051 + storage: + enabled: {{ sc_enabled }} + peer: 512Mi 
+ couchdb: 512Mi + reclaimPolicy: "Delete" + volumeBindingMode: Immediate + allowedTopologies: + enabled: false + + certs: + generateCertificates: true + orgData: +{% if network.env.proxy == 'none' %} + caAddress: ca.{{ namespace }}:7054 {% else %} - gossipexternalendpoint: {{ peer.peerAddress }} + caAddress: ca.{{ namespace }}.{{ org.external_url_suffix }} {% endif %} - localmspid: {{ name }}MSP - loglevel: info - tlsstatus: true - builder: hyperledger/fabric-ccenv:{{ network.version }} - couchdb: - username: {{ name }}-user -{% if peer.configpath is defined %} - configpath: conf/{{ peer_name }}_{{ name }}_core.yaml - core: |- -{{ core_file | indent(width=8, first=True) }} + caAdminUser: {{ name }}-admin + caAdminPassword: {{ name }}-adminpw + orgName: {{ name }} + type: peer + componentSubject: "{{ component_subject }}" + +{% if org.users is defined %} + users: + usersList: +{% for user in user_list %} + - {{ user }} +{% endfor %} {% endif %} - storage: - peer: - storageclassname: {{ sc_name }} - storagesize: 512Mi - couchdb: - storageclassname: {{ sc_name }} - storagesize: 1Gi + settings: + createConfigMaps: {{ create_configmaps }} + refreshCertValue: false + addPeerValue: false + removeCertsOnDelete: true + removeOrdererTlsOnDelete: true - vault: - role: vault-role - address: {{ vault.url }} - authpath: {{ item.k8s.cluster_id | default('')}}{{ network.env.type }}{{ item.name | lower }} - secretprefix: {{ vault.secret_path | default('secretsv2') }}/data/{{ item.name | lower }}/peerOrganizations/{{ namespace }}/peers/{{ peer_name }}.{{ namespace }} - serviceaccountname: vault-auth - type: {{ vault.type | default("hashicorp") }} -{% if network.docker.username is defined and network.docker.password is defined %} - imagesecretname: regcred + image: + couchdb: {{ docker_url }}/{{ couchdb_image }} + peer: {{ docker_url }}/{{ peer_image }} + alpineUtils: {{ docker_url }}/bevel-alpine:{{ bevel_alpine_version }} +{% if network.docker.username is defined and network.docker.password is defined %} + pullSecret: regcred {% else %} - imagesecretname: "" + pullSecret: "" {% endif %} - secretcouchdbpass: {{ vault.secret_path | default('secretsv2') }}/data/{{ item.name | lower }}/credentials/{{ namespace }}/couchdb/{{ name }}?user - service: - servicetype: ClusterIP + peer: + gossipPeerAddress: {{ peer.peerAddress }} + logLevel: info + localMspId: {{ name }}MSP + tlsStatus: true + cliEnabled: {{ enabled_cli }} + ordererAddress: {{ orderer.uri }} + builder: hyperledger/fabric-ccenv + couchdb: + username: {{ name }}-user + password: {{ name }}-userpw + mspConfig: + organizationalUnitIdentifiers: + nodeOUs: + clientOUIdentifier: client + peerOUIdentifier: peer + adminOUIdentifier: admin + ordererOUIdentifier: orderer + serviceType: ClusterIP + loadBalancerType: "" ports: grpc: - clusteripport: {{ peer.grpc.port }} + clusterIpPort: {{ peer.grpc.port }} {% if peer.grpc.nodePort is defined %} - nodeport: {{ peer.grpc.nodePort }} + nodePort: {{ peer.grpc.nodePort }} {% endif %} events: - clusteripport: {{ peer.events.port }} + clusterIpPort: {{ peer.events.port }} {% if peer.events.nodePort is defined %} - nodeport: {{ peer.events.nodePort }} + nodePort: {{ peer.events.nodePort }} {% endif %} couchdb: - clusteripport: {{ peer.couchdb.port }} + clusterIpPort: {{ peer.couchdb.port }} {% if peer.couchdb.nodePort is defined %} - nodeport: {{ peer.couchdb.nodePort }} + nodePort: {{ peer.couchdb.nodePort }} {% endif %} - metrics: + metrics: enabled: {{ peer.metrics.enabled | default(false) }} -
clusteripport: {{ peer.metrics.port | default(9443) }} - proxy: - provider: "{{ network.env.proxy }}" - external_url_suffix: {{ item.external_url_suffix }} + clusterIpPort: {{ peer.metrics.port | default(9443) }} + resources: + limits: + memory: 1Gi + cpu: 1 + requests: + memory: 512M + cpu: 0.25 + upgrade: {{ network.upgrade | default(false) }} + healthCheck: + retries: 20 + sleepTimeAfterError: 15 - config: - pod: - resources: - limits: - memory: 512M - cpu: 1 - requests: - memory: 512M - cpu: 0.25 +{% if network.env.labels is defined %} + labels: +{% if network.env.labels.service is defined %} + service: +{% for key in network.env.labels.service.keys() %} + - {{ key }}: {{ network.env.labels.service[key] | quote }} +{% endfor %} +{% endif %} +{% if network.env.labels.pvc is defined %} + pvc: +{% for key in network.env.labels.pvc.keys() %} + - {{ key }}: {{ network.env.labels.pvc[key] | quote }} +{% endfor %} +{% endif %} +{% if network.env.labels.deployment is defined %} + deployment: +{% for key in network.env.labels.deployment.keys() %} + - {{ key }}: {{ network.env.labels.deployment[key] | quote }} +{% endfor %} +{% endif %} +{% endif %} diff --git a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/zkkafka.tpl b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/zkkafka.tpl index 29b97d85d48..af0ef0d229c 100644 --- a/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/zkkafka.tpl +++ b/platforms/hyperledger-fabric/configuration/roles/helm_component/templates/zkkafka.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: zkkafka-{{ org_name }}-orderer diff --git a/platforms/hyperledger-fabric/configuration/roles/helm_component/vars/main.yaml b/platforms/hyperledger-fabric/configuration/roles/helm_component/vars/main.yaml index 22ee588acae..f33e0cf7ab9 100644 --- a/platforms/hyperledger-fabric/configuration/roles/helm_component/vars/main.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/helm_component/vars/main.yaml @@ -5,8 +5,7 @@ ############################################################################################## helm_templates: - ca-orderer: ca-orderer.tpl - ca-peer: ca-peer.tpl + ca-server: ca-server.tpl ca-tools: ca-tools.tpl cas: ca.tpl orderers: orderernode.tpl @@ -30,31 +29,20 @@ helm_templates: external_chaincode: external_chaincode.tpl install_external_chaincode_job: install_external_chaincode_job.tpl -alpine_image: bevel-alpine:latest -ca_tools_image: bevel-fabric-ca-tools:1.2.1 +bevel_alpine_version: latest # Change to tag version when using tag specific images +fabric_tools_image: bevel-fabric-tools kafka_image: bevel-fabric-kafka:0.4.18 zookeeper_image: bevel-fabric-zookeeper:0.4.18 fabric_console_image: bevel-fabric-console:latest +orderer_image: bevel-fabric-orderer +peer_image: bevel-fabric-peer +couchdb_image: bevel-fabric-couchdb + ca_image: 1.4.8: bevel-fabric-ca:1.4.8 2.2.2: bevel-fabric-ca:1.4.8 2.5.4: bevel-fabric-ca:latest -orderer_image: - 1.4.8: bevel-fabric-orderer:1.4.8 - 2.2.2: bevel-fabric-orderer:2.2.2 - 2.5.4: bevel-fabric-orderer:2.5.4 - -peer_image: - 1.4.8: bevel-fabric-peer:1.4.8 - 2.2.2: bevel-fabric-peer:2.2.2 - 2.5.4: bevel-fabric-peer:2.5.4 - -couchdb_image: - 1.4.8: bevel-fabric-couchdb:1.4.8 - 2.2.2: bevel-fabric-couchdb:2.2.2 - 2.5.4: bevel-fabric-couchdb:2.5.4 - fabric_tools_image: 1.4.8: bevel-fabric-tools:1.4.8 2.2.2: bevel-fabric-tools:2.2.2 diff --git 
a/platforms/hyperledger-fabric/configuration/roles/k8_component/tasks/main.yaml b/platforms/hyperledger-fabric/configuration/roles/k8_component/tasks/main.yaml index f603add72e1..21c4ab9f408 100644 --- a/platforms/hyperledger-fabric/configuration/roles/k8_component/tasks/main.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/k8_component/tasks/main.yaml @@ -9,25 +9,25 @@ ############################################################################################# # Ensure that the directory exists for each entity, if not, it creates them -- name: Ensure {{ component_type_name }} dir exists +- name: Ensure {{ release_dir }} dir exists file: - path: "{{ release_dir }}/{{ component_type_name }}" + path: "{{ release_dir }}" state: directory # Create the value file for the k8 components -- name: "Create {{ component_type }} file for {{ component_type_name }}" +- name: "Create {{ component_type }} file for {{ org.name | lower }}" template: src: "{{ k8_templates[type] | default('default.tpl') }}" dest: "{{ values_file }}" vars: - values_file: "{{ release_dir }}/{{ component_type_name }}/{{ component_type }}.yaml" + values_file: "{{ release_dir }}/{{ component_type }}.yaml" type: "{{ component_type }}" # Create the component in kubernetes cluster directly when using operator - name: Create the component in kubernetes cluster directly when using operator kubernetes.core.k8s: state: present - src: "{{ release_dir }}/{{ component_type_name }}/{{ component_type }}.yaml" + src: "{{ release_dir }}/{{ component_type }}.yaml" kubeconfig: "{{ kubernetes.config_file }}" context: "{{ kubernetes.context }}" when: diff --git a/platforms/hyperledger-fabric/configuration/roles/operator/create/ca/user/tasks/main.yaml b/platforms/hyperledger-fabric/configuration/roles/operator/create/ca/user/tasks/main.yaml index 8cb2455877c..4a95cf94204 100644 --- a/platforms/hyperledger-fabric/configuration/roles/operator/create/ca/user/tasks/main.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/operator/create/ca/user/tasks/main.yaml @@ -17,7 +17,7 @@ until: this.status == 200 retries: "{{ network.env.retry_count }}" delay: 20 - ignore_errors: yes + ignore_errors: true # Create main user for orderer org - name: "Create main user for orderer org" @@ -33,7 +33,7 @@ when: - component_type == 'orderer' - user_type == 'default' - ignore_errors: yes + ignore_errors: true # Create main user for peer org - name: "Create main user for peer org" @@ -63,7 +63,7 @@ kubeconfig_path: "{{ item.k8s.config_file }}" when: - user_type == 'admin' - ignore_errors: yes + ignore_errors: true # Create admin user identity for orderer - name: "Create admin user identity for orderer" diff --git a/platforms/hyperledger-fabric/configuration/roles/operator/create/orderer/tasks/main.yaml b/platforms/hyperledger-fabric/configuration/roles/operator/create/orderer/tasks/main.yaml index 9ddcfec0d4b..2d544ebec0d 100644 --- a/platforms/hyperledger-fabric/configuration/roles/operator/create/orderer/tasks/main.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/operator/create/orderer/tasks/main.yaml @@ -17,7 +17,7 @@ until: this.status == 200 retries: "{{ network.env.retry_count }}" delay: 20 - ignore_errors: yes + ignore_errors: true # Create orderer node - name: "Create orderer node" diff --git a/platforms/hyperledger-fabric/configuration/roles/operator/create/peer/tasks/main.yaml b/platforms/hyperledger-fabric/configuration/roles/operator/create/peer/tasks/main.yaml index bd4b5a23f97..9992d83211a 100644 --- 
a/platforms/hyperledger-fabric/configuration/roles/operator/create/peer/tasks/main.yaml +++ b/platforms/hyperledger-fabric/configuration/roles/operator/create/peer/tasks/main.yaml @@ -17,7 +17,7 @@ until: this.status == 200 retries: "{{ network.env.retry_count }}" delay: 20 - ignore_errors: yes + ignore_errors: true # Create peer node - name: "Create peer node" diff --git a/platforms/hyperledger-fabric/configuration/samples/network-fabric-add-new-channel.yaml b/platforms/hyperledger-fabric/configuration/samples/network-fabric-add-new-channel.yaml index 166347992ae..589ab5ea165 100644 --- a/platforms/hyperledger-fabric/configuration/samples/network-fabric-add-new-channel.yaml +++ b/platforms/hyperledger-fabric/configuration/samples/network-fabric-add-new-channel.yaml @@ -11,7 +11,7 @@ network: # Network level configuration specifies the attributes required for each organization # to join an existing network. type: fabric - version: 2.2.2 # currently tested 1.4.8 and 2.2.2 + version: 2.2.2 # currently tested 1.4.8, 2.2.2 and 2.5.4 frontend: enabled #Flag for frontend to enabled for nodes/peers @@ -21,9 +21,9 @@ network: proxy: haproxy # values can be 'haproxy' or 'none' retry_count: 20 # Retry count for the checks external_dns: enabled # Should be enabled if using external-dns for automatic route configuration - annotations: # Additional annotations that can be used for some pods (ca, ca-tools, orderer and peer nodes) + labels: # Additional annotations that can be used for some pods (ca, ca-tools, orderer and peer nodes) service: - - example1: example2 + example1: example2 deployment: {} pvc: {} # For providing Custom Templates to generate configtx.yaml @@ -48,42 +48,69 @@ network: type: orderer name: orderer1 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer1.org1proxy.blockchaincloudpoc.com:443 # Must include port, can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer1.crt # Ensure that the directory exists + uri: orderer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - orderer: type: orderer name: orderer2 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer2.org1proxy.blockchaincloudpoc.com:443 # Must include port, can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer2.crt # Ensure that the directory exists + uri: orderer2.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - orderer: type: orderer name: orderer3 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer3.org1proxy.blockchaincloudpoc.com:443 # Must include port, can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer3.crt # Ensure that the directory exists + uri: orderer3.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers # The channels defined for a network with participating peers in each channel channels: - channel: consortium: SupplyChainConsortium channel_name: AllChannel - 
channel_status: existing + channel_status: new + osn_creator_org: # Organization name, whose orderers will create the channel. This field is only used with version 2.5 + name: supplychain chaincodes: - "chaincode_name" - orderers: + orderers: - supplychain participants: - organization: name: carrier type: creator # creator organization will create the channel and instantiate chaincode, in addition to joining the channel and install chaincode - org_status: existing + org_status: new peers: - peer: name: peer0 + type: anchor gossipAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the gossip peer peerAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the orderer + ordererAddress: orderer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 # Must include port, External or internal URI of the orderer + - organization: + name: supplychain + type: joiner + org_status: new + peers: + - peer: + name: peer0 + type: anchor + gossipAddress: peer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 + peerAddress: peer0.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 + - peer: + name: peer1 + type: nonanchor + gossipAddress: peer0.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 + peerAddress: peer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 + - organization: + name: store + type: joiner # joiner organization will only join the channel and install chaincode + org_status: new + peers: + - peer: + name: peer0 + type: anchor + gossipAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 + peerAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer + ordererAddress: orderer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 - organization: name: warehouse type: joiner @@ -91,9 +118,10 @@ network: peers: - peer: name: peer0 + type: anchor gossipAddress: peer0.warehouse-net.org5proxy.blockchaincloudpoc.com:443 peerAddress: peer0.warehouse-net.org5proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 - organization: name: manufacturer type: joiner @@ -101,19 +129,10 @@ network: peers: - peer: name: peer0 + type: anchor gossipAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 peerAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 - - organization: - name: store - type: joiner # joiner organization will only join the channel and install chaincode - org_status: new - peers: - - peer: - name: peer0 - gossipAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 - peerAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 endorsers: # Only one peer per org required for endorsement - organization: @@ -144,8 
+163,7 @@ network: name: peer0 corepeerAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 certificate: "/path/store/server.crt" # certificate path for peer - genesis: - name: OrdererGenesis + - channel: consortium: SupplyChainConsortium channel_name: ChannelTwo @@ -153,6 +171,22 @@ network: orderers: - supplychain participants: + - organization: + name: supplychain + type: joiner + org_status: new + peers: + - peer: + name: peer0 + type: anchor + gossipAddress: peer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 + peerAddress: peer0.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 + - peer: + name: peer1 + type: nonanchor + gossipAddress: peer0.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 + peerAddress: peer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 - organization: name: carrier type: joiner # creator organization will create the channel and instantiate chaincode, in addition to joining the channel and install chaincode @@ -160,9 +194,10 @@ network: peers: - peer: name: peer0 + type: anchor gossipAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the gossip peer peerAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the orderer + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the orderer - organization: name: store type: creator # joiner organization will only join the channel and install chaincode @@ -170,9 +205,10 @@ network: peers: - peer: name: peer0 + type: anchor gossipAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 peerAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 # External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 endorsers: # Only one peer per org required for endorsement - organization: @@ -189,8 +225,6 @@ network: name: peer0 corepeerAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 certificate: "/path/store/server.crt" # certificate path for peer - genesis: - name: ChannelTwoGenesis # Allows specification of one or many organizations that will be connecting to a network. # If an organization is also hosting the root of the network (e.g. doorman, membership service, etc), @@ -204,18 +238,17 @@ network: state: London location: London subject: "O=Orderer,OU=Orderer,L=51.50/-0.13/London,C=GB" - type: orderer external_url_suffix: org1proxy.blockchaincloudpoc.com org_status: new + fabric_console: enabled # To deploy Fabric console for this organization ca_data: - url: ca.supplychain-net.org1proxy.blockchaincloudpoc.com certificate: /path/supplychain/server.crt # Path where ca public cert will be stored (if new) or read from (if existing ca) - + cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube aws: access_key: "aws_access_key" # AWS Access key, only used when cloud_provider=aws secret_key: "aws_secret_key" # AWS Secret key, only used when cloud_provider=aws - + # Kubernetes cluster deployment variables. The config file path and name has to be provided in case # the cluster has already been created. 
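# With the genesis entries gone, a channel definition is driven by channel_status and,
# on Fabric 2.5.x, by osn_creator_org (the organization whose orderers create the channel).
# A trimmed sketch of such a block, reusing the sample's own names and showing only the
# keys touched by this change:
- channel:
    consortium: SupplyChainConsortium
    channel_name: AllChannel
    channel_status: new              # new when the channel is being created
    osn_creator_org:                 # only used with Fabric 2.5.x
      name: supplychain
    orderers:
      - supplychain
    participants:
      - organization:
          name: carrier
          type: creator
          org_status: new
          peers:
            - peer:
                name: peer0
                type: anchor         # each organization needs at least one anchor peer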
k8s: @@ -248,14 +281,13 @@ network: services: ca: name: ca - subject: "/C=GB/ST=London/L=London/O=Orderer/CN=ca.supplychain-net.org1proxy.blockchaincloudpoc.com" + subject: "/C=GB/ST=London/L=London/O=Orderer" type: ca grpc: port: 7054 consensus: name: raft - orderers: # This sample has multiple orderers as an example. # You can use a single orderer for most production implementations. @@ -266,111 +298,59 @@ network: consensus: raft grpc: port: 7050 - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 - orderer: name: orderer2 type: orderer consensus: raft grpc: port: 7050 - ordererAddress: orderer2.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer2.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 - orderer: name: orderer3 type: orderer consensus: raft grpc: port: 7050 - ordererAddress: orderer3.org1proxy.blockchaincloudpoc.com:443 - - # Specification for the 2nd organization. Each organization maps to a VPC and a separate k8s cluster - - organization: - name: manufacturer - country: CH - state: Zurich - location: Zurich - subject: "O=Manufacturer,OU=Manufacturer,L=47.38/8.54/Zurich,C=CH" - type: peer - external_url_suffix: org2proxy.blockchaincloudpoc.com - org_status: new - orderer_org: supplychain # Name of the organization that provides the ordering service - ca_data: - url: ca.manufacturer-net.org2proxy.blockchaincloudpoc.com - certificate: /path/manufacturer/server.crt - - cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube - aws: - access_key: "aws_access_key" # AWS Access key, only used when cloud_provider=aws - secret_key: "aws_secret_key" # AWS Secret key, only used when cloud_provider=aws - - # Kubernetes cluster deployment variables. The config file path and name has to be provided in case - # the cluster has already been created. - k8s: - region: "cluster_region" - context: "cluster_context" - config_file: "cluster_config" + ordererAddress: orderer3.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 - # Hashicorp Vault server address and root-token. Vault should be unsealed. - # Do not check-in root_token - vault: - url: "vault_addr" - root_token: "vault_root_token" - secret_path: "secretsv2" - # Git Repo details which will be used by GitOps/Flux. - # Do not check-in git_access_token - gitops: - git_protocol: "https" # Option for git over https or ssh - git_url: "https://github.com//bevel.git" # Gitops https or ssh url for flux value files - branch: "develop" # Git branch where release is being made - release_dir: "platforms/hyperledger-fabric/releases/dev" # Relative Path in the Git repo for flux sync per environment. 
- chart_source: "platforms/hyperledger-fabric/charts" # Relative Path where the Helm charts are stored in Git repo - git_repo: "github.com//bevel.git" # Gitops git repository URL for git push (without https://) - username: "git_username" # Git Service user who has rights to check-in in all branches - password: "git_access_token" # Git Server user password/token (Optional for ssh; Required for https) - email: "git@email.com" # Email to use in git config - private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) - - # The participating nodes are peers - # This organization hosts it's own CA server - services: - ca: - name: ca - subject: "/C=CH/ST=Zurich/L=Zurich/O=Manufacturer/CN=ca.manufacturer-net.org2proxy.blockchaincloudpoc.com" - type: ca - grpc: - port: 7054 peers: - peer: - name: peer0 - type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. - gossippeeraddress: peer0.manufacturer-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer - peerAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - certificate: /path/manufacturer/peer0.crt # Path to peer Certificate - cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. + name: peer0 + type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. + gossippeeraddress: peer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer + peerAddress: peer0.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 # External URI of the peer + cli: enabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. grpc: - port: 7051 + port: 7051 events: port: 7053 couchdb: port: 5984 - restserver: # This is for the rest-api server + restserver: targetPort: 20001 - port: 20001 - expressapi: # This is for the express api server + port: 20001 + expressapi: + targetPort: 3000 + port: 3000 + - peer: + name: peer1 + type: nonanchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. + gossippeeraddress: peer0.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 # External address of the existing anchor peer + peerAddress: peer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 # External URI of the peer + cli: enabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. 
+ grpc: + port: 7051 + events: + port: 7053 + couchdb: + port: 5984 + restserver: + targetPort: 20001 + port: 20001 + expressapi: targetPort: 3000 port: 3000 - chaincodes: - - name: "chaincode_name" #This has to be replaced with the name of the chaincode - version: "1" #This has to be replaced with the version of the chaincode - maindirectory: "chaincode_main" #The main directory where chaincode is needed to be placed - lang: "golang" # The language in which the chaincode is written ( golang/ java) - repository: - username: "git_username" # Git Service user who has rights to check-in in all branches - password: "git_access_token" - url: "github.com//bevel-samples.git" - branch: main - path: "chaincode_src" #The path to the chaincode - arguments: 'chaincode_args' #Arguments to be passed along with the chaincode parameters - endorsements: "" #Endorsements (if any) provided along with the chaincode - organization: name: carrier @@ -378,12 +358,10 @@ network: state: London location: London subject: "O=Carrier,OU=Carrier,L=51.50/-0.13/London,C=GB" - type: peer external_url_suffix: org3proxy.blockchaincloudpoc.com org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service ca_data: - url: ca.carrier-net.org3proxy.blockchaincloudpoc.com certificate: /path/carrier/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -421,7 +399,7 @@ network: services: ca: name: ca - subject: "/C=GB/ST=London/L=London/O=Carrier/CN=ca.carrier-net.org3proxy.blockchaincloudpoc.com" + subject: "/C=GB/ST=London/L=London/O=Carrier" type: ca grpc: port: 7054 @@ -431,7 +409,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.carrier-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - certificate: /path/carrier/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. grpc: port: 7051 @@ -464,12 +441,11 @@ network: state: New York location: New York subject: "O=Store,OU=Store,L=40.73/-74/New York,C=US" - type: peer + external_url_suffix: org4proxy.blockchaincloudpoc.com org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service ca_data: - url: ca.store-net.org4proxy.blockchaincloudpoc.com certificate: /path/store/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -507,7 +483,7 @@ network: services: ca: name: ca - subject: "/C=US/ST=New York/L=New York/O=Store/CN=ca.store-net.org4proxy.blockchaincloudpoc.com" + subject: "/C=US/ST=New York/L=New York/O=Store" type: ca grpc: port: 7054 @@ -517,7 +493,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.store-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - certificate: /path/store/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. 
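# After this change an organization's CA details shrink to the subject DN, now without a
# CN component, and the ca_data certificate path; the per-peer certificate: entries and the
# ca_data url: field are no longer listed. A trimmed sketch for the carrier organization
# (paths as in the sample above):
ca_data:
  certificate: /path/carrier/server.crt            # CA public cert path
services:
  ca:
    name: ca
    subject: "/C=GB/ST=London/L=London/O=Carrier"  # no CN component
    type: ca
    grpc:
      port: 7054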
grpc: port: 7051 @@ -551,12 +526,10 @@ network: state: Massachusetts location: Boston subject: "O=Warehouse,OU=Warehouse,L=42.36/-71.06/Boston,C=US" - type: peer external_url_suffix: org5proxy.blockchaincloudpoc.com org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service ca_data: - url: ca.warehouse-net.org5proxy.blockchaincloudpoc.com certificate: /path/warehouse/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -594,7 +567,7 @@ network: services: ca: name: ca - subject: "/C=US/ST=Massachusetts/L=Boston/O=Warehouse/CN=ca.warehouse-net.org5proxy.blockchaincloudpoc.com" + subject: "/C=US/ST=Massachusetts/L=Boston/O=Warehouse" type: ca grpc: port: 7054 @@ -604,7 +577,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.warehouse-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.warehouse-net.org5proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - certificate: /path/warehouse/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. grpc: port: 7051 diff --git a/platforms/hyperledger-fabric/configuration/samples/network-fabric-add-ordererorg.yaml b/platforms/hyperledger-fabric/configuration/samples/network-fabric-add-ordererorg.yaml index 5b606b1678a..87509e0b6bc 100644 --- a/platforms/hyperledger-fabric/configuration/samples/network-fabric-add-ordererorg.yaml +++ b/platforms/hyperledger-fabric/configuration/samples/network-fabric-add-ordererorg.yaml @@ -14,7 +14,7 @@ network: # Network level configuration specifies the attributes required for each organization # to join an existing network. 
type: fabric - version: 2.2.2 #Addition of ordrer organization only works for 2.2.2 + version: 2.2.2 #Addition of ordrer organization works for 2.2.2 and 2.5.4 frontend: enabled #Flag for frontend to enabled for nodes/peers @@ -46,26 +46,24 @@ network: type: orderer name: orderer1 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer1.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer1.crt # Ensure that the directory exists + uri: orderer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - orderer: type: orderer name: orderer2 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer2.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer2.crt # Ensure that the directory exists + uri: orderer2.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - orderer: type: orderer name: orderer3 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer3.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer3.crt # Ensure that the directory exists + uri: orderer3.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers # The channels defined for a network with participating peers in each channel channels: - channel: consortium: SupplyChainConsortium channel_name: AllChannel + channel_status: new chaincodes: - "chaincode_name" orderers: @@ -80,7 +78,7 @@ network: name: peer0 gossipAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the gossip peer peerAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the orderer + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the orderer - organization: name: store type: joiner # joiner organization will only join the channel and install chaincode @@ -90,7 +88,7 @@ network: name: peer0 gossipAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 peerAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - organization: name: warehouse type: joiner @@ -100,7 +98,7 @@ network: name: peer0 gossipAddress: peer0.warehouse-net.org5proxy.blockchaincloudpoc.com:443 peerAddress: peer0.warehouse-net.org5proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: 
orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - organization: name: manufacturer type: joiner @@ -110,7 +108,7 @@ network: name: peer0 gossipAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 peerAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 endorsers: # Only one peer per org required for endorsement - organization: @@ -134,8 +132,6 @@ network: name: peer0 corepeerAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 certificate: "/path/manufacturer/server.crt" # certificate path for peer - genesis: - name: OrdererGenesis # Allows specification of one or many organizations that will be connecting to a network. # If an organization is also hosting the root of the network (e.g. doorman, membership service, etc), @@ -149,11 +145,9 @@ network: state: London location: London subject: "O=Orderer,L=51.50/-0.13/London,C=GB" - type: orderer external_url_suffix: org1proxy.blockchaincloudpoc.com org_status: existing # Status of the organization for the existing network, can be new / existing ca_data: - url: ca.supplychain-net.org1proxy.blockchaincloudpoc.com certificate: file/server.crt # Path where ca public cert will be stored (if new) or read from (if existing ca) cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -211,7 +205,7 @@ network: consensus: raft grpc: port: 7050 - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - orderer: name: orderer2 type: orderer @@ -234,11 +228,9 @@ network: state: London location: London subject: "O=NewOrderer,L=51.50/-0.13/London,C=GB" - type: orderer external_url_suffix: org6proxy.blockchaincloudpoc.com org_status: new # Status of the organization for the existing network, can be new / existing ca_data: - url: ca.neworderer-net.org6proxy.blockchaincloudpoc.com certificate: file/server.crt # Path where ca public cert will be stored (if new) or read from (if existing ca) cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -311,12 +303,10 @@ network: state: Zurich location: Zurich subject: "O=Manufacturer,OU=Manufacturer,L=47.38/8.54/Zurich,C=CH" - type: peer external_url_suffix: org2proxy.blockchaincloudpoc.com org_status: existing # Status of the organization for the existing network, can be new / existing orderer_org: supplychain # Name of the organization that provides the ordering service ca_data: - url: ca.manufacturer-net.org2proxy.blockchaincloudpoc.com certificate: file/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -399,12 +389,10 @@ network: state: London location: London subject: "O=Carrier,OU=Carrier,L=51.50/-0.13/London,C=GB" - type: peer external_url_suffix: org3proxy.blockchaincloudpoc.com org_status: existing # Status of the organization for the existing network, can be new / existing orderer_org: supplychain # Name of the organization that provides the ordering service ca_data: - url: ca.carrier-net.org3proxy.blockchaincloudpoc.com certificate: file/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -485,12 +473,10 @@ network: state: New York location: New York subject: "O=Store,OU=Store,L=40.73/-74/New York,C=US" - type: peer external_url_suffix: org4proxy.blockchaincloudpoc.com 
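# Throughout these samples the rewritten orderer and peer addresses appear to follow one
# pattern: <node name>.<organization namespace>.<that organization's proxy domain>:443,
# always including the proxy port. For example:
uri: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443            # orderer1 in supplychain-net behind org1proxy
peerAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443             # peer0 in store-net behind org4proxy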
org_status: existing # Status of the organization for the existing network, can be new / existing orderer_org: supplychain # Name of the organization that provides the ordering service ca_data: - url: ca.store-net.org4proxy.blockchaincloudpoc.com certificate: file/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -571,12 +557,10 @@ network: state: Massachusetts location: Boston subject: "O=Warehouse,OU=Warehouse,L=42.36/-71.06/Boston,C=US" - type: peer external_url_suffix: org5proxy.blockchaincloudpoc.com org_status: existing # Status of the organization for the existing network, can be new / existing orderer_org: supplychain # Name of the organization that provides the ordering service ca_data: - url: ca.warehouse-net.org5proxy.blockchaincloudpoc.com certificate: file/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube diff --git a/platforms/hyperledger-fabric/configuration/samples/network-fabric-add-organization.yaml b/platforms/hyperledger-fabric/configuration/samples/network-fabric-add-organization.yaml index 4c8853b5923..49fd66c048d 100644 --- a/platforms/hyperledger-fabric/configuration/samples/network-fabric-add-organization.yaml +++ b/platforms/hyperledger-fabric/configuration/samples/network-fabric-add-organization.yaml @@ -11,7 +11,7 @@ network: # Network level configuration specifies the attributes required for each organization # to join an existing network. type: fabric - version: 2.2.2 # currently tested 1.4.8 and 2.2.2 + version: 2.2.2 # currently tested 1.4.8, 2.2.2 and 2.5.4 frontend: enabled #Flag for frontend to enabled for nodes/peers @@ -47,26 +47,24 @@ network: type: orderer name: orderer1 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer1.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer1.crt # Path of the orderer certificate which must exist + uri: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - orderer: type: orderer name: orderer2 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer2.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer2.crt # Path of the orderer certificate which must exist + uri: orderer2.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - orderer: type: orderer name: orderer3 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer3.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer3.crt # Ensure that the directory exists + uri: orderer3.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers # The channels defined for a network with participating peers in each channel channels: - channel: consortium: SupplyChainConsortium channel_name: AllChannel + channel_status: new chaincodes: - 
"chaincode_name" orderers: @@ -81,7 +79,7 @@ network: name: peer0 gossipAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the gossip peer peerAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the orderer + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the orderer - organization: name: warehouse type: joiner @@ -91,7 +89,7 @@ network: name: peer0 gossipAddress: peer0.warehouse-net.org5proxy.blockchaincloudpoc.com:443 peerAddress: peer0.warehouse-net.org5proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - organization: name: manufacturer type: joiner @@ -101,7 +99,7 @@ network: name: peer0 gossipAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 peerAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - organization: name: store type: joiner # joiner organization will only join the channel and install chaincode @@ -111,7 +109,7 @@ network: name: peer0 gossipAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 peerAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 endorsers: # Only one peer per org required for endorsement - organization: @@ -135,9 +133,7 @@ network: name: peer0 corepeerAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 certificate: /home/bevel/build/manufacturer/server.crt # certificate path for peer - genesis: - name: OrdererGenesis - + # Allows specification of one or many organizations that will be connecting to a network. # If an organization is also hosting the root of the network (e.g. doorman, membership service, etc), # then these services should be listed in this section as well. 
@@ -150,11 +146,9 @@ network: state: London location: London subject: "O=Orderer,OU=Orderer,L=51.50/-0.13/London,C=GB" - type: orderer external_url_suffix: org1proxy.blockchaincloudpoc.com org_status: existing # Status of the organization for the existing network, can be new / existing ca_data: - url: ca.supplychain-net.org1proxy.blockchaincloudpoc.com certificate: /home/bevel/build/supplychain/server.crt # Path where ca public cert will be stored (if new) or read from (if existing ca) cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube aws: @@ -211,21 +205,21 @@ network: consensus: raft grpc: port: 7050 - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - orderer: name: orderer2 type: orderer consensus: raft grpc: port: 7050 - ordererAddress: orderer2.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer2.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - orderer: name: orderer3 type: orderer consensus: raft grpc: port: 7050 - ordererAddress: orderer3.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer3.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Specification for the 2nd organization. Each organization maps to a VPC and a separate k8s cluster - organization: @@ -234,12 +228,10 @@ network: state: Zurich location: Zurich subject: "O=Manufacturer,OU=Manufacturer,L=47.38/8.54/Zurich,C=CH" - type: peer external_url_suffix: org2proxy.blockchaincloudpoc.com org_status: existing # Status of the organization for the existing network, can be new / existing orderer_org: supplychain # Name of the organization that provides the ordering service ca_data: - url: ca.manufacturer-net.org2proxy.blockchaincloudpoc.com certificate: /home/bevel/build/manufacturer/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -288,7 +280,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.manufacturer-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - certificate: /home/bevel/build/manufacturer/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. grpc: port: 7051 @@ -322,12 +313,10 @@ network: state: London location: London subject: "O=Carrier,OU=Carrier,L=51.50/-0.13/London,C=GB" - type: peer external_url_suffix: org3proxy.blockchaincloudpoc.com org_status: existing # Status of the organization for the existing network, can be new / existing orderer_org: supplychain # Name of the organization that provides the ordering service ca_data: - url: ca.carrier-net.org3proxy.blockchaincloudpoc.com certificate: /home/bevel/build/carrier/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -375,7 +364,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.carrier-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - certificate: /home/bevel/build/carrier/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. 
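# Within an organization's own peers section, gossippeeraddress points at another peer of
# the same organization (the same peer when there is only one) and is typically the
# in-cluster service address, while peerAddress is the externally reachable proxy URI on
# port 443, e.g.:
- peer:
    name: peer0
    type: anchor
    gossippeeraddress: peer0.manufacturer-net:7051                            # in-cluster gossip address
    peerAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443  # external URI through the proxy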
grpc: port: 7051 @@ -409,12 +397,10 @@ network: state: New York location: New York subject: "O=Store,OU=Store,L=40.73/-74/New York,C=US" - type: peer external_url_suffix: org4proxy.blockchaincloudpoc.com org_status: new # Status of the organization for the existing network, can be new / existing orderer_org: supplychain # Name of the organization that provides the ordering service ca_data: - url: ca.store-net.org4proxy.blockchaincloudpoc.com certificate: /home/bevel/build/store/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -467,7 +453,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.store-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - certificate: /home/bevel/build/store/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. grpc: port: 7051 @@ -500,12 +485,10 @@ network: state: Massachusetts location: Boston subject: "O=Warehouse,OU=Warehouse,L=42.36/-71.06/Boston,C=US" - type: peer external_url_suffix: org5proxy.blockchaincloudpoc.com org_status: existing # Status of the organization for the existing network, can be new / existing orderer_org: supplychain # Name of the organization that provides the ordering service ca_data: - url: ca.warehouse-net.org5proxy.blockchaincloudpoc.com certificate: /home/bevel/build/warehouse/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -553,7 +536,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.warehouse-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.warehouse-net.org5proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - certificate: /home/bevel/build/warehouse/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. grpc: port: 7051 diff --git a/platforms/hyperledger-fabric/configuration/samples/network-fabric-add-peer.yaml b/platforms/hyperledger-fabric/configuration/samples/network-fabric-add-peer.yaml index 0ce83ed16ea..e847535da4f 100644 --- a/platforms/hyperledger-fabric/configuration/samples/network-fabric-add-peer.yaml +++ b/platforms/hyperledger-fabric/configuration/samples/network-fabric-add-peer.yaml @@ -11,7 +11,7 @@ network: # Network level configuration specifies the attributes required for each organization # to join an existing network. 
type: fabric - version: 2.2.2 # currently tested 1.4.8 and 2.2.2 + version: 2.2.2 # currently tested 1.4.8, 2.2.2 and 2.5.4 frontend: enabled #Flag for frontend to enabled for nodes/peers #Environment section for Kubernetes setup @@ -42,26 +42,24 @@ network: type: orderer name: orderer1 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer1.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer1.crt # Ensure that the directory exists + uri: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - orderer: type: orderer name: orderer2 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer2.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer2.crt # Ensure that the directory exists + uri: orderer2.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - orderer: type: orderer name: orderer3 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer3.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer3.crt # Ensure that the directory exists + uri: orderer3.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers # The channels defined for a network with participating peers in each channel channels: - channel: consortium: SupplyChainConsortium channel_name: AllChannel + channel_status: new chaincodes: - "chaincode_name" orderers: @@ -82,9 +80,7 @@ network: peerstatus: new # new peers should have status as new gossipAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the gossip peer peerAddress: peer1.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the orderer - genesis: - name: OrdererGenesis + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the orderer # Allows specification of one or many organizations that will be connecting to a network. # If an organization is also hosting the root of the network (e.g. 
doorman, membership service, etc), @@ -97,12 +93,10 @@ network: state: London location: London subject: "O=Carrier,OU=Carrier,L=51.50/-0.13/London,C=GB" - type: peer external_url_suffix: org3proxy.blockchaincloudpoc.com org_status: existing # org_status must be existing when adding peer orderer_org: supplychain # Name of the organization that provides the ordering service ca_data: - url: ca.carrier-net.org3proxy.blockchaincloudpoc.com # CA Server URL must be public when adding peer on new cluster certificate: /path/carrier/server.crt # CA Server public cert must be provided when adding peer on new cluster cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -157,7 +151,6 @@ network: gossippeeraddress: peer1.carrier-net.org3proxy.blockchaincloudpoc.com:443 # No change from original configuration peerAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer peerstatus: existing # old peers should have status as existing - certificate: /path/carrier/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. grpc: port: 7051 @@ -190,7 +183,6 @@ network: gossippeeraddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External address of the existing anchor peer peerAddress: peer1.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer peerstatus: new # new peers should have status as new - certificate: /path/carrier/peer1.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. grpc: port: 7051 @@ -225,11 +217,9 @@ network: state: London location: London subject: "O=Orderer,OU=Orderer,L=51.50/-0.13/London,C=GB" - type: orderer external_url_suffix: org1proxy.blockchaincloudpoc.com org_status: existing # org_status must be existing when adding peer ca_data: - url: ca.supplychain-net.org1proxy.blockchaincloudpoc.com certificate: /path/supplychain/server.crt # Path where ca public cert will be stored (if new) or read from (if existing ca) cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -287,18 +277,18 @@ network: consensus: raft grpc: port: 7050 - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - orderer: name: orderer2 type: orderer consensus: raft grpc: port: 7050 - ordererAddress: orderer2.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer2.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - orderer: name: orderer3 type: orderer consensus: raft grpc: port: 7050 - ordererAddress: orderer3.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer3.supplychain-net.org1proxy.blockchaincloudpoc.com:443 diff --git a/platforms/hyperledger-fabric/configuration/samples/network-fabric-remove-organization.yaml b/platforms/hyperledger-fabric/configuration/samples/network-fabric-remove-organization.yaml index df1dc6e7c70..1e1d8f22367 100644 --- a/platforms/hyperledger-fabric/configuration/samples/network-fabric-remove-organization.yaml +++ b/platforms/hyperledger-fabric/configuration/samples/network-fabric-remove-organization.yaml @@ -11,7 +11,7 @@ network: # Network level configuration specifies the attributes required for each organization # to remove an organization from existing network. 
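# In the add-peer sample above, the peer that already runs keeps peerstatus: existing while
# the peer being added is marked peerstatus: new, and the new peer's gossip address points
# at the existing anchor peer. Condensed to those keys:
peers:
  - peer:
      name: peer0
      peerstatus: existing     # already part of the network
      gossippeeraddress: peer1.carrier-net.org3proxy.blockchaincloudpoc.com:443
      peerAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443
  - peer:
      name: peer1
      peerstatus: new          # peer being added
      gossippeeraddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443
      peerAddress: peer1.carrier-net.org3proxy.blockchaincloudpoc.com:443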
type: fabric - version: 2.2.2 # currently tested 1.4.8 and 2.2.2 + version: 2.2.2 # currently tested 1.4.8, 2.2.2 and 2.5.4 frontend: enabled #Flag for frontend to enabled for nodes/peers @@ -43,25 +43,23 @@ network: type: orderer name: orderer1 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer1.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer1.crt # Path of the orderer certificate which must exist + uri: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - orderer: type: orderer name: orderer2 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer2.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer2.crt # Path of the orderer certificate which must exist + uri: orderer2.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - orderer: type: orderer name: orderer3 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer3.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer3.crt # Ensure that the directory exists + uri: orderer3.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers # The channels defined for a network with participating peers in each channel channels: - channel: consortium: SupplyChainConsortium + channel_status: new channel_name: AllChannel chaincodes: - "chaincode_name" @@ -77,7 +75,7 @@ network: name: peer0 gossipAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the gossip peer peerAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the orderer + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the orderer - organization: name: store type: joiner # joiner organization will only join the channel and install chaincode @@ -87,7 +85,7 @@ network: name: peer0 gossipAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 peerAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - organization: name: warehouse type: joiner @@ -97,7 +95,7 @@ network: name: peer0 gossipAddress: peer0.warehouse-net.org5proxy.blockchaincloudpoc.com:443 peerAddress: peer0.warehouse-net.org5proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - 
organization: name: manufacturer type: joiner @@ -107,9 +105,7 @@ network: name: peer0 gossipAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 peerAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 - genesis: - name: OrdererGenesis + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Allows specification of one or many organizations that will be connecting to a network. # If an organization is also hosting the root of the network (e.g. doorman, membership service, etc), @@ -123,11 +119,9 @@ network: state: London location: London subject: "O=Orderer,OU=Orderer,L=51.50/-0.13/London,C=GB" - type: orderer external_url_suffix: org1proxy.blockchaincloudpoc.com org_status: existing # Status of the organization for the existing network, can be delete / existing ca_data: - url: ca.supplychain-net.org1proxy.blockchaincloudpoc.com certificate: /path/supplychain/server.crt # Path where ca public cert will be stored (if new) or read from (if existing ca) cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -185,21 +179,21 @@ network: consensus: raft grpc: port: 7050 - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - orderer: name: orderer2 type: orderer consensus: raft grpc: port: 7050 - ordererAddress: orderer2.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer2.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - orderer: name: orderer3 type: orderer consensus: raft grpc: port: 7050 - ordererAddress: orderer3.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer3.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Specification for the 2nd organization. Each organization maps to a VPC and a separate k8s cluster - organization: @@ -208,12 +202,10 @@ network: state: Zurich location: Zurich subject: "O=Manufacturer,OU=Manufacturer,L=47.38/8.54/Zurich,C=CH" - type: peer external_url_suffix: org2proxy.blockchaincloudpoc.com org_status: existing # Status of the organization for the existing network, can be delete / existing orderer_org: supplychain # Name of the organization that provides the ordering service ca_data: - url: ca.manufacturer-net.org2proxy.blockchaincloudpoc.com certificate: /path/manufacturer/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -263,7 +255,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.manufacturer-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - certificate: /path/manufacturer/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. 
grpc: port: 7051 @@ -297,12 +288,10 @@ network: state: London location: London subject: "O=Carrier,OU=Carrier,L=51.50/-0.13/London,C=GB" - type: peer external_url_suffix: org3proxy.blockchaincloudpoc.com org_status: existing # Status of the organization for the existing network, can be delete / existing orderer_org: supplychain # Name of the organization that provides the ordering service ca_data: - url: ca.carrier-net.org3proxy.blockchaincloudpoc.com certificate: /path/carrier/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -350,7 +339,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.carrier-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - certificate: /path/carrier/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. grpc: port: 7051 @@ -384,12 +372,10 @@ network: state: New York location: New York subject: "O=Store,OU=Store,L=40.73/-74/New York,C=US" - type: peer external_url_suffix: org4proxy.blockchaincloudpoc.com org_status: delete # Status of the organization for the existing network, can be delete / existing orderer_org: supplychain # Name of the organization that provides the ordering service ca_data: - url: ca.store-net.org4proxy.blockchaincloudpoc.com certificate: /path/store/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -437,7 +423,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.store-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - certificate: /path/store/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. grpc: port: 7051 @@ -471,12 +456,10 @@ network: state: Massachusetts location: Boston subject: "O=Warehouse,OU=Warehouse,L=42.36/-71.06/Boston,C=US" - type: peer external_url_suffix: org5proxy.blockchaincloudpoc.com org_status: existing # Status of the organization for the existing network, can be delete / existing orderer_org: supplychain # Name of the organization that provides the ordering service ca_data: - url: ca.warehouse-net.org5proxy.blockchaincloudpoc.com certificate: /path/warehouse/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -524,7 +507,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.warehouse-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.warehouse-net.org5proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - certificate: /path/warehouse/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. 
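# In the remove-organization sample, only the organization leaving the network carries
# org_status: delete; every other organization stays existing. Reduced to the relevant keys:
- organization:
    name: store
    org_status: delete        # organization to be removed
- organization:
    name: warehouse
    org_status: existing      # remaining member, unchanged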
grpc: port: 7051 diff --git a/platforms/hyperledger-fabric/configuration/samples/network-fabricv2-external-chaincode.yaml b/platforms/hyperledger-fabric/configuration/samples/network-fabricv2-external-chaincode.yaml index e92ec77618c..a5c451ad8ee 100644 --- a/platforms/hyperledger-fabric/configuration/samples/network-fabricv2-external-chaincode.yaml +++ b/platforms/hyperledger-fabric/configuration/samples/network-fabricv2-external-chaincode.yaml @@ -11,7 +11,7 @@ network: # Network level configuration specifies the attributes required for each organization # to join an existing network. type: fabric - version: 2.2.2 # currently tested 1.4.8 and 2.2.2 + version: 2.2.2 # currently tested 1.4.8, 2.2.2 and 2.5.4 frontend: enabled #Flag for frontend to enabled for nodes/peers @@ -48,25 +48,23 @@ network: type: orderer name: orderer1 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer1.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer1.crt # Ensure that the directory exists + uri: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - orderer: type: orderer name: orderer2 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer2.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer2.crt # Ensure that the directory exists + uri: orderer2.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - orderer: type: orderer name: orderer3 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer3.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer3.crt + uri: orderer3.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers # The channels defined for a network with participating peers in each channel channels: - channel: consortium: SupplyChainConsortium channel_name: AllChannel + channel_status: new chaincodes: - "assettransfer" orderers: @@ -81,7 +79,7 @@ network: name: peer0 gossipAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the gossip peer peerAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the orderer + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the orderer - organization: name: store type: joiner # joiner organization will only join the channel and install chaincode @@ -91,7 +89,7 @@ network: name: peer0 gossipAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 peerAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: 
orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - organization: name: warehouse type: joiner @@ -101,7 +99,7 @@ network: name: peer0 gossipAddress: peer0.warehouse-net.org5proxy.blockchaincloudpoc.com:443 peerAddress: peer0.warehouse-net.org5proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - organization: name: manufacturer type: joiner @@ -111,7 +109,7 @@ network: name: peer0 gossipAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 peerAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 endorsers: # Only one peer per org required for endorsement - organization: @@ -135,8 +133,6 @@ network: name: peer0 corepeerAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 certificate: "/home/bevel/build/manufacturer/server.crt" # certificate path for peer - genesis: - name: OrdererGenesis # Allows specification of one or many organizations that will be connecting to a network. # If an organization is also hosting the root of the network (e.g. doorman, membership service, etc), @@ -150,12 +146,10 @@ network: state: London location: London subject: "O=Orderer,OU=Orderer,L=51.50/-0.13/London,C=GB" - type: orderer external_url_suffix: org1proxy.blockchaincloudpoc.com org_status: new fabric_console: enabled # To deploy Fabric console for this organization ca_data: - url: ca.supplychain-net:7054 certificate: /home/bevel/build/supplychain/server.crt # Path where ca public cert will be stored (if new) or read from (if existing ca) cloud_provider: aws # Options: aws, azure, gcp, minikube @@ -187,7 +181,7 @@ network: git_repo: "github.com//bevel.git" # Gitops https URL for git push (without https://) username: "git_username" # Git Service user who has rights to check-in in all branches password: "git_access_token" # Git Server user password - email: "git_email" # Email to use in git config + email: "git@email.com" # Email to use in git config private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo # Services maps to the pods that will be deployed on the k8s cluster @@ -230,13 +224,11 @@ network: state: Zurich location: Zurich subject: "O=Manufacturer,OU=Manufacturer,L=47.38/8.54/Zurich,C=CH" - type: peer external_url_suffix: org2proxy.blockchaincloudpoc.com org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service fabric_console: disabled # To deploy Fabric console for this organization ca_data: - url: ca.manufacturer-net:7054 certificate: /home/bevel/build/manufacturer/server.crt cloud_provider: aws # Options: aws, azure, gcp, minikube @@ -268,7 +260,7 @@ network: git_repo: "github.com//bevel.git" # Gitops https URL for git push (without https://) username: "git_username" # Git Service user who has rights to check-in in all branches password: "git_access_token" # Git Server user password - email: "git_email" # Email to use in git config + email: "git@email.com" # Email to use in git config private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo # Generating User Certificates with custom 
attributes using Fabric CA in BAF for Peer Organizations users: @@ -292,7 +284,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.manufacturer-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - certificate: /home/bevel/build/manufacturer/peer0.crt # Path to peer Certificate cli: enabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. configpath: /home/bevel/build/peer0-core.yaml # path to custom core.yaml grpc: @@ -310,9 +301,11 @@ network: chaincodes: - name: "assettransfer" # This has to be replaced with the name of the chaincode version: "1" # This has to be replaced with the version of the chaincode + sequence: "1" # Sequence of the chaincode, update this only for chaincode upgrade external_chaincode: true init_required: false tls: true + upgrade_chaincode: false buildpack_path: /home/fabric-samples/asset-transfer-basic/chaincode-external/sampleBuilder # The path where buildpacks are locally stored image: ghcr.io/hyperledger/bevel-samples-example:1.0 arguments: '\"InitLedger\",\"\"' # Init Arguments to be passed which will mark chaincode as init-required @@ -324,13 +317,11 @@ network: state: London location: London subject: "O=Carrier,OU=Carrier,L=51.50/-0.13/London,C=GB" - type: peer external_url_suffix: org3proxy.blockchaincloudpoc.com org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service fabric_console: disabled # To not deploy Fabric console for this organization ca_data: - url: ca.carrier-net:7054 certificate: /home/bevel/build/carrier/server.crt cloud_provider: aws # Options: aws, azure, gcp, minikube @@ -362,7 +353,7 @@ network: git_repo: "github.com//bevel.git" # Gitops https URL for git push (without https://) username: "git_username" # Git Service user who has rights to check-in in all branches password: "git_access_token" # Git Server user password - email: "git_email" # Email to use in git config + email: "git@email.com" # Email to use in git config private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo # Generating User Certificates with custom attributes using Fabric CA in Bevel for Peer Organizations users: @@ -384,7 +375,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.carrier-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - certificate: /home/bevel/build/carrier/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. 
configpath: /home/bevel/build/peer0-core.yaml # path to custom core.yaml grpc: @@ -402,9 +392,11 @@ network: chaincodes: - name: "assettransfer" # This has to be replaced with the name of the chaincode version: "1" # This has to be replaced with the version of the chaincode + sequence: "1" # Sequence of the chaincode, update this only for chaincode upgrade external_chaincode: true init_required: false tls: true + upgrade_chaincode: false buildpack_path: /home/fabric-samples/asset-transfer-basic/chaincode-external/sampleBuilder # The path where buildpacks are locally stored image: ghcr.io/hyperledger/bevel-samples-example:1.0 arguments: '\"InitLedger\",\"\"' # Init Arguments to be passed which will mark chaincode as init-required @@ -416,13 +408,11 @@ network: state: New York location: New York subject: "O=Store,OU=Store,L=40.73/-74/New York,C=US" - type: peer external_url_suffix: org4proxy.blockchaincloudpoc.com org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service fabric_console: disabled # To not deploy Fabric console for this organization ca_data: - url: ca.store-net:7054 certificate: /home/bevel/build/store/server.crt cloud_provider: aws # Options: aws, azure, gcp, minikube @@ -454,7 +444,7 @@ network: git_repo: "github.com//bevel.git" # Gitops https URL for git push (without https://) username: "git_username" # Git Service user who has rights to check-in in all branches password: "git_access_token" # Git Server user password - email: "git_email" # Email to use in git config + email: "git@email.com" # Email to use in git config private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo # Generating User Certificates with custom attributes using Fabric CA in Bevel for Peer Organizations users: @@ -476,7 +466,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.store-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - certificate: /home/bevel/build/store/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. 
configpath: /home/bevel/build/peer0-core.yaml # path to custom core.yaml grpc: @@ -494,9 +483,11 @@ network: chaincodes: - name: "assettransfer" # This has to be replaced with the name of the chaincode version: "1" # This has to be replaced with the version of the chaincode + sequence: "1" # Sequence of the chaincode, update this only for chaincode upgrade external_chaincode: true init_required: false tls: true + upgrade_chaincode: false buildpack_path: /home/fabric-samples/asset-transfer-basic/chaincode-external/sampleBuilder # The path where buildpacks are locally stored image: ghcr.io/hyperledger/bevel-samples-example:1.0 arguments: '\"InitLedger\",\"\"' # Init Arguments to be passed which will mark chaincode as init-required @@ -507,13 +498,11 @@ network: state: Massachusetts location: Boston subject: "O=Warehouse,OU=Warehouse,L=42.36/-71.06/Boston,C=US" - type: peer external_url_suffix: org5proxy.blockchaincloudpoc.com org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service fabric_console: disabled # To not deploy Fabric console for this organization ca_data: - url: ca.warehouse-net:7054 certificate: /home/bevel/build/warehouse/server.crt cloud_provider: aws # Options: aws, azure, gcp, minikube @@ -545,7 +534,7 @@ network: git_repo: "github.com//bevel.git" # Gitops https URL for git push (without https://) username: "git_username" # Git Service user who has rights to check-in in all branches password: "git_access_token" # Git Server user password - email: "git_email" # Email to use in git config + email: "git@email.com" # Email to use in git config private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo services: @@ -561,7 +550,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.warehouse-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.warehouse-net.org5proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - certificate: /home/bevel/build/warehouse/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. 
configpath: /home/bevel/build/peer0-core.yaml # path to custom core.yaml grpc: @@ -579,10 +567,11 @@ network: chaincodes: - name: "assettransfer" # This has to be replaced with the name of the chaincode version: "1" # This has to be replaced with the version of the chaincode + sequence: "1" # Sequence of the chaincode, update this only for chaincode upgrade external_chaincode: true init_required: false tls: true - upgrade_chaincode: true + upgrade_chaincode: false buildpack_path: /home/fabric-samples/asset-transfer-basic/chaincode-external/sampleBuilder # The path where buildpacks are locally stored image: ghcr.io/hyperledger/bevel-samples-example:1.0 arguments: '\"InitLedger\",\"\"' # Init Arguments to be passed which will mark chaincode as init-required diff --git a/platforms/hyperledger-fabric/configuration/samples/network-fabricv2-kafka.yaml b/platforms/hyperledger-fabric/configuration/samples/network-fabricv2-kafka.yaml index 417341e0372..91ff5f125f4 100644 --- a/platforms/hyperledger-fabric/configuration/samples/network-fabricv2-kafka.yaml +++ b/platforms/hyperledger-fabric/configuration/samples/network-fabricv2-kafka.yaml @@ -51,20 +51,19 @@ network: type: orderer name: orderer1 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer1.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer1.crt # Ensure that the directory exists + uri: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - orderer: type: orderer name: orderer2 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer2.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer2.crt # Ensure that the directory exists + uri: orderer2.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers # The channels defined for a network with participating peers in each channel channels: - channel: consortium: SupplyChainConsortium channel_name: AllChannel + channel_status: new chaincodes: - "chaincode_name" orderers: @@ -79,7 +78,7 @@ network: name: peer0 gossipAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the gossip peer peerAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the orderer + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the orderer - organization: name: store type: joiner # joiner organization will only join the channel and install chaincode @@ -89,7 +88,7 @@ network: name: peer0 gossipAddress: peer0.store-net.org3proxy.blockchaincloudpoc.com:443 peerAddress: peer0.store-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - organization: name: warehouse 
type: joiner @@ -99,7 +98,7 @@ network: name: peer0 gossipAddress: peer0.warehouse-net.org2proxy.blockchaincloudpoc.com:443 peerAddress: peer0.warehouse-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - organization: name: manufacturer type: joiner @@ -109,9 +108,7 @@ network: name: peer0 gossipAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 peerAddress: peer0.manufacturer-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 - genesis: - name: OrdererGenesis + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Allows specification of one or many organizations that will be connecting to a network. # If an organization is also hosting the root of the network (e.g. doorman, membership service, etc), @@ -125,12 +122,10 @@ network: state: London location: London subject: "O=Orderer,L=51.50/-0.13/London,C=GB" - type: orderer external_url_suffix: org1proxy.blockchaincloudpoc.com org_status: new fabric_console: enabled # To deploy Fabric console for this organization ca_data: - url: ca.supplychain-net.org1proxy.blockchaincloudpoc.com certificate: file/server.crt # Path where ca public cert will be stored (if new) or read from (if existing ca) cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -191,14 +186,14 @@ network: consensus: kafka grpc: port: 7050 - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - orderer: name: orderer2 type: orderer consensus: kafka grpc: port: 7050 - ordererAddress: orderer2.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer2.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Specification for the 2nd organization. 
Each organization maps to a VPC and a separate k8s cluster @@ -208,13 +203,11 @@ network: state: Zurich location: Zurich subject: "O=Manufacturer,OU=Manufacturer,L=47.38/8.54/Zurich,C=CH" - type: peer external_url_suffix: org2proxy.blockchaincloudpoc.com org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service fabric_console: enabled # To deploy Fabric console for this organization ca_data: - url: ca.manufacturer-net.org2proxy.blockchaincloudpoc.com certificate: file/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -297,13 +290,11 @@ network: state: London location: London subject: "O=Carrier,OU=Carrier,L=51.50/-0.13/London,C=GB" - type: peer external_url_suffix: org3proxy.blockchaincloudpoc.com org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service fabric_console: disabled # To not deploy Fabric console for this organization ca_data: - url: ca.carrier-net.org3proxy.blockchaincloudpoc.com certificate: file/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -383,13 +374,11 @@ network: state: New York location: New York subject: "O=Store,OU=Store,L=40.73/-74/New York,C=US" - type: peer external_url_suffix: org3proxy.blockchaincloudpoc.com org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service fabric_console: disabled # To not deploy Fabric console for this organization ca_data: - url: ca.store-net.org3proxy.blockchaincloudpoc.com certificate: file/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -470,13 +459,11 @@ network: state: Massachusetts location: Boston subject: "O=Warehouse,OU=Warehouse,L=42.36/-71.06/Boston,C=US" - type: peer external_url_suffix: org2proxy.blockchaincloudpoc.com org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service fabric_console: disabled # To not deploy Fabric console for this organization ca_data: - url: ca.warehouse-net.org2proxy.blockchaincloudpoc.com certificate: /file/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube diff --git a/platforms/hyperledger-fabric/configuration/samples/network-fabricv2-raft-add-orderer.yaml b/platforms/hyperledger-fabric/configuration/samples/network-fabricv2-raft-add-orderer.yaml index 1aaab440b31..37068d00dcc 100644 --- a/platforms/hyperledger-fabric/configuration/samples/network-fabricv2-raft-add-orderer.yaml +++ b/platforms/hyperledger-fabric/configuration/samples/network-fabricv2-raft-add-orderer.yaml @@ -11,7 +11,7 @@ network: # Network level configuration specifies the attributes required for each organization # to join an existing network. 
type: fabric - version: 2.2.2 # currently tested 1.4.8 and 2.2.2 + version: 2.2.2 # currently tested 1.4.8, 2.2.2 and 2.5.4 frontend: enabled #Flag for frontend to enabled for nodes/peers @@ -44,32 +44,29 @@ network: type: orderer name: orderer1 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer1.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer1.crt # Ensure that the directory exists + uri: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - orderer: type: orderer name: orderer2 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer2.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer2.crt # Ensure that the directory exists + uri: orderer2.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - orderer: type: orderer name: orderer3 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer3.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer3.crt # Ensure that the directory exists + uri: orderer3.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - orderer: type: orderer name: orderer4 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer4.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer4.crt # Ensure that the directory exists + uri: orderer4.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers # The channels defined for a network with participating peers in each channel channels: - channel: consortium: SupplyChainConsortium channel_name: AllChannel + channel_status: new chaincodes: - "chaincode_name" orderers: @@ -84,7 +81,7 @@ network: name: peer0 gossipAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the gossip peer peerAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the orderer + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the orderer - organization: name: store type: joiner # joiner organization will only join the channel and install chaincode @@ -94,7 +91,7 @@ network: name: peer0 gossipAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 peerAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: 
orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - organization: name: warehouse type: joiner @@ -104,7 +101,7 @@ network: name: peer0 gossipAddress: peer0.warehouse-net.org5proxy.blockchaincloudpoc.com:443 peerAddress: peer0.warehouse-net.org5proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - organization: name: manufacturer type: joiner @@ -114,9 +111,7 @@ network: name: peer0 gossipAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 peerAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 - genesis: - name: OrdererGenesis + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Allows specification of one or many organizations that will be connecting to a network. # If an organization is also hosting the root of the network (e.g. doorman, membership service, etc), @@ -130,11 +125,9 @@ network: state: London location: London subject: "O=Orderer,L=51.50/-0.13/London,C=GB" - type: orderer external_url_suffix: org1proxy.blockchaincloudpoc.com org_status: existing ca_data: - url: ca.supplychain-net.org1proxy.blockchaincloudpoc.com certificate: /path/supplychain/server.crt # Path where ca public cert will be stored (if new) or read from (if existing ca) cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -193,7 +186,7 @@ network: consensus: raft grpc: port: 7050 - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - orderer: name: orderer2 status: existing @@ -201,7 +194,7 @@ network: consensus: raft grpc: port: 7050 - ordererAddress: orderer2.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer2.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - orderer: name: orderer3 status: existing @@ -209,7 +202,7 @@ network: consensus: raft grpc: port: 7050 - ordererAddress: orderer3.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer3.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - orderer: name: orderer4 status: new @@ -217,4 +210,4 @@ network: consensus: raft grpc: port: 7050 - ordererAddress: orderer4.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer4.supplychain-net.org1proxy.blockchaincloudpoc.com:443 diff --git a/platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml b/platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml index de34d8340e8..6e54918ce12 100644 --- a/platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml +++ b/platforms/hyperledger-fabric/configuration/samples/network-fabricv2.yaml @@ -12,7 +12,7 @@ network: # to join an existing network. 
type: fabric version: 2.2.2 # currently tested 1.4.8, 2.2.2 and 2.5.4 - + upgrade: false # set to true to upgrade the Hyperledger Fabric version from 1.4.x to 2.2.x frontend: enabled #Flag for frontend to enabled for nodes/peers #Environment section for Kubernetes setup @@ -21,9 +21,9 @@ network: proxy: haproxy # values can be 'haproxy' or 'none' retry_count: 20 # Retry count for the checks external_dns: enabled # Should be enabled if using external-dns for automatic route configuration - annotations: # Additional annotations that can be used for some pods (ca, ca-tools, orderer and peer nodes) + labels: service: - - example1: example2 + example1: example2 deployment: {} pvc: {} # For providing Custom Templates to generate configtx.yaml @@ -48,26 +48,24 @@ network: type: orderer name: orderer1 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer1.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer1.crt # Ensure that the directory exists + uri: orderer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - orderer: type: orderer name: orderer2 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer2.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer2.crt # Ensure that the directory exists + uri: orderer2.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - orderer: type: orderer name: orderer3 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer3.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer3.crt # Ensure that the directory exists + uri: orderer3.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers # The channels defined for a network with participating peers in each channel channels: - channel: consortium: SupplyChainConsortium channel_name: AllChannel + channel_status: new osn_creator_org: # Organization name, whose orderers will create the channel.
This field is only used with version 2.5 name: supplychain chaincodes: @@ -82,9 +80,26 @@ network: peers: - peer: name: peer0 + type: anchor gossipAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the gossip peer peerAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the orderer + ordererAddress: orderer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 # Must include port, External or internal URI of the orderer + - organization: + name: supplychain + type: joiner + org_status: new + peers: + - peer: + name: peer0 + type: anchor + gossipAddress: peer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 + peerAddress: peer0.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 + - peer: + name: peer1 + type: nonanchor + gossipAddress: peer0.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 + peerAddress: peer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 - organization: name: store type: joiner # joiner organization will only join the channel and install chaincode @@ -92,9 +107,10 @@ network: peers: - peer: name: peer0 + type: anchor gossipAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 peerAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 - organization: name: warehouse type: joiner @@ -102,9 +118,10 @@ network: peers: - peer: name: peer0 + type: anchor gossipAddress: peer0.warehouse-net.org5proxy.blockchaincloudpoc.com:443 peerAddress: peer0.warehouse-net.org5proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 - organization: name: manufacturer type: joiner @@ -112,9 +129,10 @@ network: peers: - peer: name: peer0 + type: anchor gossipAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 peerAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 endorsers: # Only one peer per org required for endorsement - organization: @@ -145,8 +163,6 @@ network: name: peer0 corepeerAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 certificate: "/path/store/server.crt" # certificate path for peer - genesis: - name: OrdererGenesis # Allows specification of one or many organizations that will be connecting to a network. # If an organization is also hosting the root of the network (e.g. 
doorman, membership service, etc), @@ -160,12 +176,10 @@ network: state: London location: London subject: "O=Orderer,OU=Orderer,L=51.50/-0.13/London,C=GB" - type: orderer external_url_suffix: org1proxy.blockchaincloudpoc.com org_status: new fabric_console: enabled # To deploy Fabric console for this organization ca_data: - url: ca.supplychain-net.org1proxy.blockchaincloudpoc.com certificate: /path/supplychain/server.crt # Path where ca public cert will be stored (if new) or read from (if existing ca) cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -205,7 +219,7 @@ network: services: ca: name: ca - subject: "/C=GB/ST=London/L=London/O=Orderer/CN=ca.supplychain-net.org1proxy.blockchaincloudpoc.com" + subject: "/C=GB/ST=London/L=London/O=Orderer" type: ca grpc: port: 7054 @@ -222,21 +236,59 @@ network: consensus: raft grpc: port: 7050 - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 - orderer: name: orderer2 type: orderer consensus: raft grpc: port: 7050 - ordererAddress: orderer2.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer2.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 - orderer: name: orderer3 type: orderer consensus: raft grpc: port: 7050 - ordererAddress: orderer3.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer3.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 + + peers: + - peer: + name: peer0 + type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. + gossippeeraddress: peer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer + peerAddress: peer0.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 # External URI of the peer + cli: enabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. + grpc: + port: 7051 + events: + port: 7053 + couchdb: + port: 5984 + restserver: + targetPort: 20001 + port: 20001 + expressapi: + targetPort: 3000 + port: 3000 + - peer: + name: peer1 + type: nonanchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. + gossippeeraddress: peer0.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 # External address of the existing anchor peer + peerAddress: peer1.supplychain-net.org1proxy.hlf.blockchaincloudpoc-develop.com:443 # External URI of the peer + cli: enabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. + grpc: + port: 7051 + events: + port: 7053 + couchdb: + port: 5984 + restserver: + targetPort: 20001 + port: 20001 + expressapi: + targetPort: 3000 + port: 3000 # Specification for the 2nd organization. 
Each organization maps to a VPC and a separate k8s cluster - organization: @@ -245,13 +297,11 @@ network: state: Zurich location: Zurich subject: "O=Manufacturer,OU=Manufacturer,L=47.38/8.54/Zurich,C=CH" - type: peer external_url_suffix: org2proxy.blockchaincloudpoc.com org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service fabric_console: enabled # To deploy Fabric console for this organization ca_data: - url: ca.manufacturer-net.org2proxy.blockchaincloudpoc.com certificate: /path/manufacturer/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -297,7 +347,7 @@ network: services: ca: name: ca - subject: "/C=CH/ST=Zurich/L=Zurich/O=Manufacturer/CN=ca.manufacturer-net.org2proxy.blockchaincloudpoc.com" + subject: "/C=CH/ST=Zurich/L=Zurich/O=Manufacturer" type: ca grpc: port: 7054 @@ -307,7 +357,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.manufacturer-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - certificate: /path/manufacturer/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. cactus_connector: disabled # set to enabled to create a cactus connector for Fabric grpc: @@ -343,13 +392,11 @@ network: state: London location: London subject: "O=Carrier,OU=Carrier,L=51.50/-0.13/London,C=GB" - type: peer external_url_suffix: org3proxy.blockchaincloudpoc.com org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service fabric_console: disabled # To not deploy Fabric console for this organization ca_data: - url: ca.carrier-net.org3proxy.blockchaincloudpoc.com certificate: /path/carrier/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -393,7 +440,7 @@ network: services: ca: name: ca - subject: "/C=GB/ST=London/L=London/O=Carrier/CN=ca.carrier-net.org3proxy.blockchaincloudpoc.com" + subject: "/C=GB/ST=London/L=London/O=Carrier" type: ca grpc: port: 7054 @@ -403,7 +450,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.carrier-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - certificate: /path/carrier/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. 
cactus_connector: disabled # set to enabled to create a cactus connector for Fabric grpc: @@ -438,13 +484,11 @@ network: state: New York location: New York subject: "O=Store,OU=Store,L=40.73/-74/New York,C=US" - type: peer external_url_suffix: org4proxy.blockchaincloudpoc.com org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service fabric_console: disabled # To not deploy Fabric console for this organization ca_data: - url: ca.store-net.org4proxy.blockchaincloudpoc.com certificate: /path/store/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -488,7 +532,7 @@ network: services: ca: name: ca - subject: "/C=US/ST=New York/L=New York/O=Store/CN=ca.store-net.org4proxy.blockchaincloudpoc.com" + subject: "/C=US/ST=New York/L=New York/O=Store" type: ca grpc: port: 7054 @@ -498,7 +542,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.store-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - certificate: /path/store/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. cactus_connector: disabled # set to enabled to create a cactus connector for Fabric grpc: @@ -534,13 +577,11 @@ network: state: Massachusetts location: Boston subject: "O=Warehouse,OU=Warehouse,L=42.36/-71.06/Boston,C=US" - type: peer external_url_suffix: org5proxy.blockchaincloudpoc.com org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service fabric_console: disabled # To not deploy Fabric console for this organization ca_data: - url: ca.warehouse-net.org5proxy.blockchaincloudpoc.com certificate: /path/warehouse/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -584,7 +625,7 @@ network: services: ca: name: ca - subject: "/C=US/ST=Massachusetts/L=Boston/O=Warehouse/CN=ca.warehouse-net.org5proxy.blockchaincloudpoc.com" + subject: "/C=US/ST=Massachusetts/L=Boston/O=Warehouse" type: ca grpc: port: 7054 @@ -594,7 +635,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.warehouse-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.warehouse-net.org5proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - certificate: /path/warehouse/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. 
cactus_connector: disabled # set to enabled to create a cactus connector for Fabric grpc: diff --git a/platforms/hyperledger-fabric/configuration/samples/network-operator-fabric.yaml b/platforms/hyperledger-fabric/configuration/samples/network-operator-fabric.yaml index 1cecf912954..628f0cf424c 100644 --- a/platforms/hyperledger-fabric/configuration/samples/network-operator-fabric.yaml +++ b/platforms/hyperledger-fabric/configuration/samples/network-operator-fabric.yaml @@ -46,26 +46,24 @@ network: type: orderer name: orderer1 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer1.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer1.crt # Ensure that the directory exists + uri: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - orderer: type: orderer name: orderer2 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer2.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer2.crt # Ensure that the directory exists + uri: orderer2.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - orderer: type: orderer name: orderer3 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer3.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer3.crt # Ensure that the directory exists + uri: orderer3.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers # The channels defined for a network with participating peers in each channel channels: - channel: consortium: SupplyChainConsortium channel_name: AllChannel + channel_status: new chaincodes: - "not_supported" orderers: @@ -80,7 +78,7 @@ network: name: peer0 gossipAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the gossip peer peerAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the orderer + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Must include port, External or internal URI of the orderer - organization: name: store type: joiner # joiner organization will only join the channel and install chaincode @@ -90,7 +88,7 @@ network: name: peer0 gossipAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 peerAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - organization: name: warehouse type: joiner @@ -100,7 +98,7 @@ network: name: peer0 gossipAddress: peer0.warehouse-net.org5proxy.blockchaincloudpoc.com:443 
peerAddress: peer0.warehouse-net.org5proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - organization: name: manufacturer type: joiner @@ -110,7 +108,7 @@ network: name: peer0 gossipAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 peerAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 endorsers: # Only one peer per org required for endorsement - organization: @@ -141,8 +139,6 @@ network: name: peer0 corepeerAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 certificate: "/path/store/server.crt" # certificate path for peer - genesis: - name: OrdererGenesis # Allows specification of one or many organizations that will be connecting to a network. # If an organization is also hosting the root of the network (e.g. doorman, membership service, etc), @@ -156,12 +152,10 @@ network: state: London location: London subject: "O=Orderer,OU=Orderer,L=51.50/-0.13/London,C=GB" - type: orderer external_url_suffix: org1proxy.blockchaincloudpoc.com org_status: new fabric_console: enabled # To deploy Fabric console for this organization ca_data: - url: ca.supplychain-net.org1proxy.blockchaincloudpoc.com certificate: /path/supplychain/server.crt # Path where ca public cert will be stored (if new) or read from (if existing ca) cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -198,21 +192,21 @@ network: consensus: raft grpc: port: 7050 - ordererAddress: orderer1.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer1.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - orderer: name: orderer2 type: orderer consensus: raft grpc: port: 7050 - ordererAddress: orderer2.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer2.supplychain-net.org1proxy.blockchaincloudpoc.com:443 - orderer: name: orderer3 type: orderer consensus: raft grpc: port: 7050 - ordererAddress: orderer3.org1proxy.blockchaincloudpoc.com:443 + ordererAddress: orderer3.supplychain-net.org1proxy.blockchaincloudpoc.com:443 # Specification for the 2nd organization. Each organization maps to a VPC and a separate k8s cluster - organization: @@ -221,13 +215,11 @@ network: state: Zurich location: Zurich subject: "O=Manufacturer,OU=Manufacturer,L=47.38/8.54/Zurich,C=CH" - type: peer external_url_suffix: org2proxy.blockchaincloudpoc.com org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service fabric_console: enabled # To deploy Fabric console for this organization ca_data: - url: ca.manufacturer-net.org2proxy.blockchaincloudpoc.com certificate: /path/manufacturer/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -264,7 +256,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.manufacturer-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.manufacturer-net.org2proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - certificate: /path/manufacturer/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. 
cactus_connector: disabled # set to enabled to create a cactus connector for Fabric grpc: @@ -299,13 +290,11 @@ network: state: London location: London subject: "O=Carrier,OU=Carrier,L=51.50/-0.13/London,C=GB" - type: peer external_url_suffix: org3proxy.blockchaincloudpoc.com org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service fabric_console: disabled # To not deploy Fabric console for this organization ca_data: - url: ca.carrier-net.org3proxy.blockchaincloudpoc.com certificate: /path/carrier/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -340,7 +329,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.carrier-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.carrier-net.org3proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - certificate: /path/carrier/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. cactus_connector: disabled # set to enabled to create a cactus connector for Fabric grpc: @@ -374,13 +362,11 @@ network: state: New York location: New York subject: "O=Store,OU=Store,L=40.73/-74/New York,C=US" - type: peer external_url_suffix: org4proxy.blockchaincloudpoc.com org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service fabric_console: disabled # To not deploy Fabric console for this organization ca_data: - url: ca.store-net.org4proxy.blockchaincloudpoc.com certificate: /path/store/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -415,7 +401,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.store-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.store-net.org4proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - certificate: /path/store/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. cactus_connector: disabled # set to enabled to create a cactus connector for Fabric grpc: @@ -450,13 +435,11 @@ network: state: Massachusetts location: Boston subject: "O=Warehouse,OU=Warehouse,L=42.36/-71.06/Boston,C=US" - type: peer external_url_suffix: org5proxy.blockchaincloudpoc.com org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service fabric_console: disabled # To not deploy Fabric console for this organization ca_data: - url: ca.warehouse-net.org5proxy.blockchaincloudpoc.com certificate: /path/warehouse/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -491,7 +474,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.warehouse-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.warehouse-net.org5proxy.blockchaincloudpoc.com:443 # Must include port, External URI of the peer - certificate: /path/warehouse/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. 
cactus_connector: disabled # set to enabled to create a cactus connector for Fabric grpc: diff --git a/platforms/hyperledger-fabric/configuration/samples/network-proxy-none.yaml b/platforms/hyperledger-fabric/configuration/samples/network-proxy-none.yaml index 77e8df9e0d5..ee882785906 100644 --- a/platforms/hyperledger-fabric/configuration/samples/network-proxy-none.yaml +++ b/platforms/hyperledger-fabric/configuration/samples/network-proxy-none.yaml @@ -43,13 +43,13 @@ network: name: orderer1 org_name: supplychain # org_name should match one organization definition below in organizations: key uri: orderer1.supplychain-net:7050 # Internal URI for orderer which should be reachable by all peers - certificate: /home/bevel/build/orderer1.crt # the directory should be writable # The channels defined for a network with participating peers in each channel channels: - channel: consortium: SupplyChainConsortium channel_name: AllChannel + channel_status: new chaincodes: - "chaincode_name" orderers: @@ -91,8 +91,6 @@ network: name: peer0 corepeerAddress: peer0.manufacturer-net:7051 certificate: "/home/bevel/build/manufacturer/server.crt" # certificate path for peer - genesis: - name: OrdererGenesis # Allows specification of one or many organizations that will be connecting to a network. # If an organization is also hosting the root of the network (e.g. doorman, membership service, etc), @@ -106,12 +104,10 @@ network: state: London location: London subject: "O=Orderer,L=51.50/-0.13/London,C=GB" - type: orderer external_url_suffix: develop.local.com # Ignore for proxy none org_status: new fabric_console: enabled ca_data: - url: ca.supplychain-net:7054 certificate: /home/bevel/build/supplychain/server.crt cloud_provider: aws # Options: aws, azure, gcp @@ -179,12 +175,10 @@ network: state: Zurich location: Zurich subject: "O=Manufacturer,OU=Manufacturer,L=47.38/8.54/Zurich,C=CH" - type: peer external_url_suffix: develop.local.com # Ignore for proxy none org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service ca_data: - url: ca.manufacturer-net:7054 certificate: /home/bevel/build/manufacturer/server.crt cloud_provider: aws # Options: aws, azure, gcp @@ -242,7 +236,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.manufacturer-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.manufacturer-net:7051 # Internal URI of the peer - certificate: /home/bevel/build/manufacturer/peer0.crt # Path to peer Certificate cli: enabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. cactus_connector: enabled # set to enabled to create a cactus connector for Fabric grpc: @@ -279,12 +272,10 @@ network: state: London location: London subject: "O=Carrier,OU=Carrier,L=51.50/-0.13/London,C=GB" - type: peer external_url_suffix: develop.local.com # Ignore for proxy none org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service ca_data: - url: ca.carrier-net:7054 certificate: /home/bevel/build/carrier/server.crt cloud_provider: aws # Options: aws, azure, gcp @@ -340,7 +331,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. 
gossippeeraddress: peer0.carrier-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.carrier-net:7051 # Internal URI of the peer - certificate: /home/bevel/build/carrier/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. cactus_connector: disabled # set to enabled to create a cactus connector for Fabric grpc: diff --git a/platforms/hyperledger-fabric/configuration/samples/network-user-certificate.yaml b/platforms/hyperledger-fabric/configuration/samples/network-user-certificate.yaml index 1e387def9cf..00896f97978 100644 --- a/platforms/hyperledger-fabric/configuration/samples/network-user-certificate.yaml +++ b/platforms/hyperledger-fabric/configuration/samples/network-user-certificate.yaml @@ -37,13 +37,11 @@ network: state: London location: London subject: "O=Carrier,OU=Carrier,L=51.50/-0.13/London,C=GB" - type: peer external_url_suffix: org3proxy.blockchaincloudpoc.com org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service cli: enabled ca_data: - url: ca.carrier-net:7054 certificate: file/server.crt cloud_provider: aws # Options: aws, azure, gcp, minikube diff --git a/platforms/hyperledger-fabric/configuration/samples/workflow/network-fabric-workflow.yaml b/platforms/hyperledger-fabric/configuration/samples/workflow/network-fabric-workflow.yaml index ab2b529441e..a4818baaea0 100644 --- a/platforms/hyperledger-fabric/configuration/samples/workflow/network-fabric-workflow.yaml +++ b/platforms/hyperledger-fabric/configuration/samples/workflow/network-fabric-workflow.yaml @@ -48,20 +48,17 @@ network: type: orderer name: orderer1 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer1.EXTERNAL_URL_SUFFIX:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: USER_DIRECTORY/build/orderer1.crt # Ensure that the directory exists + uri: orderer1.supplychain-net.EXTERNAL_URL_SUFFIX:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - orderer: type: orderer name: orderer2 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer2.EXTERNAL_URL_SUFFIX:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: USER_DIRECTORY/build/orderer2.crt # Ensure that the directory exists + uri: orderer2.supplychain-net.EXTERNAL_URL_SUFFIX:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - orderer: type: orderer name: orderer3 org_name: supplychain #org_name should match one organization definition below in organizations: key - uri: orderer3.EXTERNAL_URL_SUFFIX:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers - certificate: USER_DIRECTORY/build/orderer3.crt # Ensure that the directory exists + uri: orderer3.supplychain-net.EXTERNAL_URL_SUFFIX:443 # Must include port, Can be external or internal URI for orderer which should be reachable by all peers # The channels defined for a network with participating peers in each channel channels: @@ -84,7 +81,7 @@ network: name: peer0 gossipAddress: peer0.carrier-net.EXTERNAL_URL_SUFFIX:443 # Must include port, External or internal URI of the gossip peer peerAddress: 
peer0.carrier-net.EXTERNAL_URL_SUFFIX:443 # Must include port, External URI of the peer - ordererAddress: orderer1.EXTERNAL_URL_SUFFIX:443 # Must include port, External or internal URI of the orderer + ordererAddress: orderer1.supplychain-net.EXTERNAL_URL_SUFFIX:443 # Must include port, External or internal URI of the orderer - organization: name: store type: joiner # joiner organization will only join the channel and install chaincode @@ -94,7 +91,7 @@ network: name: peer0 gossipAddress: peer0.store-net.EXTERNAL_URL_SUFFIX:443 peerAddress: peer0.store-net.EXTERNAL_URL_SUFFIX:443 # Must include port, External URI of the peer - ordererAddress: orderer1.EXTERNAL_URL_SUFFIX:443 + ordererAddress: orderer1.supplychain-net.EXTERNAL_URL_SUFFIX:443 - organization: name: warehouse type: joiner @@ -104,7 +101,7 @@ network: name: peer0 gossipAddress: peer0.warehouse-net.EXTERNAL_URL_SUFFIX:443 peerAddress: peer0.warehouse-net.EXTERNAL_URL_SUFFIX:443 # Must include port, External URI of the peer - ordererAddress: orderer1.EXTERNAL_URL_SUFFIX:443 + ordererAddress: orderer1.supplychain-net.EXTERNAL_URL_SUFFIX:443 - organization: name: manufacturer type: joiner @@ -114,7 +111,7 @@ network: name: peer0 gossipAddress: peer0.manufacturer-net.EXTERNAL_URL_SUFFIX:443 peerAddress: peer0.manufacturer-net.EXTERNAL_URL_SUFFIX:443 # Must include port, External URI of the peer - ordererAddress: orderer1.EXTERNAL_URL_SUFFIX:443 + ordererAddress: orderer1.supplychain-net.EXTERNAL_URL_SUFFIX:443 endorsers: # Only one peer per org required for endorsement - organization: @@ -145,8 +142,6 @@ network: name: peer0 corepeerAddress: peer0.store-net.EXTERNAL_URL_SUFFIX:443 certificate: "USER_DIRECTORY/store/server.crt" # certificate path for peer - genesis: - name: OrdererGenesis # Allows specification of one or many organizations that will be connecting to a network. # If an organization is also hosting the root of the network (e.g. doorman, membership service, etc), @@ -160,12 +155,10 @@ network: state: London location: London subject: "O=Orderer,OU=Orderer,L=51.50/-0.13/London,C=GB" - type: orderer external_url_suffix: EXTERNAL_URL_SUFFIX org_status: new fabric_console: enabled # To deploy Fabric console for this organization ca_data: - url: ca.supplychain-net.EXTERNAL_URL_SUFFIX certificate: USER_DIRECTORY/supplychain/server.crt # Path where ca public cert will be stored (if new) or read from (if existing ca) cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -222,21 +215,21 @@ network: consensus: raft grpc: port: 7050 - ordererAddress: orderer1.EXTERNAL_URL_SUFFIX:443 + ordererAddress: orderer1.supplychain-net.EXTERNAL_URL_SUFFIX:443 - orderer: name: orderer2 type: orderer consensus: raft grpc: port: 7050 - ordererAddress: orderer2.EXTERNAL_URL_SUFFIX:443 + ordererAddress: orderer2.supplychain-net.EXTERNAL_URL_SUFFIX:443 - orderer: name: orderer3 type: orderer consensus: raft grpc: port: 7050 - ordererAddress: orderer3.EXTERNAL_URL_SUFFIX:443 + ordererAddress: orderer3.supplychain-net.EXTERNAL_URL_SUFFIX:443 # Specification for the 2nd organization. 
Each organization maps to a VPC and a separate k8s cluster - organization: @@ -245,13 +238,11 @@ network: state: Zurich location: Zurich subject: "O=Manufacturer,OU=Manufacturer,L=47.38/8.54/Zurich,C=CH" - type: peer external_url_suffix: EXTERNAL_URL_SUFFIX org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service fabric_console: enabled # To deploy Fabric console for this organization ca_data: - url: ca.manufacturer-net.EXTERNAL_URL_SUFFIX certificate: USER_DIRECTORY/manufacturer/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -307,7 +298,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.manufacturer-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.manufacturer-net.EXTERNAL_URL_SUFFIX:443 # Must include port, External URI of the peer - certificate: USER_DIRECTORY/manufacturer/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. cactus_connector: disabled # set to enabled to create a cactus connector for Fabric grpc: @@ -343,13 +333,11 @@ network: state: London location: London subject: "O=Carrier,OU=Carrier,L=51.50/-0.13/London,C=GB" - type: peer external_url_suffix: EXTERNAL_URL_SUFFIX org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service fabric_console: disabled # To not deploy Fabric console for this organization ca_data: - url: ca.carrier-net certificate: USER_DIRECTORY/carrier/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -403,7 +391,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.carrier-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.carrier-net.EXTERNAL_URL_SUFFIX:443 # Must include port, External URI of the peer - certificate: USER_DIRECTORY/carrier/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. cactus_connector: disabled # set to enabled to create a cactus connector for Fabric grpc: @@ -438,13 +425,11 @@ network: state: New York location: New York subject: "O=Store,OU=Store,L=40.73/-74/New York,C=US" - type: peer external_url_suffix: EXTERNAL_URL_SUFFIX org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service fabric_console: disabled # To not deploy Fabric console for this organization ca_data: - url: ca.store-net certificate: USER_DIRECTORY/store/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -498,7 +483,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.store-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.store-net.EXTERNAL_URL_SUFFIX:443 # Must include port, External URI of the peer - certificate: USER_DIRECTORY/store/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. 
cactus_connector: disabled # set to enabled to create a cactus connector for Fabric grpc: @@ -534,13 +518,11 @@ network: state: Massachusetts location: Boston subject: "O=Warehouse,OU=Warehouse,L=42.36/-71.06/Boston,C=US" - type: peer external_url_suffix: EXTERNAL_URL_SUFFIX org_status: new orderer_org: supplychain # Name of the organization that provides the ordering service fabric_console: disabled # To not deploy Fabric console for this organization ca_data: - url: ca.warehouse-net certificate: USER_DIRECTORY/warehouse/server.crt cloud_provider: aws # Options: aws, azure, gcp, digitalocean, minikube @@ -594,7 +576,6 @@ network: type: anchor # This can be anchor/nonanchor. Atleast one peer should be anchor peer. gossippeeraddress: peer0.warehouse-net:7051 # Internal Address of the other peer in same Org for gossip, same peer if there is only one peer peerAddress: peer0.warehouse-net.EXTERNAL_URL_SUFFIX:443 # Must include port, External URI of the peer - certificate: USER_DIRECTORY/warehouse/peer0.crt # Path to peer Certificate cli: disabled # Creates a peer cli pod depending upon the (enabled/disabled) tag. cactus_connector: disabled # set to enabled to create a cactus connector for Fabric grpc: diff --git a/platforms/hyperledger-indy/charts/README.md b/platforms/hyperledger-indy/charts/README.md index cd44972df0d..d0255bf7122 100644 --- a/platforms/hyperledger-indy/charts/README.md +++ b/platforms/hyperledger-indy/charts/README.md @@ -3,40 +3,154 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) -# Charts for Indy components +# Charts for Hyperledger Indy components ## About -This folder contains helm charts which are used by the ansible playbooks for the deployment of the component. Each chart folder contain a folder for templates, chart file and the corresponding value file. +This folder contains the helm charts used for the deployment of the Hyperledger Indy components. Each helm chart exposes the following global keys, which you need to set. The `global.cluster.provider` key is used to enable the various cloud-specific features. You only need to specify one cloud provider, **not** both, if deploying to the cloud. As of writing this doc, AWS and Azure are fully supported. -## Example Folder Structure ### +```yaml +global: + serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # future: set to true to use Cloud Native Services + kubernetesUrl: "https://kubernetes.url" # Provide the k8s URL, ignore if not using Hashicorp Vault + vault: + type: hashicorp # choose from hashicorp | kubernetes + network: indy # must be indy for these charts + # The following are necessary only when Hashicorp Vault is used.
+ address: "http://vault.url:8200" + authPath: authority + secretEngine: secretsv2 + secretPrefix: "data/authority" + role: vault-role ``` -/indy-node -|-- templates -| |--_helpers.tpl -| |-- volumes.yaml -| |-- deployment.yaml -| |-- service.yaml -|-- Chart.yaml -|-- values.yaml + +## Usage + +### Pre-requisites + +- Kubernetes Cluster (either Managed cloud option like EKS or local like minikube) +- Accessible and unsealed Hashicorp Vault (if using Vault) +- Configured Ambassador AES (if using Ambassador as proxy) +- Update the dependencies + ``` + helm dependency update indy-key-mgmt + helm dependency update indy-node + ``` + +### _Without Proxy or Vault_ + +> **Important:** As Indy nodes need an IP address, the no-proxy option works only with minikube or a cluster with a single node in the nodepool. + +Replace the `publicIp` in all the files in the `./values/noproxy-and-novault/` folder with the IP address of your Minikube or the single node in your cloud cluster. + +For Indy, the keys need to be created first for each organisation. +```bash +# Create keys for first trustee +helm install authority-keys ./indy-key-mgmt --namespace authority-ns --create-namespace --values ./values/noproxy-and-novault/authority-keys.yaml +# Create keys for endorser and stewards from another org namespace +helm install university-keys ./indy-key-mgmt --namespace university-ns --create-namespace --values ./values/noproxy-and-novault/university-keys.yaml + +# Get the public keys from Kubernetes for genesis +cd ../scripts/genesis +chmod +x get_keys.sh +./get_keys.sh + +cd ../../charts +# Update the IP address and Ports in ./values/noproxy-and-novault/genesis.yaml +helm install genesis ./indy-genesis --namespace authority-ns --values ./values/noproxy-and-novault/genesis.yaml + +# Get the genesis files from existing authority and place in indy-genesis/files +cd ./indy-genesis/files/ +kubectl --namespace authority-ns get configmap dtg -o jsonpath='{.data.domain_transactions_genesis}' > domain_transactions_genesis.json +kubectl --namespace authority-ns get configmap ptg -o jsonpath='{.data.pool_transactions_genesis}' > pool_transactions_genesis.json + +# Run secondary genesis +cd ../..
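# Optional sanity check (a minimal illustration, assuming jq is installed): confirm the
# genesis files copied above exist and contain one JSON transaction per line before the
# secondary genesis install below reads them from indy-genesis/files.
ls -l ./indy-genesis/files/domain_transactions_genesis.json ./indy-genesis/files/pool_transactions_genesis.json
jq -c . ./indy-genesis/files/pool_transactions_genesis.json | head -n 3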
+helm install genesis ./indy-genesis --namespace university-ns --values ./values/noproxy-and-novault/genesis-sec.yaml + +# Then deploy the stewards +helm install university-steward-1 ./indy-node --namespace university-ns --values ./values/noproxy-and-novault/steward.yaml +helm install university-steward-2 ./indy-node --namespace university-ns --values ./values/noproxy-and-novault/steward.yaml --set settings.node.externalPort=30021 --set settings.client.externalPort=30022 --set settings.node.port=30021 --set settings.client.port=30022 +helm install university-steward-3 ./indy-node --namespace university-ns --values ./values/noproxy-and-novault/steward.yaml --set settings.node.externalPort=30031 --set settings.client.externalPort=30032 --set settings.node.port=30031 --set settings.client.port=30032 + +# Get endorser public keys +cd ./indy-register-identity/files +kubectl --namespace university-ns get secret university-endorser-identity-public -o jsonpath='{.data.value}' | base64 -d | jq '.["did"]'> university-endorser-did.json +kubectl --namespace university-ns get secret university-endorser-node-public-verif-keys -o jsonpath='{.data.value}' | base64 -d | jq '.["verification-key"]' > university-endorser-verkey.json +# Register the endorser identity using the trustee's credentials +# Deploy the endorser identity registration Helm chart in the authority namespace, where the trustee resides +cd ../.. +helm install university-endorser-id ./indy-register-identity --namespace authority-ns +``` + +### _With Ambassador proxy and Vault_ +Replace the `global.vault.address`, `global.cluster.kubernetesUrl` and `publicIp` of your Ambassador Loadbalancer in all the files in `./values/proxy-and-vault/` folder. + +For Indy, the keys need to be created first for each organisation +```bash +kubectl create namespace authority-ns # if the namespace does not exist already +# Create the roottoken secret +kubectl -n authority-ns create secret generic roottoken --from-literal=token= + +kubectl create namespace university-ns # if the namespace does not exist already +# Create the roottoken secret +kubectl -n university-ns create secret generic roottoken --from-literal=token= + +# Create keys for first trustee +helm install authority-keys ./indy-key-mgmt --namespace authority-ns --values ./values/proxy-and-vault/authority-keys.yaml +# Create keys for endorser and stewards from another org namespace +helm install university-keys ./indy-key-mgmt --namespace university-ns --values ./values/proxy-and-vault/university-keys.yaml + +# Get the public keys from Kubernetes for genesis +cd ../scripts/genesis +chmod +x get_keys.sh +./get_keys.sh + +cd ../../charts +# Update the IP address and Ports in ./values/proxy-and-vault/genesis.yaml +helm install genesis ./indy-genesis --namespace authority-ns --values ./values/proxy-and-vault/genesis.yaml + +# Get the genesis files from existing authority and place in indy-genesis/files +cd ./indy-genesis/files/ +kubectl --namespace authority-ns get configmap dtg -o jsonpath='{.data.domain_transactions_genesis}' > domain_transactions_genesis.json +kubectl --namespace authority-ns get configmap ptg -o jsonpath='{.data.pool_transactions_genesis}' > pool_transactions_genesis.json + +# Run secondary genesis +cd ../.. 
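# Optional checks (illustrative): the primary genesis configmaps should now exist in the
# authority namespace, and the Vault configured in global.vault.address should be reachable
# and unsealed before the remaining releases are installed. Replace the URL with your Vault address.
kubectl --namespace authority-ns get configmap dtg ptg
curl -s http://vault.url:8200/v1/sys/health | jq '.initialized, .sealed'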
+helm install genesis ./indy-genesis --namespace university-ns --values ./values/proxy-and-vault/genesis-sec.yaml + +# Then deploy the stewards +helm install university-steward-1 ./indy-node --namespace university-ns --values ./values/proxy-and-vault/steward.yaml +helm install university-steward-2 ./indy-node --namespace university-ns --values ./values/proxy-and-vault/steward.yaml --set settings.node.externalPort=15021 --set settings.client.externalPort=15022 +helm install university-steward-3 ./indy-node --namespace university-ns --values ./values/proxy-and-vault/steward.yaml --set settings.node.externalPort=15031 --set settings.client.externalPort=15032 +helm install university-steward-4 ./indy-node --namespace university-ns --values ./values/proxy-and-vault/steward.yaml --set settings.node.externalPort=15041 --set settings.client.externalPort=15042 + +# Get endorser public keys +cd ./indy-register-identity/files +kubectl --namespace university-ns get secret university-endorser-identity-public -o jsonpath='{.data.value}' | base64 -d | jq '.["did"]'> university-endorser-did.json +kubectl --namespace university-ns get secret university-endorser-node-public-verif-keys -o jsonpath='{.data.value}' | base64 -d | jq '.["verification-key"]' > university-endorser-verkey.json +# Register the endorser identity using the trustee's credentials +# Deploy the endorser identity registration Helm chart in the authority namespace, where the trustee resides +cd ../.. +helm install university-endorser-id ./indy-register-identity --namespace authority-ns ``` -## Pre-requisites - - Helm to be installed and configured - -## Charts description ## - -### 1. indy-auth-job ### -- This folder contains chart templates and default values for creation of indy authotization job. -### 2. indy-cli ### -- This folder contains chart templates and default values for creation of indy cli. -### 4. indy-domain-genesis ### -- This folder contains chart templates and default values for creation of indy domain genesis. -### 5. indy-key-mgmt ### -- This folder contains chart templates and default values for creation of indy key management. -### 6. indy-ledger-txn ### -- This folder contains chart templates and default values for creation of indy ledger txn. -### 7. indy-node ### -- This folder contains chart templates and default values for creation of indy node. -### 8. indy-pool-genesis ### -- This folder contains chart templates and default values for creation of indy pool genesis. +### Clean-up + +To clean up, simply uninstall the Helm charts. +> **NOTE**: It's important to uninstall the genesis Helm chart at the end to prevent any cleanup failure. 
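Before running the uninstall commands below, it can help to confirm which releases and genesis config maps exist; a minimal sketch using the release names and namespaces from the examples above:

```bash
helm list --namespace university-ns
helm list --namespace authority-ns
kubectl --namespace authority-ns get configmap dtg ptg
```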
+ +```bash +helm uninstall --namespace university-ns university-steward-1 +helm uninstall --namespace university-ns university-steward-2 +helm uninstall --namespace university-ns university-steward-3 +helm uninstall --namespace university-ns university-steward-4 +helm uninstall --namespace university-ns university-keys +helm uninstall --namespace university-ns genesis + +helm uninstall --namespace authority-ns university-endorser-id +helm uninstall --namespace authority-ns authority-keys +helm uninstall --namespace authority-ns genesis +``` diff --git a/platforms/hyperledger-indy/charts/indy-auth-job/Chart.yaml b/platforms/hyperledger-indy/charts/indy-auth-job/Chart.yaml deleted file mode 100644 index 8308f3148df..00000000000 --- a/platforms/hyperledger-indy/charts/indy-auth-job/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "hyperledger-indy: Creates an indy authorization job" -name: indy-auth-job -version: 1.0.0 diff --git a/platforms/hyperledger-indy/charts/indy-auth-job/README.md b/platforms/hyperledger-indy/charts/indy-auth-job/README.md deleted file mode 100644 index 6f21d33bba8..00000000000 --- a/platforms/hyperledger-indy/charts/indy-auth-job/README.md +++ /dev/null @@ -1,187 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) -[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# indy-auth-job - -- [indy-auth-job Helm Chart](#indy-auth-node-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Verification](#verification) -- [Updating the Deployment](#updating-the-deployment) -- [Deletion](#deletion) -- [Contributing](#contributing) -- [License](#license) - - -## indy-auth-job Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/develop/platforms/hyperledger-indy/charts/indy-auth-job) helps to deploy indy authorization job. - - -## Prerequisites ---- -Before deploying the Helm chart, make sure to have the following prerequisites: - -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm installed. - - -## Chart Structure ---- -The structure of the Helm chart is as follows: - -``` -indy-auth-job/ - |- templates/ - |- helpers.tpl - |- job.yaml - |- Chart.yaml - |- README.md - |- values.yaml -``` - -- `templates/`: This directory contains the template files for generating Kubernetes resources. -- `_helpers.tpl`: Contains custom label definitions used in other templates. -- `job.yaml`: This file provides information about the kubernetes job -- `Chart.yaml`: Provides metadata about the chart, such as its name, version, and description. -- `README.md`: This file provides information and instructions about the Helm chart. -- `values.yaml`: Contains the default configuration values for the chart. It includes configuration for the metadata, image, node, Vault, etc. 
- - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-indy/charts/indy-auth-job/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: - -## Parameters ---- -### metadata - -| Name | Description | Default Value | -| ----------------| ----------------------------------------------- | ------------- | -| namespace | Provide the namespace for organization's peer | bevel | -| name | Provide the name for indy-auth-job release | indy-auth-job | - - -### network - -| Name | Description | Default Value | -| ------------ | ------------------------------------| ---------------------- | -| name | Provide the name of the network | bevel | -| kubernetes_url | Provide the kubernetes host url | https://10.3.8.5:6443 | - -### image - -| Name | Description | Default Value | -| -------- | ----------------------------------------------------------- | ------------- | -| name | Provide the image name for the indy-auth-job container | indy-auth-job | -| repository | Provide the image repoitory for the indy-auth-job container | alpine:3.9. 4 | - - -### vault - -| Name | Description | Default Value | -| ----------------- | ----------------------------------| -----------------------------------------| -| address | Provide the vault server address | http://54.226.163.39:8200 | -| identity | Provide the vault identity | my-identity | -| admin_auth_path | Provide the admin authpath | kubernetes-bevel-provider-admin-auth | -| policy | Provide the vault policy name | bevel-provider-steward-1-ro | -| policy_content | Provide the vault policy content | path "/kv/{{ organization }} | -| auth_path | Provide the authpath | kubernetes-bevel-provider-steward-1-auth | - -### account - -| Name | Description | Default Value | -| --------------| --------------------------------------- | --------------------------------------| -| admin_service | Provide the admin service account name | | -| admin_role | Provide the admin service account role | bevel-provider-admin-vault-auth | -| service | Provide the service account name | rw | -| role | Provide the service account role | provider-steward-1-vault-auth | - - - -## Deployment ---- - -To deploy the indy-auth-job Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-indy/charts/indy-auth-job/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install the chart: - ``` - $ helm repo add bevel https://hyperledger.github.io/bevel/ - $ helm install ./indy-auth-job - ``` -Replace `` with the desired name for the release. - -This will deploy the indy auth job to the Kubernetes cluster based on the provided configurations. - - - -## Verification ---- - -To verify the jobs, we can use the following command: -``` -$ kubectl get jobs -n -``` -Replace `` with the actual namespace where the job was created. The command will display information about the jobs. - - - -## Updating the job ---- - -If we need to update the job with new configurations or changes, modify the same [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-indy/charts/indy-auth-job/values.yaml) file with the desired changes and run the following Helm command: -``` -$ helm upgrade ./indy-auth-job -``` -Replace `` with the name of the release. 
This command will apply the changes to the job , ensuring the job is up to date. - - - -## Deletion ---- - -To delete the jobs and associated resources, run the following Helm command: -``` -$ helm uninstall -``` -Replace `` with the name of the release. This command will remove all the resources created by the Helm chart. - - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [INDY authorization job Helm Chart](https://github.com/hyperledger/bevel/tree/develop/platforms/hyperledger-indy/charts/indy-auth-job), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - - -## License - -This chart is licensed under the Apache v2.0 license. - -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -``` diff --git a/platforms/hyperledger-indy/charts/indy-auth-job/templates/_helpers.tpl b/platforms/hyperledger-indy/charts/indy-auth-job/templates/_helpers.tpl deleted file mode 100644 index d43c09d8cef..00000000000 --- a/platforms/hyperledger-indy/charts/indy-auth-job/templates/_helpers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} diff --git a/platforms/hyperledger-indy/charts/indy-auth-job/templates/job.yaml b/platforms/hyperledger-indy/charts/indy-auth-job/templates/job.yaml deleted file mode 100644 index 87f20ad739a..00000000000 --- a/platforms/hyperledger-indy/charts/indy-auth-job/templates/job.yaml +++ /dev/null @@ -1,90 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: batch/v1 -kind: Job -metadata: - name: "{{ $.Values.metadata.name }}" - namespace: "{{ $.Values.metadata.namespace }}" - labels: - app: "{{ $.Values.metadata.name }}" -spec: - template: - metadata: - labels: - app: "{{ $.Values.metadata.name }}" - spec: - restartPolicy: OnFailure - imagePullSecrets: - - name: "{{ $.Values.image.pullSecret }}" - serviceAccountName: {{ $.Values.account.admin_service }} - containers: - - name: "{{ $.Values.image.name }}" - image: "{{ $.Values.image.repository }}" - imagePullPolicy: IfNotPresent - command: - - "sh" - - "-c" - - > - apk update && apk add curl git jq; - - curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.15.1/bin/linux/amd64/kubectl; - - chmod u+x kubectl && mv kubectl /bin/kubectl; - - - validateVaultResponse () { - if [ ${1} != 200 -a ${1} != 204 ]; then - echo "ERROR: Unable to retrieve. 
Http status: ${1}" - exit 1 - fi - }; - - export KUBE_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token); - export KUBE_CERT=$(cat /var/run/secrets/kubernetes.io/serviceaccount/ca.crt); - - export VAULT_SA_NAME=$(kubectl get sa {{ $.Values.account.service }} -n {{ $.Values.metadata.namespace }} -o jsonpath="{.secrets[*]['name']}"); - - export SA_JWT_TOKEN=$(kubectl get secret $VAULT_SA_NAME -n {{ $.Values.metadata.namespace }} -o jsonpath="{.data.token}" | base64 -d; echo) - - VAULT_TOKEN="$(curl --request POST --data '{"jwt": "'"$KUBE_TOKEN"'", "role": "{{ $.Values.account.admin_role }}"}' -s -k {{ $.Values.vault.address }}/v1/auth/{{ $.Values.vault.admin_auth_path }}/login | jq -r '.auth.client_token')"; - - export SA_CA_CRT_ONELINE=$(kubectl get secret $VAULT_SA_NAME -n {{ $.Values.metadata.namespace }} -o jsonpath="{.data['ca\.crt']}" | base64 -d | awk 'NF {sub(/\r/, ""); printf "%s\\n",$0;}'); - - - # Create auth - - curl --header "X-Vault-Token: $VAULT_TOKEN" --request POST --data '{"type": "kubernetes"}' {{ $.Values.vault.address }}/v1/sys/auth/{{ $.Values.vault.auth_path }}; - - curl --header "X-Vault-Token: $VAULT_TOKEN" --request POST --data '{"kubernetes_host": "{{ $.Values.network.kubernetes_url }}", "kubernetes_ca_cert": "'"$SA_CA_CRT_ONELINE"'","disable_iss_validation": "true"}' -s -k {{ $.Values.vault.address }}/v1/auth/{{ $.Values.vault.auth_path }}/config; - - # Check auth - - response_status=$(curl -s -o /dev/null -w "%{http_code}" --header "X-Vault-Token: $VAULT_TOKEN" {{ $.Values.vault.address }}/v1/auth/{{ $.Values.vault.auth_path }}/config); - - validateVaultResponse ${response_status}; - - - # Create policy - - curl --header "X-Vault-Token: $VAULT_TOKEN" --request PUT --data '{"policy": "{{ $.Values.vault.policy_content }}"}' {{ $.Values.vault.address }}/v1/sys/policy/{{ $.Values.vault.policy }}; - - # Check policy - - response_status=$(curl -s -o /dev/null -w "%{http_code}" --header "X-Vault-Token: $VAULT_TOKEN" {{ $.Values.vault.address }}/v1/sys/policy/{{ $.Values.vault.policy }}); - - validateVaultResponse ${response_status}; - - - # Create role - - curl -s -o /dev/null -w "%{http_code}" --header "X-Vault-Token: $VAULT_TOKEN" --request POST --data '{"bound_service_account_names": "{{ $.Values.account.service }}","bound_service_account_namespaces": "{{ $.Values.metadata.namespace }}","policies": ["{{ $.Values.vault.policy }}"], "ttl": 3600}' {{ $.Values.vault.address }}/v1/auth/{{ $.Values.vault.auth_path }}/role/{{ $.Values.account.role }}; - - # Check role - - response_status=$(curl -s -o /dev/null -w "%{http_code}" --header "X-Vault-Token: $VAULT_TOKEN" {{ $.Values.vault.address }}/v1/auth/{{ $.Values.vault.auth_path }}/role/{{ $.Values.account.role }}); - - validateVaultResponse ${response_status}; diff --git a/platforms/hyperledger-indy/charts/indy-auth-job/values.yaml b/platforms/hyperledger-indy/charts/indy-auth-job/values.yaml deleted file mode 100644 index e71a32594ca..00000000000 --- a/platforms/hyperledger-indy/charts/indy-auth-job/values.yaml +++ /dev/null @@ -1,79 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Default values for indy-auth-job. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. 
- -metadata: - #Provide the namespace for organization's peer - #Eg. namespace: bevel - namespace: - - #Provide the name for indy-auth-job release - #Eg. name: indy-auth-job - name: - -network: - #Provide the name for network - #Eg. name: bevel - name: - - #Provide the kubernetes host url - #Eg. kubernetes_url: https://10.3.8.5:6443 - kubernetes_url: - -image: - #Provide the image name for the indy-auth-job container - #Eg. name: indy-auth-job - name: - - #Provide the image repoitory for the indy-auth-job container - #Eg. repository: alpine:3.9.4 - repository: - - -vault: - #Provide the vault server address - #Eg. address: http://54.226.163.39:8200 - address: - - #Provide the vault identity - #Eg. identity: my-identity - identity: - - #Provide the admin authpath - #Eg. admin_auth_path: kubernetes-bevel-provider-admin-auth - admin_auth_path: - - #Provide the vault policy name - #Eg. policy: bevel-provider-steward-1-ro - policy: - - #Provide the vault policy content - #Eg. policy_content: path "/kv/{{ organization }}/bevel-ac/+/+/+/public*" {capabilities = [ "read", "list" ]} - policy_content: - - #Provide the authpath - #Eg. authpath: kubernetes-bevel-provider-steward-1-auth - auth_path: - -account: - #Provide the admin service account name - #Eg. admin_service: bevel-provider-admin-vault-auth - admin_service: - - #Provide the admin service account role - #Eg. admin_role: rw - admin_role: - - #Provide the service account name - #Eg. service: provider-steward-1-vault-auth - service: - - #Provide the service account role - #Eg. role: ro - role: diff --git a/platforms/hyperledger-indy/charts/indy-cli/README.md b/platforms/hyperledger-indy/charts/indy-cli/README.md deleted file mode 100644 index 987b5ea6923..00000000000 --- a/platforms/hyperledger-indy/charts/indy-cli/README.md +++ /dev/null @@ -1,6 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) -[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - -# Hyperledger Bevel Indy indy-cli Helm chart diff --git a/platforms/hyperledger-indy/charts/indy-domain-genesis/Chart.yaml b/platforms/hyperledger-indy/charts/indy-domain-genesis/Chart.yaml deleted file mode 100644 index a38c2a093cf..00000000000 --- a/platforms/hyperledger-indy/charts/indy-domain-genesis/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "hyperledger-indy: Creates config map for domain transactions genesis" -name: indy-domain-genesis -version: 1.0.0 - diff --git a/platforms/hyperledger-indy/charts/indy-domain-genesis/README.md b/platforms/hyperledger-indy/charts/indy-domain-genesis/README.md deleted file mode 100644 index 64aead47c26..00000000000 --- a/platforms/hyperledger-indy/charts/indy-domain-genesis/README.md +++ /dev/null @@ -1,156 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) 
-[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# indy-domain-genesis - -- [indy-domain-genesis Helm Chart](#indy-node-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Verification](#verification) -- [Updating the Deployment](#updating-the-job) -- [Deletion](#deletion) -- [Contributing](#contributing) -- [License](#license) - - -## indy-domain-genesis Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/develop/platforms/hyperledger-indy/charts/indy-domain-genesis) helps to deploy the indy-domain-genesis job. - - -## Prerequisites ---- -Before deploying the Helm chart, make sure to have the following prerequisites: - -- Kubernetes cluster up and running. -- Helm installed. - - -## Chart Structure ---- -The structure of the Helm chart is as follows: - -``` -indy-domain-genesis/ - |- templates/ - |- _helpers.tpl - |- configmap.yaml - |- Chart.yaml - |- README.md - |- values.yaml -``` - -- `templates/`: This directory contains the template files for generating Kubernetes resources. -- `_helpers.tpl`: Contains custom label definitions used in other templates. -- `configmap.yaml`: This file provides information about the kubernetes configmap job -- `Chart.yaml`: Provides metadata about the chart, such as its name, version, and description. -- `README.md`: This file provides information and instructions about the Helm chart. -- `values.yaml`: Contains the default configuration values for the chart. It includes configuration for the metadata, image, node, Vault, etc. - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-indy/charts/indy-domain-genesis/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: - -## Parameters ---- -### metadata - -| Name | Description | Default Value | -| ----------------| ----------------------------------------------- | --------------------| -| namespace | Provide the namespace for organization's peer | bevel | -| name | Provide the name for indy-domain-genesis release | indy-domain-genesis | - - -### organization - -| Name | Description | Default Value | -| ----------------| -------------------------------------------------| ------------- | -| name | Provide the namespace for organization's peer | provider | -| configmap | Provide the name for organization | configmap | - - -## Deployment ---- - -To deploy the indy-domain-genesis job Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-indy/charts/indy-domain-genesis/values.yam) file to set the desired configuration values. -2. Run the following Helm command to install the chart: - ``` - $ helm repo add bevel https://hyperledger.github.io/bevel/ - $ helm install ./indy-domain-genesis - ``` -Replace `` with the desired name for the release. - -This will deploy the indy-domain-genesis job to the Kubernetes cluster based on the provided configurations. - - - -## Verification ---- - -To verify the jobs, we can use the following command: -``` -$ kubectl get jobs -n -``` -Replace `` with the actual namespace where the job was created. The command will display information about the jobs. 
- - - -## Updating the Deployment ---- - -If we need to update the job with new configurations or changes, modify the same [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-indy/charts/indy-domain-genesis/values.yaml) file with the desired changes and run the following Helm command: -``` -$ helm upgrade ./indy-domain-genesis -``` -Replace `` with the name of the release. This command will apply the changes to the job , ensuring the job is up to date. - - - -## Deletion ---- - -To delete the jobs and associated resources, run the following Helm command: -``` -$ helm uninstall -``` -Replace `` with the name of the release. This command will remove all the resources created by the Helm chart. - - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [INDY authorization job Helm Chart](https://github.com/hyperledger/bevel/tree/develop/platforms/hyperledger-indy/charts/indy-auth-job), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - - -## License - -This chart is licensed under the Apache v2.0 license. - -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -``` diff --git a/platforms/hyperledger-indy/charts/indy-domain-genesis/templates/_helpers.tpl b/platforms/hyperledger-indy/charts/indy-domain-genesis/templates/_helpers.tpl deleted file mode 100644 index d43c09d8cef..00000000000 --- a/platforms/hyperledger-indy/charts/indy-domain-genesis/templates/_helpers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} diff --git a/platforms/hyperledger-indy/charts/indy-domain-genesis/templates/configmap.yaml b/platforms/hyperledger-indy/charts/indy-domain-genesis/templates/configmap.yaml deleted file mode 100644 index 160d30479b5..00000000000 --- a/platforms/hyperledger-indy/charts/indy-domain-genesis/templates/configmap.yaml +++ /dev/null @@ -1,18 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ $.Values.organization.name }}-dtg - namespace: {{ $.Values.metadata.namespace }} - labels: - app.kubernetes.io/name: {{ $.Values.organization.name }}-dtg - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/instance: {{ .Release.Name }} -data: - domain_transactions_genesis: | - {{ $.Values.configmap.domainGenesis | nindent 6 }} diff --git a/platforms/hyperledger-indy/charts/indy-domain-genesis/values.yaml b/platforms/hyperledger-indy/charts/indy-domain-genesis/values.yaml deleted file mode 100644 index 24acff6fc9b..00000000000 --- a/platforms/hyperledger-indy/charts/indy-domain-genesis/values.yaml +++ /dev/null @@ -1,33 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Default values for indy-domain-genesis. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -metadata: - #Provide the namespace for organization's peer - #Eg. namespace: bevel - namespace: - - #Provide the name for indy-domain-genesis release - #Eg. name: indy-domain-genesis - name: - -organization: - #Provide the name for organization - #Eg. name: provider - name: - - configmap: - #Provide the domain Genesis - #Eg. domainGenesis: |- - # {"reqSignature":{},"txn":{"data":{"alias":"authority-trustee","dest":"68N4MNTN9K9cQJQhz4pKA2","role":"0","verkey":"3o5EVpzadvVYzT7X4sy1uD1d9zrRptQ72YiipCVyHroW"},"metadata":{},"type":"1"},"txnMetadata":{"seqNo":1},"ver":"1"} - # {"reqSignature":{},"txn":{"data":{"alias":"provider-steward-1","dest":"JerLtFwVmp8f4LS6tdTDwA","role":"2","verkey":"Ad2wXywwt8NiBDXhQU6am2CVHbHyYRRY38HCxATE7pzz"},"metadata":{"from":"68N4MNTN9K9cQJQhz4pKA2"},"type":"1"},"txnMetadata":{"seqNo":2},"ver":"1"} - # {"reqSignature":{},"txn":{"data":{"alias":"provider-steward-2","dest":"4M286TT2qVTSWn2i7d6Ggg","role":"2","verkey":"2pkLP55RVqjwPPZvRyrMXNasNxkGfvUuHs2sXgpvjgLv"},"metadata":{"from":"68N4MNTN9K9cQJQhz4pKA2"},"type":"1"},"txnMetadata":{"seqNo":3},"ver":"1"} - # {"reqSignature":{},"txn":{"data":{"alias":"partner-steward-1","dest":"Rsn88jsgAGSyABaB8b73V4","role":"2","verkey":"EZN4GQMvFhUv7jqDbf3Q7aow9Yb7JcKgidfSTR8zbsp5"},"metadata":{"from":"68N4MNTN9K9cQJQhz4pKA2"},"type":"1"},"txnMetadata":{"seqNo":4},"ver":"1"} - # {"reqSignature":{},"txn":{"data":{"alias":"partner-steward-2","dest":"8QrUxhXHb7v63D2PPwdZr3","role":"2","verkey":"53HWJHMUDFEaVwRLk8awK9meoatqZrqiyNsJqHP3M6sN"},"metadata":{"from":"68N4MNTN9K9cQJQhz4pKA2"},"type":"1"},"txnMetadata":{"seqNo":5},"ver":"1"} - domainGenesis: diff --git a/platforms/hyperledger-indy/charts/indy-genesis/Chart.yaml b/platforms/hyperledger-indy/charts/indy-genesis/Chart.yaml new file mode 100644 index 00000000000..c40d3ec1ac6 --- /dev/null +++ b/platforms/hyperledger-indy/charts/indy-genesis/Chart.yaml @@ -0,0 +1,26 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +apiVersion: v1 +name: indy-genesis +description: "Hyperledger Indy: Genesis generator" +version: 1.1.0 +appVersion: latest +keywords: + - bevel + - identity + - indy + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/hyperledger-indy/charts/indy-genesis/README.md b/platforms/hyperledger-indy/charts/indy-genesis/README.md new file mode 100644 index 00000000000..f09e349d304 --- /dev/null +++ b/platforms/hyperledger-indy/charts/indy-genesis/README.md @@ -0,0 +1,125 @@ +[//]: # (##############################################################################################) +[//]: # (Copyright Accenture. All Rights Reserved.) +[//]: # (SPDX-License-Identifier: Apache-2.0) +[//]: # (##############################################################################################) + +# indy-genesis + +This chart is a component of Hyperledger Bevel. The indy-genesis chart creates the domain_transactions_genesis and pool_transactions_genesis files as Kubernetes config maps for the Indy network. If enabled, the genesis files are then stored on the configured vault. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details. + +> **Important**: All the public key files should already be placed in `files` before installing this chart. Check **Prerequisites**. + +## TL;DR + +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install genesis bevel/indy-genesis +``` + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +If Hashicorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ + +Before running indy-genesis, the public key information for each trustee and steward should be saved in the `files` directory. For example, given a trustee called `authority-trustee` and a steward called `university-steward-1`, run the following commands to save the public key info. + +> **Important**: The [indy-key-mgmt](../indy-key-mgmt/README.md) chart generates these keys, so it should be installed before this chart.
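If the keys do not exist yet, indy-key-mgmt can be installed first; a minimal sketch reusing the release names and example values files from the charts README in this change (run from the `platforms/hyperledger-indy/charts` folder, adjusting the values files to your environment):

```bash
helm dependency update indy-key-mgmt
helm install authority-keys ./indy-key-mgmt --namespace authority-ns --create-namespace --values ./values/noproxy-and-novault/authority-keys.yaml
helm install university-keys ./indy-key-mgmt --namespace university-ns --create-namespace --values ./values/noproxy-and-novault/university-keys.yaml
```

With the keys in place, the commands below copy the public parts into `files`.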
+ +```bash +cd files +# trustee files are in authority-ns namespace +trustee_namespace=authority-ns +trustee_name=authority-trustee +kubectl --namespace $trustee_namespace get secret $trustee_name-identity-public -o jsonpath='{.data.value}' | base64 -d | jq '.["did"]'> $trustee_name-did.json +kubectl --namespace $trustee_namespace get secret $trustee_name-node-public-verif-keys -o jsonpath='{.data.value}' | base64 -d | jq '.["verification-key"]' > $trustee_name-verkey.json + +# steward files are in university-ns namespace +steward_namespace=university-ns +steward_name=university-steward-1 +kubectl --namespace $steward_namespace get secret $steward_name-identity-public -o jsonpath='{.data.value}' | base64 -d | jq '.["did"]'> $steward_name-did.json +kubectl --namespace $steward_namespace get secret $steward_name-node-public-verif-keys -o jsonpath='{.data.value}' | base64 -d | jq '.["verification-key"]' > $steward_name-verkey.json +kubectl --namespace $steward_namespace get secret $steward_name-node-public-bls-keys -o jsonpath='{.data.value}' | base64 -d | jq '.["bls-key-pop"]' > $steward_name-blspop.json +kubectl --namespace $steward_namespace get secret $steward_name-node-public-bls-keys -o jsonpath='{.data.value}' | base64 -d | jq '.["bls-public-key"]' > $steward_name-blspub.json +``` + +## Installing the Chart + +To install the chart with the release name `genesis`: + +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install genesis bevel/indy-genesis +``` + +The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `genesis` deployment: + +```bash +helm uninstall genesis +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters +These parameters are referred to in the same way in each parent or child chart +| Name | Description | Default Value | +|--------|---------|-------------| +|`global.serviceAccountName` | The service account name that will be created for Vault Auth and k8s Secret management| `vault-auth` | +| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. Currently only `aws`, `azure` and `minikube` are tested | `aws` | +| `global.cluster.cloudNativeServices` | only `false` is implemented; `true` to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) is planned for the future | `false` | +| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` and `kubernetes` are supported. | `hashicorp` | +| `global.vault.role` | Role used for authentication with Vault | `vault-role` | +| `global.vault.network` | Network type that is being deployed | `indy` | +| `global.vault.address`| URL of the Vault server.
| `""` | +| `global.vault.authPath` | Authentication path for Vault | `authority` | +| `global.vault.secretEngine` | The value for vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | The value for vault secret prefix which must start with `data/` | `data/authority` | + +### Image + +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `image.alpineutils` | Alpine utils image repository | `ghcr.io/hyperledger/bevel-alpine-ext:latest` | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | + +### Settings + +| Name | Description | Default Value | +|--------|---------|-------------| +|`settings.removeGenesisOnDelete` | Setting to delete the genesis configmaps when uninstalling the release | `true` | +| `settings.secondaryGenesis` | Flag to copy genesis and static nodes from `files` for secondary members | `false` | +| `settings.trustees` | Array of trustees and the related stewards with IP and port details | `- name: authority-trustee`
  `stewards:`
  `- name: university-steward-1`
      `publicIp:`
      `clientPort: 15011`
      `nodePort: 15012` | + +## License + +This chart is licensed under the Apache v2.0 license. + +Copyright © 2024 Accenture + +### Attribution + +This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: + +``` +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +``` diff --git a/platforms/hyperledger-indy/charts/indy-genesis/files/readme.txt b/platforms/hyperledger-indy/charts/indy-genesis/files/readme.txt new file mode 100644 index 00000000000..672160e2f49 --- /dev/null +++ b/platforms/hyperledger-indy/charts/indy-genesis/files/readme.txt @@ -0,0 +1 @@ +This is a dummy file. Place the public key files in this folder. \ No newline at end of file diff --git a/platforms/hyperledger-indy/charts/indy-genesis/templates/_helpers.tpl b/platforms/hyperledger-indy/charts/indy-genesis/templates/_helpers.tpl new file mode 100644 index 00000000000..34fc4d9e2cb --- /dev/null +++ b/platforms/hyperledger-indy/charts/indy-genesis/templates/_helpers.tpl @@ -0,0 +1,28 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "indy-genesis.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "indy-genesis.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "indy-genesis.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/platforms/hyperledger-indy/charts/indy-genesis/templates/configmap.yaml b/platforms/hyperledger-indy/charts/indy-genesis/templates/configmap.yaml new file mode 100644 index 00000000000..07526145207 --- /dev/null +++ b/platforms/hyperledger-indy/charts/indy-genesis/templates/configmap.yaml @@ -0,0 +1,64 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## +{{- if .Values.settings.secondaryGenesis }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: dtg + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/instance: {{ $.Release.Name }} +data: + domain_transactions_genesis: |- + {{ .Files.Get "files/domain_transactions_genesis.json" | nindent 8 }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: ptg + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/instance: {{ $.Release.Name }} +data: + pool_transactions_genesis: |- + {{ .Files.Get "files/pool_transactions_genesis.json" | nindent 8 }} +{{- else }} +{{- range .Values.settings.trustees }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .name }}-keys + namespace: {{ $.Release.Namespace }} + labels: + app.kubernetes.io/instance: {{ $.Release.Name }} +data: + did: | +{{ $.Files.Get (printf "files/%s-did.json" .name) | replace "\"" "" | indent 4 }} + verkey: | +{{ $.Files.Get (printf "files/%s-verkey.json" .name) | replace "\"" "" | indent 4 }} +{{- range .stewards }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .name }}-keys + namespace: {{ $.Release.Namespace }} + labels: + app.kubernetes.io/instance: {{ $.Release.Name }} +data: + did: | +{{ $.Files.Get (printf "files/%s-did.json" .name) | replace "\"" "" | indent 4 }} + verkey: | +{{ $.Files.Get (printf "files/%s-verkey.json" .name) | replace "\"" "" | indent 4 }} + blspop: | +{{ $.Files.Get (printf "files/%s-blspop.json" .name) | replace "\"" "" | indent 4 }} + blspub: | +{{ $.Files.Get (printf "files/%s-blspub.json" .name) | replace "\"" "" | indent 4 }} +{{- end -}} +{{- end }} +{{- end }} diff --git a/platforms/hyperledger-indy/charts/indy-genesis/templates/genesis-cleanup.yaml b/platforms/hyperledger-indy/charts/indy-genesis/templates/genesis-cleanup.yaml new file mode 100644 index 00000000000..0aa3b2032e6 --- /dev/null +++ b/platforms/hyperledger-indy/charts/indy-genesis/templates/genesis-cleanup.yaml @@ -0,0 +1,57 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "indy-genesis.name" . }}-cleanup + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/hook-weight: "0" + helm.sh/hook: "pre-delete" + helm.sh/hook-delete-policy: "hook-succeeded" + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: indy-genesis-cleanup + app.kubernetes.io/component: genesis-cleanup + app.kubernetes.io/part-of: {{ include "indy-genesis.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +spec: + backoffLimit: 5 + template: + metadata: + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: indy-genesis-cleanup + app.kubernetes.io/component: genesis-cleanup + app.kubernetes.io/part-of: {{ include "indy-genesis.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + spec: + restartPolicy: Never + imagePullSecrets: + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} + serviceAccountName: {{ .Values.global.serviceAccountName }} + containers: + - name: genesis-cleanup + image: {{ .Values.image.alpineutils }} + imagePullPolicy: IfNotPresent + command: ["sh", "-c"] + args: + - | +{{- if .Values.settings.removeGenesisOnDelete }} + if kubectl get configmap --namespace {{ $.Release.Namespace }} dtg &> /dev/null; then + kubectl delete configmap --namespace {{ $.Release.Namespace }} dtg + fi + if kubectl get configmap --namespace {{ $.Release.Namespace }} ptg &> /dev/null; then + kubectl delete configmap --namespace {{ $.Release.Namespace }} ptg + fi +{{- end }} diff --git a/platforms/hyperledger-indy/charts/indy-genesis/templates/genesis-job.yaml b/platforms/hyperledger-indy/charts/indy-genesis/templates/genesis-job.yaml new file mode 100644 index 00000000000..9bff0c1fbf3 --- /dev/null +++ b/platforms/hyperledger-indy/charts/indy-genesis/templates/genesis-job.yaml @@ -0,0 +1,159 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "indy-genesis.name" . }} + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/hook-delete-policy: "hook-succeeded" + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: indy-genesis-job + app.kubernetes.io/component: genesis-job + app.kubernetes.io/part-of: {{ include "indy-genesis.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +spec: + backoffLimit: 5 + template: + metadata: + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: indy-genesis-job + app.kubernetes.io/component: genesis-job + app.kubernetes.io/part-of: {{ include "indy-genesis.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + spec: + restartPolicy: OnFailure + imagePullSecrets: + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} + serviceAccountName: {{ .Values.global.serviceAccountName }} + containers: + - name: genesis + image: {{ .Values.image.alpineutils }} + imagePullPolicy: IfNotPresent + volumeMounts: + - name: package-manager + mountPath: /scripts/package-manager.sh + subPath: package-manager.sh + {{- if eq .Values.global.vault.type "hashicorp" }} + - name: scripts-volume + mountPath: /scripts/bevel-vault.sh + subPath: bevel-vault.sh + env: + - name: VAULT_ADDR + value: "{{ .Values.global.vault.address }}" + - name: VAULT_SECRET_ENGINE + value: "{{ .Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ .Values.global.vault.secretPrefix }}" + - name: KUBERNETES_AUTH_PATH + value: "{{ .Values.global.vault.authPath }}" + - name: VAULT_APP_ROLE + value: "{{ .Values.global.vault.role }}" + - name: VAULT_TYPE + value: "{{ .Values.global.vault.type }}" + {{- end }} + command: ["sh", "-c"] + args: + - | + + #!/bin/bash +{{- if .Values.settings.secondaryGenesis }} + echo "Secondary Genesis, config maps are created from local files." +{{- else }} + domain_genesis="" + first_global_trustee_did="" + seqNo=1 + pool_genesis="" + pseqNo=1 + {{- range .Values.settings.trustees }} + echo "Trustee: {{ .name }}" + trustee_did=$(kubectl get configmap {{ .name }}-keys -o jsonpath='{.data.did}') + trustee_verkey=$(kubectl get configmap {{ .name }}-keys -o jsonpath='{.data.verkey}') + if [ -z "$first_global_trustee_did" ] + then + first_global_trustee_did="${trustee_did}" + JSON_STRING=$( jq -n -c \ + --arg trustee_did "${trustee_did}" \ + --arg trustee_verkey "${trustee_verkey}" \ + --arg seqNo $seqNo \ + --arg alias "{{ .name }}" \ + '{"reqSignature":{},"txn":{"data":{"alias":$alias,"dest":$trustee_did,"role":"0","verkey":$trustee_verkey},"metadata":{},"type":"1"},"txnMetadata":{"seqNo":$seqNo|tonumber},"ver":"1"}') + else + JSON_STRING=$( jq -n -c \ + --arg trustee_did "${trustee_did}" \ + --arg trustee_verkey "${trustee_verkey}" \ + --arg first_global_trustee_did "${first_global_trustee_did}" \ + --arg seqNo $seqNo \ + --arg alias "{{ .name }}" \ + '{"reqSignature":{},"txn":{"data":{"alias":$alias,"dest":$trustee_did,"role":"0","verkey":$trustee_verkey},"metadata":{"from":$first_global_trustee_did},"type":"1"},"txnMetadata":{"seqNo":$seqNo|tonumber},"ver":"1"}') + fi + seqNo=$((seqNo + 1)) + domain_genesis="${domain_genesis}${JSON_STRING}\n" + + {{- range .stewards }} + echo "Steward: {{ .name }}" + steward_did=$(kubectl get configmap {{ .name }}-keys -o jsonpath='{.data.did}') + steward_verkey=$(kubectl get configmap {{ .name }}-keys -o jsonpath='{.data.verkey}') + + JSON_STRING=$( jq -n -c \ + --arg steward_did "${steward_did}" \ + --arg trustee_did "${trustee_did}" \ + --arg steward_verkey "${steward_verkey}" \ + --arg seqNo $seqNo \ + --arg alias "{{ .name }}" \ + '{"reqSignature":{},"txn":{"data":{"alias":$alias,"dest":$steward_did,"role":"2","verkey":$steward_verkey},"metadata":{"from":$trustee_did},"type":"1"},"txnMetadata":{"seqNo":$seqNo|tonumber},"ver":"1"}') + + seqNo=$((seqNo + 1)) + domain_genesis="${domain_genesis}${JSON_STRING}\n" + + bls_key=$(kubectl get configmap {{ .name }}-keys -o jsonpath='{.data.blspub}') + bls_key_pop=$(kubectl get configmap {{ .name }}-keys -o jsonpath='{.data.blspop}') + 
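# Build the pool (node) transaction for this steward: txnId is the sha256 of the steward
# alias, and the entry carries the BLS keys plus the public IP and node/client ports
# declared under settings.trustees in values.yaml.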
txn_id=$(echo {{ .name }} | sha256sum | awk '{print $1}') + json=$( jq -n -c \ + --arg alias "{{ .name }}" \ + --arg blskey "${bls_key}" \ + --arg blskey_pop "${bls_key_pop}" \ + --arg client_ip "{{ .publicIp }}" \ + --arg client_port {{ .clientPort }} \ + --arg node_ip "{{ .publicIp }}" \ + --arg node_port {{ .nodePort }} \ + --arg type "VALIDATOR" \ + --arg dest "${steward_verkey}" \ + --arg from "${steward_did}" \ + --arg seqNo ${pseqNo} \ + --arg txnId "${txn_id}" \ + '{"reqSignature":{},"txn":{"data":{"data":{"alias":$alias,"blskey":$blskey,"blskey_pop":$blskey_pop,"client_ip":$client_ip,"client_port":$client_port|tonumber,"node_ip":$node_ip,"node_port":$node_port|tonumber,"services":[$type]},"dest":$dest},"metadata":{"from":$from},"type":"0"},"txnMetadata":{"seqNo":$seqNo|tonumber,"txnId":$txnId},"ver":"1"}') + + pool_genesis="${pool_genesis}${json}\n" + pseqNo=$((pseqNo + 1)) + {{- end }} + {{- end }} + + echo -e "${domain_genesis}" + kubectl create configmap --namespace {{ .Release.Namespace }} dtg --from-literal=domain_transactions_genesis="$(echo -e $domain_genesis)" + echo -e "${pool_genesis}" + kubectl create configmap --namespace {{ .Release.Namespace }} ptg --from-literal=pool_transactions_genesis="$(echo -e $pool_genesis)" +{{- end }} + volumes: + {{- if eq .Values.global.vault.type "hashicorp" }} + - name: scripts-volume + configMap: + name: bevel-vault-script + defaultMode: 0777 + {{- end }} + - name: package-manager + configMap: + name: package-manager + defaultMode: 0777 diff --git a/platforms/hyperledger-indy/charts/indy-genesis/values.yaml b/platforms/hyperledger-indy/charts/indy-genesis/values.yaml new file mode 100644 index 00000000000..d6e17fbdbf9 --- /dev/null +++ b/platforms/hyperledger-indy/charts/indy-genesis/values.yaml @@ -0,0 +1,68 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Default values for indy-genesis. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +--- +# The following are for overriding global values +global: + #Provide the service account name which will be created. + serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # only 'false' is implemented now + vault: + #Provide the type of vault + type: kubernetes # hashicorp | kubernetes + #Provide the vault role used. + role: vault-role + #Provide the network type + network: indy + #Provide the vault server address + address: + #Provide the vault authPath configured to be used. + authPath: authority + #Provide the secret engine. + secretEngine: secretsv2 + #Provide the vault path where the secrets will be stored + secretPrefix: "data/authority" + +image: + #Provide the image for the job container + #Eg. alpineutils: ghcr.io/hyperledger/bevel-alpine-ext:latest + alpineutils: ghcr.io/hyperledger/bevel-alpine-ext:latest + #Provide the secret to use if private repository + #Eg. 
pullSecret: regcred + pullSecret: + +settings: + # Flag to ensure the genesis configmaps are removed on helm uninstall + removeGenesisOnDelete: true + # Flag to copy domain and pool genesis from files for secondary members + secondaryGenesis: false + # Provide the steward details following the trustee tree structure, as per the example below + trustees: + - name: authority-trustee + stewards: + - name: university-steward-1 # Steward name + publicIp: # Steward public IP address / Kubernetes API IP for noproxy + nodePort: 15011 # Node external port + clientPort: 15012 # Client external port + # - name: university-steward-2 + # publicIp: + # nodePort: 15021 + # clientPort: 15022 + # - name: partner-trustee + # stewards: + # - name: partner-steward-1 + # publicIp: + # nodePort: 15031 + # clientPort: 15032 + # - name: partner-steward-2 + # publicIp: + # nodePort: 15041 + # clientPort: 15042 diff --git a/platforms/hyperledger-indy/charts/indy-key-mgmt/Chart.yaml b/platforms/hyperledger-indy/charts/indy-key-mgmt/Chart.yaml index ccae66eba31..5b7e293fb62 100644 --- a/platforms/hyperledger-indy/charts/indy-key-mgmt/Chart.yaml +++ b/platforms/hyperledger-indy/charts/indy-key-mgmt/Chart.yaml @@ -5,7 +5,22 @@ ############################################################################################## apiVersion: v1 -appVersion: "2.0" -description: "hyperledger-indy: indy-key-mgmt" name: indy-key-mgmt -version: 1.0.0 +description: "Hyperledger Indy: Keys generator" +version: 1.1.0 +appVersion: latest +keywords: + - bevel + - identity + - indy + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/hyperledger-indy/charts/indy-key-mgmt/README.md b/platforms/hyperledger-indy/charts/indy-key-mgmt/README.md index a6b009f4cfd..b1990cf5b82 100644 --- a/platforms/hyperledger-indy/charts/indy-key-mgmt/README.md +++ b/platforms/hyperledger-indy/charts/indy-key-mgmt/README.md @@ -3,170 +3,89 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) - # indy-key-mgmt -- [indy-key-mgmt Helm Chart](#indy-node-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Verification](#verification) -- [Updating the Deployment](#updating-the-job) -- [Deletion](#deletion) -- [Contributing](#contributing) -- [License](#license) - - -## indy-key-mgmt Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/develop/platforms/hyperledger-indy/charts/indy-key-mgmt) helps to deploy the indy-key-mgmt job. - - -## Prerequisites ---- -Before deploying the Helm chart, make sure to have the following prerequisites: - -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm installed. +This chart is a component of Hyperledger Bevel. The indy-key-mgmt chart generates the various keys needed for a Hyperledger Indy node. The keys are stored in the configured Vault (if enabled) and also saved as Kubernetes secrets. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details.
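A minimal sketch of a typical install-time override (the release name, namespace and steward names below are placeholders; the value paths are the ones defined in this chart's values.yaml):

```bash
# Sketch: generate one trustee and two stewards, storing the keys only as
# Kubernetes secrets (global.vault.type=kubernetes, no HashiCorp Vault).
helm repo add bevel https://hyperledger.github.io/bevel
helm install authority-keys bevel/indy-key-mgmt \
  --namespace authority-ns \
  --set global.vault.type=kubernetes \
  --set settings.identities.trustee=authority-trustee \
  --set "settings.identities.stewards={university-steward-1,university-steward-2}"
```

When `global.vault.type` is not `hashicorp`, the job's `safeWriteSecret` function only creates the per-key Kubernetes secrets and skips the Vault write, as shown in the job template below.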
- -## Chart Structure ---- -The structure of the Helm chart is as follows: +## TL;DR +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install authority-keys bevel/indy-key-mgmt ``` -indy-key-mgmt/ - |- templates/ - |- _helpers.tpl - |- configmap.yaml - |- Chart.yaml - |- README.md - |- values.yaml -``` - -- `templates/`: This directory contains the template files for generating Kubernetes resources. -- `_helpers.tpl`: Contains custom label definitions used in other templates. -- `configmap.yaml`: This file provides information about the kubernetes configmap job -- `Chart.yaml`: Provides metadata about the chart, such as its name, version, and description. -- `README.md`: This file provides information and instructions about the Helm chart. -- `values.yaml`: Contains the default configuration values for the chart. It includes configuration for the metadata, image, node, Vault, etc. - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-indy/charts/indy-key-mgmt/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: - -## Parameters ---- -### metadata - -| Name | Description | Default Value | -| ----------------| ----------------------------------------------- | ----------------| -| namespace | Provide the namespace for organization's peer | bevel | -| name | Provide the name for indy-key-mgmt release | indy-key-mgmt | - -### network - -| Name | Description | Default Value | -| ----------------| ----------------------------------------------- | ------------- | -| name | Provide the name for network | bevel | - -### image -| Name | Description | Default Value | -| ----------------| ----------------------------------------------------------- | ----------------| -| name | Provide the image name for the indy-key-mgmt container | indy-key-mgmt | -| repository | Provide the image repository for the indy-key-mgmt container | ind-key-mgmt:lts| -| pullSecret | Provide the image pull secret of image | regcred | - -### vault - -| Name | Description | Default Value | -| ---------------- | ------------------------------------------- | ------------- | -| address | Provide the vault server address | http://54.226.163.39:8200 | -| version | Provide the vault secret version address | "1 or 2" | -| keyPath | Provide the key path for vault | provider.stewards | -| identity | Provide the identity for vault | my-identity | -| auth_path | Provide the authpath | kubernetes-bevel-provider-admin-auth | -| certsecretprefix | Provide the vault path where the certificates are stored | secret/organisation-name | -| retries | Provide The amount of times to retry fetching from/writing to Vault before giving up | "10" | -| sleepTimeAfterError | The amount of time in seconds to wait after an error occurs when fetching from/writing to Vault"" | "15" | +## Prerequisites +- Kubernetes 1.19+ +- Helm 3.2.0+ +If Hashicorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ -### account - -| Name | Description | Default Value | -| --------| --------------------------------- | ------------- | -| service | Provide the service account name | vault-auth-provider-agent-app | -| role |Provide the service account role | ro | - - - -## Deployment ---- - -To deploy the indy-key-mgmt job Helm chart, follow these steps: - -1. 
Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-indy/charts/indy-key-mgmt/values.yam) file to set the desired configuration values. -2. Run the following Helm command to install the chart: - ``` - $ helm repo add bevel https://hyperledger.github.io/bevel/ - $ helm install ./indy-key-mgmt - ``` -Replace `` with the desired name for the release. - -This will deploy the indy-key-mgmt job to the Kubernetes cluster based on the provided configurations. +> **Important**: Also check the dependent charts. +## Installing the Chart - -## Verification --- +To install the chart with the release name `authority-keys`: -To verify the jobs, we can use the following command: ```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install authority-keys bevel/indy-key-mgmt ``` -$ kubectl get jobs -n -``` -Replace `` with the actual namespace where the job was created. The command will display information about the jobs. - - -## Updating the deployment --- +The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. -If we need to update the job with new configurations or changes, modify the same [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-indy/charts/indy-key-mgmt/values.yaml) file with the desired changes and run the following Helm command: -``` -$ helm upgrade ./indy-key-mgmt -``` -Replace `` with the name of the release. This command will apply the changes to the job , ensuring the job is up to date. +> **Tip**: List all releases using `helm list` +## Uninstalling the Chart - -## Deletion --- +To uninstall/delete the `authority-keys` deployment: -To delete the jobs and associated resources, run the following Helm command: -``` -$ helm uninstall +```bash +helm uninstall authority-keys ``` -Replace `` with the name of the release. This command will remove all the resources created by the Helm chart. +The command removes all the Kubernetes components associated with the chart and deletes the release. - -## Contributing --- -If you encounter any bugs, have suggestions, or would like to contribute to the [INDY authorization job Helm Chart](https://github.com/hyperledger/bevel/tree/develop/platforms/hyperledger-indy/charts/indy-auth-job), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). +## Parameters +### Global parameters +These parameters are referred to identically in each parent or child chart. +| Name | Description | Default Value | +|--------|---------|-------------| +|`global.serviceAccountName` | The service account name that will be created for Vault auth and K8s secret management| `vault-auth` | +| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS, AKS or minikube. Currently only `aws`, `azure` and `minikube` are tested | `aws` | +| `global.cluster.cloudNativeServices` | Only `false` is implemented; `true`, to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure), is planned for the future | `false` | +| `global.cluster.kubernetesUrl` | URL of the Kubernetes Cluster | `""` | +| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` and `kubernetes` are supported.
| `hashicorp` | +| `global.vault.role` | Role used for authentication with Vault | `vault-role` | +| `global.vault.network` | Network type that is being deployed | `indy` | +| `global.vault.address`| URL of the Vault server. | `""` | +| `global.vault.authPath` | Authentication path for Vault | `authority` | +| `global.vault.secretEngine` | The vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | The vault secret prefix, which must start with `data/` | `data/authority` | + +### Image + +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `image.keyUtils` | Indy key generation image repository for the Indy version | `ghcr.io/hyperledger/bevel-indy-key-mgmt:1.12.6` | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | + +### Settings + +| Name | Description | Default Value | +|--------|---------|-------------| +|`settings.removeKeysOnDelete` | Setting to delete the keys when uninstalling the release | `true` | +| `settings.identities.trustee` | Single trustee identity to be created for the organization. Set to empty if not needed | `authority-trustee` | +| `settings.identities.endorser` | Single endorser identity to be created for the organization. Set to empty if not needed | `""` | +| `settings.identities.stewards` | Array of steward identities to be created for the organization. Set to empty if not needed | `[]` | - ## License This chart is licensed under the Apache v2.0 license. -Copyright © 2023 Accenture +Copyright © 2024 Accenture ### Attribution diff --git a/platforms/hyperledger-indy/charts/indy-key-mgmt/requirements.yaml b/platforms/hyperledger-indy/charts/indy-key-mgmt/requirements.yaml new file mode 100644 index 00000000000..b1195396c5f --- /dev/null +++ b/platforms/hyperledger-indy/charts/indy-key-mgmt/requirements.yaml @@ -0,0 +1,11 @@ +dependencies: + - name: bevel-vault-mgmt + repository: "file://../../../shared/charts/bevel-vault-mgmt" + tags: + - bevel + version: ~1.0.0 + - name: bevel-scripts + repository: "file://../../../shared/charts/bevel-scripts" + tags: + - bevel + version: ~1.0.0 diff --git a/platforms/hyperledger-indy/charts/indy-key-mgmt/templates/_helpers.tpl b/platforms/hyperledger-indy/charts/indy-key-mgmt/templates/_helpers.tpl index d43c09d8cef..0d54910e220 100644 --- a/platforms/hyperledger-indy/charts/indy-key-mgmt/templates/_helpers.tpl +++ b/platforms/hyperledger-indy/charts/indy-key-mgmt/templates/_helpers.tpl @@ -1,5 +1,28 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "indy-key-mgmt.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "indy-key-mgmt.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label.
+*/}} +{{- define "indy-key-mgmt.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/platforms/hyperledger-indy/charts/indy-key-mgmt/templates/job-cleanup.yaml b/platforms/hyperledger-indy/charts/indy-key-mgmt/templates/job-cleanup.yaml new file mode 100644 index 00000000000..61ba3d89a37 --- /dev/null +++ b/platforms/hyperledger-indy/charts/indy-key-mgmt/templates/job-cleanup.yaml @@ -0,0 +1,114 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "indy-key-mgmt.name" . }}-cleanup + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/hook-weight: "0" + helm.sh/hook: "pre-delete" + helm.sh/hook-delete-policy: "hook-succeeded" + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: indy-key-mgmt-cleanup + app.kubernetes.io/component: key-mgmt-cleanup + app.kubernetes.io/part-of: {{ include "indy-key-mgmt.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +spec: + backoffLimit: 3 + completions: 1 + template: + metadata: + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: indy-key-mgmt-cleanup + app.kubernetes.io/component: key-mgmt-cleanup + app.kubernetes.io/part-of: {{ include "indy-key-mgmt.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + spec: + restartPolicy: Never + imagePullSecrets: + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} + serviceAccountName: {{ .Values.global.serviceAccountName }} + containers: + - name: cleanup-keys + image: {{ .Values.image.keyUtils }} + imagePullPolicy: IfNotPresent + volumeMounts: + - name: package-manager + mountPath: /scripts/package-manager.sh + subPath: package-manager.sh + command: ["/bin/bash", "-c"] + args: + - | + + # Install necessary packages using custom package manager script + . 
/scripts/package-manager.sh + packages_to_install="curl" + install_packages "$packages_to_install" + # Download kubectl binary + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.27.0/bin/linux/amd64/kubectl; + chmod u+x kubectl && mv kubectl /usr/local/bin/kubectl; + + #function to delete kubernetes secrets + function deleteAllSecret { + identity=$1 + secretData=$2 + jq -r 'to_entries[] | "\(.key) \(.value)"' <<< "$secretData" | \ + while read -r key value; do + jq -r 'to_entries[] | "\(.key) \(.value)"' <<< "$value" | \ + while read -r subkey subvalue; do + if [ "$key" == "identity" ]; then + # Do not iterate as identity has only 1 level of keys + secretName=$(echo "$identity-$key-$subkey" |sed 's/_/-/g') + if kubectl get secret --namespace {{ $.Release.Namespace }} $secretName &> /dev/null; then + kubectl delete secret --namespace {{ $.Release.Namespace }} $secretName + fi + else + # Otherwise, iterate over next set of key-value pairs + jq -r 'to_entries[] | "\(.key) \(.value)"' <<< "$subvalue" | \ + while read -r key1 value1; do + secretName=$(echo "$identity-$key-$subkey-$key1" |sed 's/_/-/g') + if kubectl get secret --namespace {{ $.Release.Namespace }} $secretName &> /dev/null; then + kubectl delete secret --namespace {{ $.Release.Namespace }} $secretName + fi + done + fi + done + done + } + +{{- if .Values.settings.removeKeysOnDelete }} + echo "Deleting Kubernetes Secrets" + {{- if .Values.settings.identities.trustee }} + trustees_json=$(generate_identity {{ .Values.settings.identities.trustee }} trustees) + json=$(echo "$trustees_json" | jq -r '.trustees."{{ .Values.settings.identities.trustee }}"') + deleteAllSecret "{{ .Values.settings.identities.trustee }}" "$json" + {{- end }} + {{- if .Values.settings.identities.endorser }} + endorsers_json=$(generate_identity {{ .Values.settings.identities.endorser }} endorsers) + json=$(echo "$endorsers_json" | jq -r '.endorsers."{{ .Values.settings.identities.endorser }}"') + deleteAllSecret "{{ .Values.settings.identities.endorser }}" "$json" + {{- end }} + {{- range .Values.settings.identities.stewards }} + stewards_json=$(generate_identity {{ . }} stewards) + json=$(echo "$stewards_json" | jq -r '.stewards."{{ . }}"') + deleteAllSecret "{{ . }}" "$json" + {{- end }} +{{- end }} + volumes: + - name: package-manager + configMap: + name: package-manager + defaultMode: 0777 diff --git a/platforms/hyperledger-indy/charts/indy-key-mgmt/templates/job.yaml b/platforms/hyperledger-indy/charts/indy-key-mgmt/templates/job.yaml index dd65c746a12..652fffa6af0 100644 --- a/platforms/hyperledger-indy/charts/indy-key-mgmt/templates/job.yaml +++ b/platforms/hyperledger-indy/charts/indy-key-mgmt/templates/job.yaml @@ -7,276 +7,172 @@ apiVersion: batch/v1 kind: Job metadata: - name: "{{ $.Values.metadata.name }}" - namespace: "{{ $.Values.metadata.namespace }}" + name: {{ include "indy-key-mgmt.name" . }}-job + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/hook-delete-policy: "hook-succeeded" labels: - app: "{{ $.Values.metadata.name }}" + app: {{ .Release.Name }} + app.kubernetes.io/name: indy-key-mgmt-job + app.kubernetes.io/component: key-mgmt-job + app.kubernetes.io/part-of: {{ include "indy-key-mgmt.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm spec: + backoffLimit: 3 template: metadata: labels: - app: "{{ $.Values.metadata.name }}" + app: {{ .Release.Name }} + app.kubernetes.io/name: indy-key-mgmt-job + app.kubernetes.io/component: key-mgmt-job + app.kubernetes.io/part-of: {{ include "indy-key-mgmt.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm spec: restartPolicy: OnFailure imagePullSecrets: - - name: "{{ $.Values.image.pullSecret }}" - serviceAccountName: {{ $.Values.account.service }} + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} + serviceAccountName: {{ .Values.global.serviceAccountName }} containers: - - name: "{{ $.Values.image.name }}" - image: "{{ $.Values.image.repository }}" + - name: generate-keys + image: {{ .Values.image.keyUtils }} imagePullPolicy: IfNotPresent + volumeMounts: + - name: package-manager + mountPath: /scripts/package-manager.sh + subPath: package-manager.sh + {{- if eq .Values.global.vault.type "hashicorp" }} + - name: scripts-volume + mountPath: /scripts/bevel-vault.sh + subPath: bevel-vault.sh + env: + - name: VAULT_ADDR + value: "{{ .Values.global.vault.address }}" + - name: VAULT_SECRET_ENGINE + value: "{{ .Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ .Values.global.vault.secretPrefix }}" + - name: KUBERNETES_AUTH_PATH + value: "{{ .Values.global.vault.authPath }}" + - name: VAULT_APP_ROLE + value: "{{ .Values.global.vault.role }}" + - name: VAULT_TYPE + value: "{{ .Values.global.vault.type }}" + {{- end }} command: ["/bin/bash", "-c"] args: - - |- - apt-get update; - apt-get install curl -y; - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] + - | + + # Install necessary packages using custom package manager script + . /scripts/package-manager.sh + packages_to_install="curl" + install_packages "$packages_to_install" + # Download kubectl binary + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.27.0/bin/linux/amd64/kubectl; + chmod u+x kubectl && mv kubectl /usr/local/bin/kubectl; + +{{- if eq .Values.global.vault.type "hashicorp" }} + . /scripts/bevel-vault.sh + echo "Getting Vault Token..." + vaultBevelFunc "init" + # Function to store secrets into Vault as well as K8s + function safeWriteSecret { + path=$1 + subpath=$2 + value=$3 + secretName=$(echo $subpath | sed 's/\//-/g' |sed 's/_/-/g') + vaultBevelFunc "readJson" "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/${path}/${subpath}" + if [ "$SECRETS_AVAILABLE" == "yes" ] then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi + # Create the Kubernetes Secret with data from Vault + echo "Secret found in Vault, only creating k8s secrets" + kubectl get secret --namespace {{ $.Release.Namespace }} "${secretName}" + if [ $? 
-ne 0 ]; then + kubectl create secret --namespace {{ $.Release.Namespace }} generic "${secretName}" --from-literal="value=${VAULT_SECRET}" fi + else + echo "Secret to be created on Vault and k8s" + # Store the value in Vault + echo " + { + \"data\": $value + }" > payload.json + + vaultBevelFunc 'write' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/${path}/${subpath}" 'payload.json' + rm payload.json + # Create the Kubernetes Secret using kubectl + kubectl create secret --namespace {{ $.Release.Namespace }} generic "${secretName}" --from-literal="value=$value" fi } - - KUBE_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token); - curl --request POST --data '{"jwt": "'"$KUBE_TOKEN"'", "role": "{{ $.Values.account.role }}"}' {{ $.Values.vault.address }}/v1/auth/{{ $.Values.vault.auth_path }}/login | jq -j '.auth.client_token' > token; - VAULT_TOKEN=$(cat token); - response_status=$(curl -o /dev/null -s -w "%{http_code}\n" --header "X-Vault-Token: ${VAULT_TOKEN}" {{ $.Values.vault.address }}/v1/auth/token/lookup-self) - validateVaultResponse ${response_status}; - - {{- if eq $.Values.vault.version "2" }} - generate_identityv2 {{ $.Values.vault.identity }} {{ $.Values.vault.keyPath }} vault {{ $.Values.vault.address }} 2 - {{- else }} - generate_identity {{ $.Values.vault.identity }} {{ $.Values.vault.keyPath }} vault {{ $.Values.vault.address }} - {{- end }} - - echo "Check if certs are stored in vault" - curl --request POST --data '{"jwt": "'"$KUBE_TOKEN"'", "role": "{{ $.Values.account.role }}"}' {{ $.Values.vault.address }}/v1/auth/{{ $.Values.vault.auth_path }}/login | jq -j '.auth.client_token' > token; - VAULT_TOKEN=$(cat token); - response_status=$(curl -o /dev/null -s -w "%{http_code}\n" --header "X-Vault-Token: ${VAULT_TOKEN}" {{ $.Values.vault.address }}/v1/auth/token/lookup-self) - validateVaultResponse ${response_status}; - - trustees_path={{ $.Values.vault.certsecretprefix }}/data/trustees - stewards_path={{ $.Values.vault.certsecretprefix }}/data/stewards - endorsers_path={{ $.Values.vault.certsecretprefix }}/data/endorsers - - client_public_keys=false client_verif_keys=false client_private_keys=false client_sig_keys=false identity_private_keys=false identity_public_keys=false node_verif_keys=false node_bls_keys=false node_public_keys=false node_sig_keys=false node_private_bls_keys=false node_private_keys=false - COUNTER=1 - while [ ${COUNTER} -lt {{ $.Values.vault.retries }} ] - do - # client_public_keys=false client_verif_keys=false client_private_keys=false client_sig_keys=false identity_private_keys=false identity_public_keys=false node_verif_keys=false node_bls_keys=false node_public_keys=false node_sig_keys=false node_private_bls_keys=false node_private_keys=false - - for field in $stewards_path $endorsers_path $trustees_path - do - if [ "$client_public_keys" == false ] - then - # Check if client public keys are stored in vault or not - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" {{ $.Values.vault.address }}/v1/${field}/{{ $.Values.vault.identity }}/client/public/public_keys | jq -r 'if .errors then . else . 
end') - public_key=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["public_key"]' 2>&1) - if [ "$public_key" == "null" ] || [ "$public_key" == "parse error"* ] - then - client_public_keys=false - echo "Client public keys are not present in vault" - else - client_public_keys=true - echo "Successfully got client public keys" - fi - fi - - # Check if client verif keys are stored in vault or not - if [ "$client_verif_keys" == false ] - then - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" {{ $.Values.vault.address }}/v1/${field}/{{ $.Values.vault.identity }}/client/public/verif_keys | jq -r 'if .errors then . else . end') - verification_key=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["verification-key"]' 2>&1) - if [ "$verification_key" == "null" ] || [ "$verification_key" == "parse error"* ] - then - client_verif_keys=false - echo "Client verif keys are not present in vault" - else - client_verif_keys=true - echo "Successfully got client verification keys" - fi - fi - - # Check if client private keys are stored in vault or not - if [ "$client_private_keys" == false ] - then - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" {{ $.Values.vault.address }}/v1/${field}/{{ $.Values.vault.identity }}/client/private/private_keys | jq -r 'if .errors then . else . end') - private_key=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["{{ $.Values.vault.identity }}C.key_secret"]' 2>&1) - if [ "$private_key" == "null" ] || [ "$private_key" == "parse error"* ] - then - client_private_keys=false - echo "Client private keys are not present in vault" - else - client_private_keys=true - echo "Successfully got client private keys" - fi - fi - - # Check if client sig keys are stored in vault or not - if [ "$client_sig_keys" == false ] - then - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" {{ $.Values.vault.address }}/v1/${field}/{{ $.Values.vault.identity }}/client/private/sig_keys | jq -r 'if .errors then . else . end') - sig_key=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["{{ $.Values.vault.identity }}C.key_secret"]' 2>&1) - if [ "$sig_key" == "null" ] || [ "$sig_key" == "parse error"* ] - then - client_sig_keys=false - echo "Client sig keys are not present in vault" - else - client_sig_keys=true - echo "Successfully got client private signature keys" - fi - fi - - # Check if identity private keys are stored in vault or not - if [ "$identity_private_keys" == false ] - then - # Check if identity keys are stored in vault or not - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" {{ $.Values.vault.address }}/v1/${field}/{{ $.Values.vault.identity }}/identity/private | jq -r 'if .errors then . else . end') - private_key=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["seed"]' 2>&1) - if [ "$private_key" == "null" ] || [ "$private_key" == "parse error"* ] - then - identity_private_keys=false - echo "Identity private keys are not present in vault" - else - identity_private_keys=true - echo "Successfully got identity private keys" - fi - fi - - # Check if identity public keys are stored in vault or not - if [ "$identity_public_keys" == false ] - then - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" {{ $.Values.vault.address }}/v1/${field}/{{ $.Values.vault.identity }}/identity/public | jq -r 'if .errors then . else . 
end') - public_key=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["did"]' 2>&1) - if [ "$public_key" == "null" ] || [ "$public_key" == "parse error"* ] - then - identity_public_keys=false - echo "Identity public keys are not present in vault" - else - identity_public_keys=true - echo "Successfully got identity public keys" - fi - fi - - # Check if node verif keys are stored in vault or not - if [ "$node_verif_keys" == false ] - then - # Check if node keys are stored in vault or not - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" {{ $.Values.vault.address }}/v1/${field}/{{ $.Values.vault.identity }}/node/public/verif_keys | jq -r 'if .errors then . else . end') - verification_key=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["verification-key"]' 2>&1) - if [ "$verification_key" == "null" ] || [ "$verification_key" == "parse error"* ] - then - node_verif_keys=false - echo "Node verif keys are not present in vault" - else - node_verif_keys=true - echo "Successfully got node verification keys" - fi - fi - - # Check if node bls keys are stored in vault or not - if [ "$node_bls_keys" == false ] - then - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" {{ $.Values.vault.address }}/v1/${field}/{{ $.Values.vault.identity }}/node/public/bls_keys | jq -r 'if .errors then . else . end') - bls_public_key=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["bls-public-key"]' 2>&1) - bls_key_pop=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["bls-key-pop"]' 2>&1) - bls_pk=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["bls_pk"]' 2>&1) - if [ "$bls_public_key" = "null" ] || [ "$bls_key_pop" = "null" ] || [ "$bls_pk" = "null" ] || [ "$bls_public_key" == "parse error"* ] || [ "$bls_key_pop" == "parse error"* ] || [ "$bls_pk" == "parse error"* ] - then - node_bls_keys=false - echo "Node bls keys are not present in vault" - else - node_bls_keys=true - echo "Successfully got node bls keys" - fi - fi - - # Check if node public keys are stored in vault or not - if [ "$node_public_keys" == false ] - then - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" {{ $.Values.vault.address }}/v1/${field}/{{ $.Values.vault.identity }}/node/public/public_keys | jq -r 'if .errors then . else . end') - public_key=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["public_key"]' 2>&1) - if [ "$public_key" == "null" ] || [ "$public_key" == "parse error"* ] - then - node_public_keys=false - echo "Node public keys are not present in vault" - else - node_public_keys=true - echo "Successfully got node public keys" - fi - fi - - # Check if node sig keys are stored in vault or not - if [ "$node_sig_keys" == false ] - then - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" {{ $.Values.vault.address }}/v1/${field}/{{ $.Values.vault.identity }}/node/private/sig_keys | jq -r 'if .errors then . else . 
end') - sig_key=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["{{ $.Values.vault.identity }}.key_secret"]' 2>&1) - if [ "$sig_key" == "null" ] || [ "$sig_key" == "parse error"* ] - then - node_sig_keys=false - echo "Node sig keys are not present in vault" - else - node_sig_keys=true - echo "Successfully got node private signature keys" - fi - fi - - # Check if node private bls keys are stored in vault or not - if [ "$node_private_bls_keys" == false ] - then - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" {{ $.Values.vault.address }}/v1/${field}/{{ $.Values.vault.identity }}/node/private/bls_keys | jq -r 'if .errors then . else . end') - bls_sk=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["bls_sk"]' 2>&1) - if [ "$bls_sk" == "null" ] || [ "$bls_sk" == "parse error"* ] - then - node_private_bls_keys=false - echo "Node private bls keys are not present in vault" - else - node_private_bls_keys=true - echo "Successfully got node private bls keys" - fi - fi - - # Check if node private keys are stored in vault or not - if [ "$node_private_keys" == false ] - then - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" {{ $.Values.vault.address }}/v1/${field}/{{ $.Values.vault.identity }}/node/private/private_keys | jq -r 'if .errors then . else . end') - private_key=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["{{ $.Values.vault.identity }}.key_secret"]' 2>&1) - if [ "$private_key" == "null" ] || [ "$private_key" == "parse error"* ] - then - node_private_keys=false - echo "Node private keys are not present in vault" +{{- else }} + # When Vault type is not hashicorp + # Function to create Kubernetes secrets; add additional conditions here if a cloud KMS is used + function safeWriteSecret { + path=$1 + subpath=$2 + value=$3 + secretName=$(echo $subpath | sed 's/\//-/g' |sed 's/_/-/g') + # Create the Kubernetes Secret using kubectl + kubectl get secret --namespace {{ $.Release.Namespace }} "${secretName}" + if [ $?
-ne 0 ]; then + kubectl create secret --namespace {{ $.Release.Namespace }} generic "${secretName}" --from-literal="value=$value" + fi + } +{{- end }} + # function to write/save all secrets by parsing the json + function writeAllSecret { + identity=$1 + secretData=$2 + path=$3 + jq -r 'to_entries[] | "\(.key) \(.value)"' <<< "$secretData" | \ + while read -r key value; do + jq -r 'to_entries[] | "\(.key) \(.value)"' <<< "$value" | \ + while read -r subkey subvalue; do + if [ "$key" == "identity" ]; then + # Do not iterate as identity has only 1 level of keys + safeWriteSecret $path "$identity/$key/$subkey" "$subvalue" else - node_private_keys=true - echo "Successfully got node private keys" + # Otherwise, iterate over next set of key-value pairs + jq -r 'to_entries[] | "\(.key) \(.value)"' <<< "$subvalue" | \ + while read -r key1 value1; do + safeWriteSecret $path "$identity/$key/$subkey/$key1" "$value1" + done fi - fi - - if [ "$client_public_keys" == true ] || [ "$client_verif_keys" == true ] || [ "$client_private_keys" == true ] || [ "$client_sig_keys" == true ] || [ "$identity_private_keys" == true ] || [ "$identity_public_keys" == true ] || [ "$node_verif_keys" == true ] || [ "$node_bls_keys" == true ] || [ "$node_public_keys" == true ] || [ "$node_sig_keys" == true ] || [ "$node_private_bls_keys" == true ] || [ "$node_private_keys" == true ] - then - echo "All crypto materials are successfully stored in vault" - break - else - echo "Crypto materials are not stored in vault" - if [ "$COUNTER" -ge {{ $.Values.vault.retries }} ] - then - echo "Retry attempted $COUNTER times, certificates have not been saved in vault" - exit 1 - fi - fi + done done - COUNTER=`expr "$COUNTER" + 1` - done + } + echo "Generating the secrets for each identity" + {{- if .Values.settings.identities.trustee }} + trustees_json=$(generate_identity {{ .Values.settings.identities.trustee }} trustees) + # Parse the JSON and create Kubernetes secrets + json=$(echo "$trustees_json" | jq -r '.trustees."{{ .Values.settings.identities.trustee }}"') + writeAllSecret "{{ .Values.settings.identities.trustee }}" "$json" "trustees" + {{- end }} + {{- if .Values.settings.identities.endorser }} + endorsers_json=$(generate_identity {{ .Values.settings.identities.endorser }} endorsers) + # Parse the JSON and create Kubernetes secrets + json=$(echo "$endorsers_json" | jq -r '.endorsers."{{ .Values.settings.identities.endorser }}"') + writeAllSecret "{{ .Values.settings.identities.endorser }}" "$json" "endorsers" + {{- end }} + {{- range .Values.settings.identities.stewards }} + stewards_json=$(generate_identity {{ . }} stewards) + # Parse the JSON and create Kubernetes secrets + json=$(echo "$stewards_json" | jq -r '.stewards."{{ . }}"') + writeAllSecret "{{ . }}" "$json" "stewards" + {{- end }} + volumes: + {{- if eq .Values.global.vault.type "hashicorp" }} + - name: scripts-volume + configMap: + name: bevel-vault-script + defaultMode: 0777 + {{- end }} + - name: package-manager + configMap: + name: package-manager + defaultMode: 0777 diff --git a/platforms/hyperledger-indy/charts/indy-key-mgmt/values.yaml b/platforms/hyperledger-indy/charts/indy-key-mgmt/values.yaml index f877a088930..81ea01bfa72 100644 --- a/platforms/hyperledger-indy/charts/indy-key-mgmt/values.yaml +++ b/platforms/hyperledger-indy/charts/indy-key-mgmt/values.yaml @@ -3,74 +3,47 @@ # # SPDX-License-Identifier: Apache-2.0 ############################################################################################## - -# Default values for indy-key-mgmt. 
# This is a YAML-formatted file. # Declare variables to be passed into your templates. - -metadata: - #Provide the namespace for organization's peer - #Eg. namespace: bevel - namespace: - - #Provide the name for indy-key-mgmt release - #Eg. name: indy-key-mgmt - name: - -network: - #Provide the name for network - #Eg. name: bevel - name: +--- +# The following are for overriding global values +global: + #Provide the service account name which will be created. + serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # only 'false' is implemented + #Provide the kubernetes host url + #Eg. kubernetesUrl: https://10.3.8.5:8443 + kubernetesUrl: + vault: + #Provide the type of vault + type: hashicorp # hashicorp | kubernetes + #Provide the vault role used. + role: vault-role + #Provide the network type + network: indy + #Provide the vault server address + address: + #Provide the vault authPath configured to be used. + authPath: authority + #Provide the secret engine. + secretEngine: secretsv2 + #Provide the vault path where the secrets will be stored + secretPrefix: "data/authority" image: - #Provide the image name for the indy-key-mgmt container - #Eg. name: indy-key-mgmt - name: - - #Provide the image repository for the indy-key-mgmt container - #Eg. repository: indy-key-mgmt:latest - repository: - - #Provide the image pull secret of image + #Provide the image for the job container + #Eg. keyUtils: ghcr.io/hyperledger/bevel-indy-key-mgmt:1.12.6 + keyUtils: ghcr.io/hyperledger/bevel-indy-key-mgmt:1.12.6 + #Provide the secret to use if private repository #Eg. pullSecret: regcred pullSecret: +settings: + removeKeysOnDelete: true + identities: + trustee: authority-trustee + endorser: + stewards: [] -vault: - #Provide the vault server address - #Eg. address: http://54.226.163.39:8200 - address: - - #Provide the vault secret version address - # Supported are "1" or "2" - version: "1" - - #Provide the key path for vault - #Eg. keyPath: provider.stewards - keyPath: - - #Provide the identity for vault - #Eg. identity: my-identity - identity: - - #Provide the authpath - #Eg. authpath: kubernetes-bevel-provider-admin-auth - auth_path: - # Provide the vault path where the certificates are stored - # Eg. certsecretprefix: secret/organisation-name - certsecretprefix: - # The amount of times to retry fetching from/writing to Vault before giving up. - # Eg. retries: 10 - retries: 10 - # The amount of time in seconds to wait after an error occurs when fetching from/writing to Vault. - # Eg. sleepTimeAfterError: 15 - sleepTimeAfterError: 15 - -account: - #Provide the service account name - #Eg. service: vault-auth-provider-agent-app - service: - - #Provide the service account role - #Eg. role: ro - role: diff --git a/platforms/hyperledger-indy/charts/indy-ledger-txn/Chart.yaml b/platforms/hyperledger-indy/charts/indy-ledger-txn/Chart.yaml deleted file mode 100644 index 8590567a4f3..00000000000 --- a/platforms/hyperledger-indy/charts/indy-ledger-txn/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "hyperledger-indy: Indy Ledger Script for Issuing a NYM Transaction" -name: indy-ledger-chart -version: 1.0.0 diff --git a/platforms/hyperledger-indy/charts/indy-ledger-txn/README.md b/platforms/hyperledger-indy/charts/indy-ledger-txn/README.md deleted file mode 100644 index ff012480d1e..00000000000 --- a/platforms/hyperledger-indy/charts/indy-ledger-txn/README.md +++ /dev/null @@ -1,200 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) -[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# indy-ledger-txn - -- [indy-ledger-txn Helm Chart](#indy-ledger-txn-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Verification](#verification) -- [Updating the Deployment](#updating-the-deployment) -- [Deletion](#deletion) -- [Contributing](#contributing) -- [License](#license) - - -## indy-ledger-txn Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/develop/platforms/hyperledger-indy/charts/indy-ledger-txn) helps to deploy indy ledger txn job. - - -## Prerequisites ---- -Before deploying the Helm chart, make sure to have the following prerequisites: - -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm installed. - - -## Chart Structure ---- -The structure of the Helm chart is as follows: - -``` -indy-ledger-txn/ - |- templates/ - |- _helpers.tpl - |- job.yaml - |- Chart.yaml - |- README.md - |- values.yaml -``` - -- `templates/`: This directory contains the template files for generating Kubernetes resources. -- `_helpers.tpl`: Contains custom label definitions used in other templates. -- `job.yaml`: This file provides information about the kubernetes job -- `Chart.yaml`: Provides metadata about the chart, such as its name, version, and description. -- `README.md`: This file provides information and instructions about the Helm chart. -- `values.yaml`: Contains the default configuration values for the chart. It includes configuration for the metadata, image, node, Vault, etc. - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-indy/charts/indy-ledger-txn/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. 
Here are some important configuration options: - -## Parameters ---- -### metadata - -| Name | Description | Default Value | -| ----------------| ----------------------------------------------- | ------------- | -| namespace | Provide the namespace for organization's peer | bevel | -| name | Provide the name for indy-ledger-txn release | indy-ledger-txn | - - -### network - -| Name | Description | Default Value | -| ------------ | ------------------------------------| ------------- | -| name | Provide the name of the network | bevel | - - -### organization - -| Name | Description | Default Value | -| -------- | ----------------------------------| ------------- | -| name | | bevel | -| adminIdentity | Provide the admin identity name | | -| name | Provide the admin identity name | admin_name | -| path | Provide the admin identity path |admin_path | -| newIdentity | -| name | Provide the new identity name | identity_name | -| path | Provide the new identity path | identity_path | -| role | Provide the new identity role | identity_role | -| did | Provide the new identity did | identity_did | -| verkey | Provide the new identity verkey | verification key value | - - -### image - -| Name | Description | Default Value | -| ------------ | ---------------------------------------------------------- | ------------- | -| name | Provide the image name for the indy-ledger-txn container | indy-ledger-txn | -| repository | Provide the image pull secret of image |alpine:3.9.4 | -| pullSecret | Provide the vault identity | regcred | - -### vault - -| Name | Description | Default Value | -| -------------------- | --------------------------------------| ------------- | -| address | Provide the vault server address | http://54.226.163.39:8200 | -| role | Provide the service account role | ro | -| serviceAccountName | Provide the authpath | vault-auth | -| auth_path | Provide the indy-ledger-txn node name | kubernetes-bevel-provider-steward-1-auth| - - -### node - -| Name | Description | Default Value | -| --------------| --------------------------------------- | ------------ | -| name | Provide the indy-ledger-txn node name | indy-ledger | - - - - -## Deployment ---- - -To deploy the indy-ledger-txn Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-indy/charts/indy-ledger-txn/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install the chart: - ``` - $ helm repo add bevel https://hyperledger.github.io/bevel/ - $ helm install ./indy-ledger-txn - ``` -Replace `` with the desired name for the release. - -This will deploy the indy auth job to the Kubernetes cluster based on the provided configurations. - - - -## Verification ---- - -To verify the jobs, we can use the following command: -``` -$ kubectl get jobs -n -``` -Replace `` with the actual namespace where the job was created. The command will display information about the jobs. - - - -## Updating the job ---- - -If we need to update the job with new configurations or changes, modify the same [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-indy/charts/indy-ledger-txn/values.yaml) file with the desired changes and run the following Helm command: -``` -$ helm upgrade ./indy-ledger-txn -``` -Replace `` with the name of the release. This command will apply the changes to the job , ensuring the job is up to date. 
- - - -## Deletion ---- - -To delete the jobs and associated resources, run the following Helm command: -``` -$ helm uninstall -``` -Replace `` with the name of the release. This command will remove all the resources created by the Helm chart. - - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [INDY authorization job Helm Chart](https://github.com/hyperledger/bevel/tree/develop/platforms/hyperledger-indy/charts/indy-ledger-txn), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - - -## License - -This chart is licensed under the Apache v2.0 license. - -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -``` diff --git a/platforms/hyperledger-indy/charts/indy-ledger-txn/templates/_helpers.tpl b/platforms/hyperledger-indy/charts/indy-ledger-txn/templates/_helpers.tpl deleted file mode 100644 index d43c09d8cef..00000000000 --- a/platforms/hyperledger-indy/charts/indy-ledger-txn/templates/_helpers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} diff --git a/platforms/hyperledger-indy/charts/indy-ledger-txn/templates/job.yaml b/platforms/hyperledger-indy/charts/indy-ledger-txn/templates/job.yaml deleted file mode 100644 index f602a70f9f3..00000000000 --- a/platforms/hyperledger-indy/charts/indy-ledger-txn/templates/job.yaml +++ /dev/null @@ -1,120 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: batch/v1 -kind: Job -metadata: - name: "{{ $.Values.metadata.name }}" - namespace: "{{ $.Values.metadata.namespace }}" - labels: - app: "{{ $.Values.metadata.name }}" - app.kubernetes.io/name: "{{ $.Values.metadata.name }}" - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - backoffLimit: 7 - template: - metadata: - labels: - app: "{{ $.Values.metadata.name }}" - app.kubernetes.io/name: "{{ $.Values.metadata.name }}" - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - restartPolicy: OnFailure - serviceAccountName: "{{ $.Values.vault.serviceAccountName }}" - imagePullSecrets: - - name: "{{ $.Values.image.cli.pullSecret }}" - volumes: - - name: {{ $.Values.organization.name }}-ptg - configMap: - name: {{ $.Values.organization.name }}-ptg - - name: shared-data - emptyDir: - medium: Memory - containers: - - name: init-container - image: "{{ $.Values.image.cli.repository }}" - stdin: true - tty: true - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - } - - KUBE_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token); - echo "Getting secrets from Vault Server: ${VAULT_ADDR}" - # Login to Vault to get an approle token - curl --request POST --data '{"jwt": "'"${KUBE_TOKEN}"'", "role": "{{ $.Values.vault.role }}"}' ${VAULT_ADDR}/v1/auth/{{ $.Values.vault.auth_path }}/login | jq -j '.auth.client_token' > token; - VAULT_TOKEN=$(cat token); - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - QUERY_RES=$(curl -sS --header "X-Vault-Token: $VAULT_TOKEN" $VAULT_ADDR/v1/$ADMIN_PATH/$ADMIN_NAME/identity/private | jq -r 'if .errors then . else . 
end') - validateVaultResponse 'Admin Seed' "${QUERY_RES}" - admin_seed=$(echo ${QUERY_RES} | jq -r ".data.data[\"seed\"]"); - mkdir -p /data/seed; - echo "${admin_seed}" > /data/seed/adminseed.txt; - env: - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: KUBERNETES_AUTH_PATH - value: "{{ $.Values.vault.authpath }}" - - name: VAULT_ADDR - value: "{{ $.Values.vault.address }}" - - name: ADMIN_PATH - value: "{{ $.Values.organization.adminIdentity.path }}" - - name: ADMIN_NAME - value: "{{ $.Values.organization.adminIdentity.name }}" - - name: IDENTITY_NAME - value: "{{ $.Values.organization.newIdentity.name }}" - - name: IDENTITY_PATH - value: "{{ $.Values.organization.newIdentity.path }}" - volumeMounts: - - name: shared-data - mountPath: /data - - name: "{{ $.Values.image.cli.name }}" - image: "{{ $.Values.image.cli.repository }}" - stdin: true - tty: true - command: ["sh", "-c"] - args: - - |- - apt-get install curl -y - - ADMIN_SEED=$( cat /data/seed/adminseed.txt) - echo "Running ledger Transaction Script..."; - - ./home/indy-ledger.sh $ADMIN_DID $ADMIN_SEED $IDENTITY_DID $IDENTITY_ROLE $IDENTITY_VERKEY $POOL_GENESIS_PATH; - env: - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: KUBERNETES_AUTH_PATH - value: "{{ $.Values.vault.authpath }}" - - name: VAULT_ADDR - value: "{{ $.Values.vault.address }}" - - name: ADMIN_DID - value: "{{ $.Values.organization.adminIdentity.did }}" - - name: IDENTITY_DID - value: "{{ $.Values.organization.newIdentity.did }}" - - name: IDENTITY_ROLE - value: "{{ $.Values.organization.newIdentity.role }}" - - name: IDENTITY_VERKEY - value: "{{ $.Values.organization.newIdentity.verkey }}" - - name: POOL_GENESIS_PATH - value: /var/lib/indy/genesis/{{ $.Values.network.name }}/pool_transactions_genesis - volumeMounts: - - name: {{ $.Values.organization.name }}-ptg - mountPath: /var/lib/indy/genesis/{{ $.Values.network.name }}/pool_transactions_genesis - subPath: pool_transactions_genesis - - name: shared-data - mountPath: /data diff --git a/platforms/hyperledger-indy/charts/indy-ledger-txn/values.yaml b/platforms/hyperledger-indy/charts/indy-ledger-txn/values.yaml deleted file mode 100644 index 0587ee09bc2..00000000000 --- a/platforms/hyperledger-indy/charts/indy-ledger-txn/values.yaml +++ /dev/null @@ -1,92 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Default values for indy-ledger-txn. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -metadata: - #Provide the namespace for organization's peer - #Eg. namespace: bevel - namespace: - - #Provide the name for indy-ledger-txn release - #Eg. name: indy-ledger-txn - name: - -network: - #Provide the name for network - #Eg. name: bevel - name: - -organization: - name: - #Provide the organization name - adminIdentity: - #Provide the admin identity name - #Eg. name: admin_name - name: - - #Provide the admin identity path - #Eg. path: admin_path - path: - - newIdentity: - #Provide the new identity name - #Eg. name: identity_name - name: - - #Provide the new identity path - #Eg. path: identity_path - path: - - #Provide the new identity role - #Eg. role: identity_role - role: - - #Provide the new identity did - #Eg. did: identity_did - did: - - #Provide the new identity did - #Eg. 
verkey: verification key value - verkey: - -image: - cli: - #Provide the image name for the indy-ledger-txn container - #Eg. name: indy-ledger-txn - name: - - #Provide the image repository for the indy-ledger-txn container - #Eg. repository: alpine:3.9.4 - repository: - - #Provide the image pull secret of image - #Eg. pullSecret: regcred - pullSecret: - -vault: - #Provide the vault server address - #Eg. address: http://54.226.163.39:8200 - address: - - #Provide the service account role - #Eg. role: ro - role: - - #Provide the servicea ccount name for vault - #Eg. serviceaccountname: vault-auth - serviceAccountName: - - #Provide the authpath - #Eg. authpath: kubernetes-bevel-provider-steward-1-auth - auth_path: - -node: - #Provide the indy-ledger-txn node name - #Eg. name: indy-ledger - name: diff --git a/platforms/hyperledger-indy/charts/indy-node/Chart.yaml b/platforms/hyperledger-indy/charts/indy-node/Chart.yaml index b53afcfc482..cc6da6d6140 100644 --- a/platforms/hyperledger-indy/charts/indy-node/Chart.yaml +++ b/platforms/hyperledger-indy/charts/indy-node/Chart.yaml @@ -3,9 +3,23 @@ # # SPDX-License-Identifier: Apache-2.0 ############################################################################################## - apiVersion: v1 -appVersion: "2.0" -description: "hyperledger-indy: charts for indy-node StatefulSet" name: indy-node -version: 1.0.0 +description: Hyperledger Indy nodes for a SSI network +version: 1.0.1 +appVersion: latest +keywords: + - bevel + - identity + - indy + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/hyperledger-indy/charts/indy-node/README.md b/platforms/hyperledger-indy/charts/indy-node/README.md index bf245d80fe8..acfa11ea0f9 100644 --- a/platforms/hyperledger-indy/charts/indy-node/README.md +++ b/platforms/hyperledger-indy/charts/indy-node/README.md @@ -3,238 +3,100 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) - # indy-node -- [indy-node Helm Chart](#indy-node-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Verification](#verification) -- [Updating the Deployment](#updating-the-deployment) -- [Deletion](#deletion) -- [Contributing](#contributing) -- [License](#license) - - -## indy-node Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/develop/platforms/hyperledger-indy/charts/indy-node) helps to deploy indy node job. - - -## Prerequisites ---- -Before deploying the Helm chart, make sure to have the following prerequisites: - -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm installed. +This chart is a component of Hyperledger Bevel. The indy-node chart deploys a Hyperledger Indy node as a steward. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for more details. 
- -## Chart Structure ---- -The structure of the Helm chart is as follows: +## TL;DR -``` -indy-node/ - |- templates/ - |- _helpers.tpl - |- job.yaml - |- Chart.yaml - |- README.md - |- values.yaml +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install university-steward-1 bevel/indy-node ``` -- `templates/`: This directory contains the template files for generating Kubernetes resources. -- `helpers.tpl`: Contains custom label definitions used in other templates. -- `job.yaml`: This file provides information about the kubernetes job -- `Chart.yaml`: Provides metadata about the chart, such as its name, version, and description. -- `README.md`: This file provides information and instructions about the Helm chart. -- `values.yaml`: Contains the default configuration values for the chart. It includes configuration for the metadata, image, node, Vault, etc. +## Prerequisites - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-indy/charts/indy-node/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: +- Kubernetes 1.19+ +- Helm 3.2.0+ -## Parameters ---- -### metadata - -| Name | Description | Default Value | -| ----------------| ----------------------------------------------- | ------------- | -| namespace | Provide the namespace for organization's peer | bevel | -| name | Provide the name for indy-node release | indy-node | - - -### replicas - -| Name | Description | Default Value | -| --------- | ---------------------------------------- | ------------- | -| replicas | Provide the number of indy-node replicas | 1 | - -### network - -| Name | Description | Default Value | -| ------- | ---------------------------- | ------------- | -| name | Provide the name for network | bevel | - - -### organization - -| Name | Description | Default Value | -| -------- | ----------------------------------- | ------------- | -| name | Provide the name for organization | provider | - -# add_new_org is true when adding new validator node to existing network -add_new_org: false - - - -### image - -| Name | Description | Default Value | -| ------------ | -------------------------------------------------------------- | ------------- | -| initContainer | -| name | Provide the image name for the indy-node init container | indy-node | -| repository | provide the image repository for the indy-node init | alpine:3.9.4 | -| cli | | | -| name | Provide the image name for the indy-ledger-txn container | indy-ledger-txn | -| repository | Provide the image repository for the indy-ledger-txn container | alpine:3.9.4 | -| indyNode | | | -| name | Provide the name for the indy node | indy-node | -| repository | Provide the image name for the indy-node container | alpine:3.9.4 | -| pullSecret | Provide the image pull secret of image | regcred | - - - -### node - -| Name | Description | Default Value | -| -----------------| -------------------------| ------------- | -| name | Provide the node name | indy-node | -| ip | Provide the node ip | 0.0.0.0 | -| publicIp | Provide the node ip | 0.0.0.0 | -| port | Provide the node port | 9752 | -| ambassadorPort | Provide the node port | 15911 | - -### client - -| Name | Description | Default Value | -| -----------------| -------------------------| ------------- | -| ip | Provide the node ip | 0.0.0.0 | -| publicIp | Provide the node ip | 0.0.0.0 | -| port | Provide the node 
port | 9752 | -| ambassadorPort | Provide the node port | 15912 | - -#### service -| Name | Description | Default Value | -| -------------------- | ---------------------------------------------| ------------- | -| type | Provide type of service (NodePort/ClusterIp) | NodePort | -| ports | | | -| nodePort | Provide the service node port | 9711 | -| nodeTargetPort | Provide the service node target port | 9711 | -| clientPort Provide | the service client port | 9712 | -| clientTargetPort | Provide the service client target port | 9712 | - -### configmap - -| Name | Description | Default Value | -| -------------------- | ---------------------------------------------| ------------- | -| domainGenesis | Provide the domain genesis | "" | -| poolGenesis | Provide the pool genesis | "" | - - - -### ambassador - -### vault - -| Name | Description | Default Value | -| -------------------- | ---------------------------------------------| ------------- | -| address | Provide the vault server address | http://54.226.163.39:8200 | -| serviceAccountName | Provide the service account name for vault |vault-auth-provider-agent-app"" | -| keyPath | Provide the key path for vault | /keys/udisp/keys/indy-node | -| auth_path | Provide the authpath | kubernetes-bevel-provider-steward-1-auth | -| nodeId | Provide the indy-node node Id | indy-node | -| role | Provide the indy-node role | ro| - - -### storage - -| Name | Description | Default Value | -| -------------------- | -------------------------------------------------- | ------------- | -| keys | | | -| storagesize | Provide the storage size for storage for keys | 512Mi | -| storageClassName | Provide the storageClassName for storage for keys | ebs | -| data | | | -| storagesize | Provide the storage size for storage for data | 5Gi| -| storageClassName | Provide the storageClassName for storage for data | ebs | - - -## Deployment ---- - -To deploy the indy-node Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-indy/charts/indy-node/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install the chart: - ``` - $ helm repo add bevel https://hyperledger.github.io/bevel/ - $ helm install ./indy-node - ``` -Replace `` with the desired name for the release. - -This will deploy the indy auth job to the Kubernetes cluster based on the provided configurations. - - - -## Verification ---- - -To verify the jobs, we can use the following command: -``` -$ kubectl get jobs -n -``` -Replace `` with the actual namespace where the job was created. The command will display information about the jobs. +If Hashicorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ +> **Important**: Ensure the `indy-key-mgmt` and `indy-genesis` charts has been installed correctly before installing this. - -## Updating the job ---- +## Installing the Chart -If we need to update the job with new configurations or changes, modify the same [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-indy/charts/indy-node/values.yaml) file with the desired changes and run the following Helm command: -``` -$ helm upgrade ./indy-node +To install the chart with the release name `university-steward-1`: + +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install university-steward-1 bevel/indy-node ``` -Replace `` with the name of the release. This command will apply the changes to the job , ensuring the job is up to date. 
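+
+As a sketch of an alternative (assuming you have cloned the Bevel repository and are working from `platforms/hyperledger-indy/charts`; the release name and namespace below are placeholders), the chart can also be installed from a local checkout:
+
+```bash
+# resolve the chart's local dependency (bevel-storageclass) before installing
+helm dependency update indy-node
+helm install university-steward-1 ./indy-node --namespace university-ns --create-namespace
+```
+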
+The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. - -> **Tip**: List all releases using `helm list` -## Deletion ---- -To delete the jobs and associated resources, run the following Helm command: -``` -$ helm uninstall -``` -Replace `` with the name of the release. This command will remove all the resources created by the Helm chart. +## Uninstalling the Chart +To uninstall/delete the `university-steward-1` deployment: - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [INDY authorization job Helm Chart](https://github.com/hyperledger/bevel/tree/develop/platforms/hyperledger-indy/charts/indy-node), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). +```bash +helm uninstall university-steward-1 +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. +## Parameters +### Global parameters +These parameters are referred to identically in each parent or child chart +| Name | Description | Default Value | +|--------|---------|-------------| +| `global.serviceAccountName` | The service account name that will be created for Vault Auth management | `vault-auth` | +| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. Currently only `aws` and `minikube` are tested | `aws` | +| `global.cluster.cloudNativeServices` | Only `false` is implemented; `true` (use Cloud Native Services: SecretsManager and IAM for AWS, KeyVault & Managed Identities for Azure) is reserved for future use | `false` | +| `global.proxy.provider` | The proxy or Ingress provider. Can be `none` or `ambassador` | `ambassador` | + +### Storage + +| Name | Description | Default Value | +|--------|---------|-------------| +| `storage.keys` | Size of the PVC needed for storing the formatted keys | `512Mi` | +| `storage.data` | Size of the PVC needed for storing the node data | `4Gi` | +| `storage.reclaimPolicy` | Reclaim policy for the PVC. Choose from: `Delete` or `Retain` | `Delete` | +| `storage.volumeBindingMode` | Volume binding mode for the PVC. Choose from: `Immediate` or `WaitForFirstConsumer` | `Immediate` | +| `storage.allowedTopologies.enabled` | Check [bevel-storageclass](../../../shared/charts/bevel-storageclass/README.md) for details | `false` | + +### Image +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | +| `image.initContainer` | Init-container image repository and tag | `ghcr.io/hyperledger/bevel-alpine-ext:latest` | +| `image.cli` | Indy CLI (indy-ledger-txn) image repository and tag | `ghcr.io/hyperledger/bevel-indy-ledger-txn:latest` | +| `image.indyNode.repository` | Indy Node image repository | `ghcr.io/hyperledger/bevel-indy-node` | +| `image.indyNode.tag` | Indy Node image tag/version | `1.12.6` | + +### Settings + +| Name | Description | Default Value | +|--------|---------|-------------| +| `settings.network` | Network Name for Indy | `bevel` | +| `settings.addOrg` | Flag to denote if this is a new node joining an existing Indy network | `false` | +| `settings.serviceType` | Choose between `ClusterIP` or `NodePort`; `NodePort` must be used when `global.proxy.provider` is `none` | `ClusterIP` | +| `settings.node.ip` | Internal IP of the Indy node service | `0.0.0.0` | +| `settings.node.publicIp` | External IP of the Indy node service; use the same IP as in the genesis file | `""` | +| `settings.node.port` | Internal Port of the Indy node service | `9711` | +| `settings.node.externalPort` | External port of the Indy node service; use the same port as in the genesis file | `15011` | +| `settings.client.ip` | Internal IP of the Indy client service | `0.0.0.0` | +| `settings.client.publicIp` | External IP of the Indy client service; use the same IP as in the genesis file | `""` | +| `settings.client.port` | Internal Port of the Indy client service | `9712` | +| `settings.client.externalPort` | External port of the Indy client service; use the same port as in the genesis file | `15012` |
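+
+As an illustrative sketch only (the release name, namespace, IP and port values below are placeholders; use the IPs and ports that match your genesis file), individual parameters can be overridden at install time with `--set`:
+
+```bash
+helm install university-steward-1 bevel/indy-node \
+  --namespace university-ns --create-namespace \
+  --set settings.network=bevel \
+  --set settings.node.publicIp=192.168.2.1 \
+  --set settings.node.externalPort=15011 \
+  --set settings.client.publicIp=192.168.2.1 \
+  --set settings.client.externalPort=15012 \
+  --set storage.data=4Gi
+```
+
+Alternatively, the same overrides can be kept in a custom values file and passed with `-f my-values.yaml`.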
- ## License This chart is licensed under the Apache v2.0 license. -Copyright © 2023 Accenture +Copyright © 2024 Accenture ### Attribution diff --git a/platforms/hyperledger-indy/charts/indy-node/requirements.yaml b/platforms/hyperledger-indy/charts/indy-node/requirements.yaml new file mode 100644 index 00000000000..895f0a0e1cf --- /dev/null +++ b/platforms/hyperledger-indy/charts/indy-node/requirements.yaml @@ -0,0 +1,7 @@ +dependencies: + - name: bevel-storageclass + alias: storage + repository: "file://../../../shared/charts/bevel-storageclass" + tags: + - storage + version: ~1.0.0 diff --git a/platforms/hyperledger-indy/charts/indy-node/templates/_helpers.tpl b/platforms/hyperledger-indy/charts/indy-node/templates/_helpers.tpl index d43c09d8cef..4dc76acaefc 100644 --- a/platforms/hyperledger-indy/charts/indy-node/templates/_helpers.tpl +++ b/platforms/hyperledger-indy/charts/indy-node/templates/_helpers.tpl @@ -1,5 +1,28 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "indy-node.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name.
+*/}} +{{- define "indy-node.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "indy-node.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/platforms/hyperledger-indy/charts/indy-node/templates/configmap.yaml b/platforms/hyperledger-indy/charts/indy-node/templates/configmap.yaml index a307a43d542..0721163cd45 100644 --- a/platforms/hyperledger-indy/charts/indy-node/templates/configmap.yaml +++ b/platforms/hyperledger-indy/charts/indy-node/templates/configmap.yaml @@ -7,13 +7,32 @@ apiVersion: v1 kind: ConfigMap metadata: - name: {{ $.Values.metadata.name }}-config - namespace: {{ $.Values.metadata.namespace }} + name: {{ .Release.Name }}-config + namespace: {{ .Release.Namespace }} labels: - app.kubernetes.io/name: {{ $.Values.metadata.name }}-config - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ .Release.Name }}-config + app.kubernetes.io/component: config + app.kubernetes.io/part-of: {{ include "indy-node.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm data: indy_config.py: | - {{ $.Values.configmap.indyConfig | nindent 6 }} + NETWORK_NAME = '{{ .Values.settings.network }}' + # Enable stdout logging + enableStdOutLogging = True + logRotationBackupCount = 10 + # Directory to store ledger. + LEDGER_DIR = '/var/lib/indy/data' + # Directory to store logs. + LOG_DIR = '/var/log/indy' + # Directory to store keys. + KEYS_DIR = '/var/lib/indy/keys' + # Directory to store genesis transactions files. + GENESIS_DIR = '/var/lib/indy/genesis' + # Directory to store backups. + BACKUP_DIR = '/var/lib/indy/backup' + # Directory to store plugins. + PLUGINS_DIR = '/var/lib/indy/plugins' + # Directory to store node info. + NODE_INFO_DIR = '/var/lib/indy/data' diff --git a/platforms/hyperledger-indy/charts/indy-node/templates/service.yaml b/platforms/hyperledger-indy/charts/indy-node/templates/service.yaml index c09aa104e61..8790b3c8012 100644 --- a/platforms/hyperledger-indy/charts/indy-node/templates/service.yaml +++ b/platforms/hyperledger-indy/charts/indy-node/templates/service.yaml @@ -7,35 +7,45 @@ apiVersion: v1 kind: Service metadata: - name: "{{ $.Values.metadata.name }}" - namespace: "{{ $.Values.metadata.namespace }}" + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ include "indy-node.fullname" . }} + app.kubernetes.io/component: service + app.kubernetes.io/part-of: {{ include "indy-node.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm spec: - type: {{ $.Values.service.type }} + type: {{ .Values.settings.serviceType }} ports: - name: indy-node-node - port: {{ $.Values.service.ports.nodePort }} - targetPort: {{ $.Values.service.ports.nodeTargetPort }} - {{ if eq $.Values.service.type "NodePort" }} - nodePort: {{ $.Values.service.ports.nodeTargetPort }} + port: {{ .Values.settings.node.port }} + targetPort: {{ .Values.settings.node.port }} + {{ if eq .Values.settings.serviceType "NodePort" }} + nodePort: {{ .Values.settings.node.externalPort }} {{ end }} - name: indy-node-client - port: {{ $.Values.service.ports.clientPort }} - targetPort: {{ $.Values.service.ports.clientTargetPort }} - {{ if eq $.Values.service.type "NodePort" }} - nodePort: {{ $.Values.service.ports.clientTargetPort }} + port: {{ .Values.settings.client.port }} + targetPort: {{ .Values.settings.client.port }} + {{ if eq .Values.settings.serviceType "NodePort" }} + nodePort: {{ .Values.settings.client.externalPort }} {{ end }} selector: - app: "{{ $.Values.metadata.name }}" + app: {{ .Release.Name }} + app.kubernetes.io/part-of: {{ include "indy-node.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} -{{- if eq $.Values.proxy.provider "ambassador" }} +{{- if eq .Values.global.proxy.provider "ambassador" }} --- apiVersion: getambassador.io/v3alpha1 kind: Listener metadata: - name: "{{ .Values.node.name }}-node-listener" - namespace: {{ .Values.metadata.namespace }} + name: "{{ .Release.Name }}-node-listener" + namespace: {{ .Release.Namespace }} spec: - port: {{ .Values.node.ambassadorPort }} + port: {{ .Values.settings.node.externalPort }} protocol: TCP securityModel: XFP hostBinding: @@ -45,20 +55,20 @@ spec: apiVersion: getambassador.io/v3alpha1 kind: TCPMapping metadata: - name: "{{ .Values.node.name }}-node-mapping" - namespace: {{ .Values.metadata.namespace }} + name: "{{ .Release.Name }}-node-mapping" + namespace: {{ .Release.Namespace }} spec: - port: {{ .Values.node.ambassadorPort }} - service: "{{ .Values.node.name }}.{{ .Values.metadata.namespace }}:{{ .Values.node.targetPort }}" + port: {{ .Values.settings.node.externalPort }} + service: {{ .Release.Name }}.{{ .Release.Namespace }}:{{ .Values.settings.node.port }} --- apiVersion: getambassador.io/v3alpha1 kind: Listener metadata: - name: "{{ .Values.node.name }}-client-listener" - namespace: {{ .Values.metadata.namespace }} + name: "{{ .Release.Name }}-client-listener" + namespace: {{ .Release.Namespace }} spec: - port: {{ .Values.client.ambassadorPort }} + port: {{ .Values.settings.client.externalPort }} protocol: TCP securityModel: XFP hostBinding: @@ -68,10 +78,9 @@ spec: apiVersion: getambassador.io/v3alpha1 kind: TCPMapping metadata: - name: "{{ .Values.node.name }}-client-mapping" - namespace: {{ .Values.metadata.namespace }} + name: "{{ .Release.Name }}-client-mapping" + namespace: {{ .Release.Namespace }} spec: - port: {{ .Values.client.ambassadorPort }} - service: "{{ .Values.client.name }}.{{ .Values.metadata.namespace }}:{{ .Values.client.targetPort }}" + port: {{ .Values.settings.client.externalPort }} + service: {{ .Release.Name }}.{{ .Release.Namespace }}:{{ .Values.settings.client.port }} {{- end }} - diff --git a/platforms/hyperledger-indy/charts/indy-node/templates/statefulset.yaml b/platforms/hyperledger-indy/charts/indy-node/templates/statefulset.yaml index 
a3b87a10946..15296240d3b 100644 --- a/platforms/hyperledger-indy/charts/indy-node/templates/statefulset.yaml +++ b/platforms/hyperledger-indy/charts/indy-node/templates/statefulset.yaml @@ -7,66 +7,63 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: "{{ $.Values.metadata.name }}" - namespace: "{{ $.Values.metadata.namespace }}" + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} labels: - app: "{{ $.Values.metadata.name }}" + app: {{ .Release.Name }} + app.kubernetes.io/name: indy-node-statefulset + app.kubernetes.io/component: indy + app.kubernetes.io/part-of: {{ include "indy-node.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm spec: - serviceName: "{{ $.Values.metadata.name }}" + serviceName: {{ .Release.Name }} replicas: 1 + podManagementPolicy: OrderedReady + updateStrategy: + type: RollingUpdate selector: matchLabels: - app: "{{ $.Values.metadata.name }}" + app: {{ .Release.Name }} + app.kubernetes.io/name: indy-node-statefulset + app.kubernetes.io/component: indy + app.kubernetes.io/part-of: {{ include "indy-node.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm template: metadata: labels: - app: "{{ $.Values.metadata.name }}" + app: {{ .Release.Name }} + app.kubernetes.io/name: indy-node-statefulset + app.kubernetes.io/component: indy + app.kubernetes.io/part-of: {{ include "indy-node.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm spec: securityContext: fsGroup: 1000 - serviceAccountName: {{ $.Values.vault.serviceAccountName }} + serviceAccountName: {{ .Values.global.serviceAccountName }} + imagePullSecrets: + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} initContainers: - - name: "{{ $.Values.image.initContainer.name }}" - image: "{{ $.Values.image.initContainer.repository }}" + - name: format-certs + image: {{ .Values.image.initContainer }} imagePullPolicy: IfNotPresent env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: VAULT_AUTH_PATH - value: {{ $.Values.vault.authPath }} - - name: VAULT_ROLE - value: {{ $.Values.vault.role }} - - name: KEY_PATH - value: {{ $.Values.vault.keyPath }} - - name: NODE_ID - value: {{ $.Values.vault.nodeId }} - - name: ORGANIZATION_NAME - value: "{{ $.Values.organization.name }}" - - name: ADD_NEW_ORG - value: "{{ $.Values.add_new_org }}" - command: - - "sh" - - "-c" - - > - apk update; - apk add curl jq; - - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - } - - KUBE_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token); - - response=$(curl -s -o /dev/null -w "%{http_code}" $VAULT_ADDR/v1/sys/health); - if [ $response != 200 ]; then - exit 1 - fi - - curl --request POST --data '{"jwt": "'"$KUBE_TOKEN"'", "role": "'"$VAULT_ROLE"'"}' $VAULT_ADDR/v1/auth/$VAULT_AUTH_PATH/login | jq -j '.auth.client_token' > token; - X_VAULT_TOKEN=$(cat token); + - name: KEY_PATH + value: "/keys/{{ .Values.settings.network }}/keys/{{ .Release.Name }}" + - name: NODE_ID + value: "{{ .Release.Name }}" + - name: ADD_NEW_ORG + value: "{{ .Values.settings.addOrg }}" + command: ["sh", "-c"] + args: + - | if $ADD_NEW_ORG ; then @@ -74,85 +71,70 @@ spec: else mkdir -p 
${KEY_PATH}/bls_keys ${KEY_PATH}/private_keys ${KEY_PATH}/public_keys ${KEY_PATH}/sig_keys ${KEY_PATH}/verif_keys ${KEY_PATH}C/private_keys ${KEY_PATH}C/public_keys ${KEY_PATH}C/sig_keys ${KEY_PATH}C/verif_keys; fi; - - LOOKUP_SECRET_RESPONSE=$(curl --header "X-Vault-Token: $X_VAULT_TOKEN" $VAULT_ADDR/v1/"${ORGANIZATION_NAME}"/data/stewards/${NODE_ID}/node/public/bls_keys | jq -r 'if .errors then . else . end'); - validateVaultResponse "secret (${NODE_ID}/node/public/bls_keys)" "${LOOKUP_SECRET_RESPONSE}"; - echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".data.data.bls_pk" > ${KEY_PATH}/bls_keys/bls_pk; + # Get the secrets from Kubernetes secret and save as files + LOOKUP_SECRET_RESPONSE=$(kubectl get secret -n {{ .Release.Namespace }} ${NODE_ID}-node-public-bls-keys -o jsonpath='{.data.value}' | base64 -d); + echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".bls_pk" > ${KEY_PATH}/bls_keys/bls_pk; chmod 644 ${KEY_PATH}/bls_keys/bls_pk; - LOOKUP_SECRET_RESPONSE=$(curl --header "X-Vault-Token: $X_VAULT_TOKEN" $VAULT_ADDR/v1/"${ORGANIZATION_NAME}"/data/stewards/${NODE_ID}/node/private/bls_keys | jq -r 'if .errors then . else . end'); - validateVaultResponse "secret (${NODE_ID}/node/private/bls_keys)" "${LOOKUP_SECRET_RESPONSE}"; - echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".data.data.bls_sk" > ${KEY_PATH}/bls_keys/bls_sk; + LOOKUP_SECRET_RESPONSE=$(kubectl get secret -n {{ .Release.Namespace }} ${NODE_ID}-node-private-bls-keys -o jsonpath='{.data.value}' | base64 -d); + echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".bls_sk" > ${KEY_PATH}/bls_keys/bls_sk; chmod 640 ${KEY_PATH}/bls_keys/bls_sk; - LOOKUP_SECRET_RESPONSE=$(curl --header "X-Vault-Token: $X_VAULT_TOKEN" $VAULT_ADDR/v1/"${ORGANIZATION_NAME}"/data/stewards/${NODE_ID}/node/private/private_keys | jq -r 'if .errors then . else . end'); - validateVaultResponse "secret (${NODE_ID}/node/private/private_keys)" "${LOOKUP_SECRET_RESPONSE}"; - echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".data.data.\"${NODE_ID}.key_secret\"" > ${KEY_PATH}/private_keys/${NODE_ID}.key_secret; + LOOKUP_SECRET_RESPONSE=$(kubectl get secret -n {{ .Release.Namespace }} ${NODE_ID}-node-private-private-keys -o jsonpath='{.data.value}' | base64 -d); + echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".\"${NODE_ID}.key_secret\"" > ${KEY_PATH}/private_keys/${NODE_ID}.key_secret; chmod 640 ${KEY_PATH}/private_keys/${NODE_ID}.key_secret; - LOOKUP_SECRET_RESPONSE=$(curl --header "X-Vault-Token: $X_VAULT_TOKEN" $VAULT_ADDR/v1/"${ORGANIZATION_NAME}"/data/stewards/${NODE_ID}/node/public/public_keys | jq -r 'if .errors then . else . end'); - validateVaultResponse "secret (${NODE_ID}/node/public/public_keys)" "${LOOKUP_SECRET_RESPONSE}"; - echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".data.data.\"${NODE_ID}.key\"" > ${KEY_PATH}/public_keys/${NODE_ID}.key.bootstrap; + LOOKUP_SECRET_RESPONSE=$(kubectl get secret -n {{ .Release.Namespace }} ${NODE_ID}-node-public-public-keys -o jsonpath='{.data.value}' | base64 -d); + echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".\"${NODE_ID}.key\"" > ${KEY_PATH}/public_keys/${NODE_ID}.key.bootstrap; chmod 644 ${KEY_PATH}/public_keys/${NODE_ID}.key.bootstrap; - LOOKUP_SECRET_RESPONSE=$(curl --header "X-Vault-Token: $X_VAULT_TOKEN" $VAULT_ADDR/v1/"${ORGANIZATION_NAME}"/data/stewards/${NODE_ID}/node/private/sig_keys | jq -r 'if .errors then . else . 
end'); - validateVaultResponse "secret (${NODE_ID}/node/private/sig_keys)" "${LOOKUP_SECRET_RESPONSE}"; - echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".data.data.\"${NODE_ID}.key_secret\"" > ${KEY_PATH}/sig_keys/${NODE_ID}.key_secret; + LOOKUP_SECRET_RESPONSE=$(kubectl get secret -n {{ .Release.Namespace }} ${NODE_ID}-node-private-sig-keys -o jsonpath='{.data.value}' | base64 -d); + echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".\"${NODE_ID}.key_secret\"" > ${KEY_PATH}/sig_keys/${NODE_ID}.key_secret; chmod 640 ${KEY_PATH}/sig_keys/${NODE_ID}.key_secret; - LOOKUP_SECRET_RESPONSE=$(curl --header "X-Vault-Token: $X_VAULT_TOKEN" $VAULT_ADDR/v1/"${ORGANIZATION_NAME}"/data/stewards/${NODE_ID}/node/public/verif_keys | jq -r 'if .errors then . else . end'); - validateVaultResponse "secret (${NODE_ID}/node/public/verif_keys)" "${LOOKUP_SECRET_RESPONSE}"; - echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".data.data.\"${NODE_ID}.key\"" > ${KEY_PATH}/verif_keys/${NODE_ID}.key.bootstrap; + LOOKUP_SECRET_RESPONSE=$(kubectl get secret -n {{ .Release.Namespace }} ${NODE_ID}-node-public-verif-keys -o jsonpath='{.data.value}' | base64 -d); + echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".\"${NODE_ID}.key\"" > ${KEY_PATH}/verif_keys/${NODE_ID}.key.bootstrap; chmod 644 ${KEY_PATH}/verif_keys/${NODE_ID}.key.bootstrap; - LOOKUP_SECRET_RESPONSE=$(curl --header "X-Vault-Token: $X_VAULT_TOKEN" $VAULT_ADDR/v1/"${ORGANIZATION_NAME}"/data/stewards/${NODE_ID}/client/private/private_keys | jq -r 'if .errors then . else . end'); - validateVaultResponse "secret (${NODE_ID}/client/private/private_keys)" "${LOOKUP_SECRET_RESPONSE}"; - echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".data.data.\"${NODE_ID}C.key_secret\"" > ${KEY_PATH}C/private_keys/${NODE_ID}C.key_secret; + LOOKUP_SECRET_RESPONSE=$(kubectl get secret -n {{ .Release.Namespace }} ${NODE_ID}-client-private-private-keys -o jsonpath='{.data.value}' | base64 -d); + echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".\"${NODE_ID}C.key_secret\"" > ${KEY_PATH}C/private_keys/${NODE_ID}C.key_secret; chmod 640 ${KEY_PATH}C/private_keys/${NODE_ID}C.key_secret; - LOOKUP_SECRET_RESPONSE=$(curl --header "X-Vault-Token: $X_VAULT_TOKEN" $VAULT_ADDR/v1/"${ORGANIZATION_NAME}"/data/stewards/${NODE_ID}/client/public/public_keys | jq -r 'if .errors then . else . end'); - validateVaultResponse "secret (${NODE_ID}/client/public/public_keys)" "${LOOKUP_SECRET_RESPONSE}"; - echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".data.data.\"${NODE_ID}C.key\"" > ${KEY_PATH}C/public_keys/${NODE_ID}C.key; + LOOKUP_SECRET_RESPONSE=$(kubectl get secret -n {{ .Release.Namespace }} ${NODE_ID}-client-public-public-keys -o jsonpath='{.data.value}' | base64 -d); + echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".\"${NODE_ID}C.key\"" > ${KEY_PATH}C/public_keys/${NODE_ID}C.key; chmod 644 ${KEY_PATH}C/public_keys/${NODE_ID}C.key; - LOOKUP_SECRET_RESPONSE=$(curl --header "X-Vault-Token: $X_VAULT_TOKEN" $VAULT_ADDR/v1/"${ORGANIZATION_NAME}"/data/stewards/${NODE_ID}/client/private/sig_keys | jq -r 'if .errors then . else . 
end'); - validateVaultResponse "secret (${NODE_ID}/client/private/sig_keys)" "${LOOKUP_SECRET_RESPONSE}"; - echo "${LOOKUP_SECRET_RESPONSE}" |jq -j ".data.data.\"${NODE_ID}C.key_secret\"" > ${KEY_PATH}C/sig_keys/${NODE_ID}C.key_secret; + LOOKUP_SECRET_RESPONSE=$(kubectl get secret -n {{ .Release.Namespace }} ${NODE_ID}-client-private-sig-keys -o jsonpath='{.data.value}' | base64 -d); + echo "${LOOKUP_SECRET_RESPONSE}" |jq -j ".\"${NODE_ID}C.key_secret\"" > ${KEY_PATH}C/sig_keys/${NODE_ID}C.key_secret; chmod 640 ${KEY_PATH}C/sig_keys/${NODE_ID}C.key_secret; - LOOKUP_SECRET_RESPONSE=$(curl --header "X-Vault-Token: $X_VAULT_TOKEN" $VAULT_ADDR/v1/"${ORGANIZATION_NAME}"/data/stewards/${NODE_ID}/client/public/verif_keys | jq -r 'if .errors then . else . end'); - validateVaultResponse "secret (${NODE_ID}/client/public/verif_keys)" "${LOOKUP_SECRET_RESPONSE}"; - echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".data.data.\"${NODE_ID}C.key\"" > ${KEY_PATH}C/verif_keys/${NODE_ID}C.key; + LOOKUP_SECRET_RESPONSE=$(kubectl get secret -n {{ .Release.Namespace }} ${NODE_ID}-client-public-verif-keys -o jsonpath='{.data.value}' | base64 -d); + echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".\"${NODE_ID}C.key\"" > ${KEY_PATH}C/verif_keys/${NODE_ID}C.key; chmod 644 ${KEY_PATH}C/verif_keys/${NODE_ID}C.key; if $ADD_NEW_ORG ; then - LOOKUP_SECRET_RESPONSE=$(curl --header "X-Vault-Token: $X_VAULT_TOKEN" $VAULT_ADDR/v1/"${ORGANIZATION_NAME}"/data/stewards/${NODE_ID}/node/public/bls_keys | jq -r 'if .errors then . else . end'); - validateVaultResponse "secret (${NODE_ID}/node/public/bls_keys)" "${LOOKUP_SECRET_RESPONSE}"; - echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".data.data.\"bls-key-pop\"" > ${KEY_PATH}/bls_keys/bls-key-pop; + LOOKUP_SECRET_RESPONSE=$(kubectl get secret -n {{ .Release.Namespace }} ${NODE_ID}-node-public-bls-keys -o jsonpath='{.data.value}' | base64 -d); + echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".\"bls-key-pop\"" > ${KEY_PATH}/bls_keys/bls-key-pop; chmod 644 ${KEY_PATH}/bls_keys/bls-key-pop; - LOOKUP_SECRET_RESPONSE=$(curl --header "X-Vault-Token: $X_VAULT_TOKEN" $VAULT_ADDR/v1/"${ORGANIZATION_NAME}"/data/stewards/${NODE_ID}/node/public/verif_keys | jq -r 'if .errors then . else . end'); - validateVaultResponse "secret (${NODE_ID}/node/public/verif_keys)" "${LOOKUP_SECRET_RESPONSE}"; - echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".data.data.\"verification-key\"" > ${KEY_PATH}/verif_keys/verification-key; + LOOKUP_SECRET_RESPONSE=$(kubectl get secret -n {{ .Release.Namespace }} ${NODE_ID}-node-public-verif-keys -o jsonpath='{.data.value}' | base64 -d); + echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".\"verification-key\"" > ${KEY_PATH}/verif_keys/verification-key; chmod 644 ${KEY_PATH}/verif_keys/verification-key; - LOOKUP_SECRET_RESPONSE=$(curl --header "X-Vault-Token: $X_VAULT_TOKEN" $VAULT_ADDR/v1/"${ORGANIZATION_NAME}"/data/stewards/${NODE_ID}/identity/private | jq -r 'if .errors then . else . 
end'); - validateVaultResponse "secret (${NODE_ID}/identity/private)" "${LOOKUP_SECRET_RESPONSE}"; - echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".data.data.seed" > ${KEY_PATH}/identity/private/seed; + LOOKUP_SECRET_RESPONSE=$(kubectl get secret -n {{ .Release.Namespace }} ${NODE_ID}-identity-private -o jsonpath='{.data.value}' | base64 -d); + echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".seed" > ${KEY_PATH}/identity/private/seed; chmod 644 ${KEY_PATH}/identity/private/seed; - LOOKUP_SECRET_RESPONSE=$(curl --header "X-Vault-Token: $X_VAULT_TOKEN" $VAULT_ADDR/v1/"${ORGANIZATION_NAME}"/data/stewards/${NODE_ID}/identity/public | jq -r 'if .errors then . else . end'); - validateVaultResponse "secret (${NODE_ID}/identity/public)" "${LOOKUP_SECRET_RESPONSE}"; - echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".data.data.did" > ${KEY_PATH}/identity/public/did; + LOOKUP_SECRET_RESPONSE=$(kubectl get secret -n {{ .Release.Namespace }} ${NODE_ID}-identity-public -o jsonpath='{.data.value}' | base64 -d); + echo "${LOOKUP_SECRET_RESPONSE}" | jq -j ".did" > ${KEY_PATH}/identity/public/did; chmod 644 ${KEY_PATH}/identity/public/did; - fi; volumeMounts: - - name: ebs-indy-node-keys - mountPath: /keys -{{- if eq $.Values.add_new_org true }} - - name: "{{ $.Values.image.cli.name }}" - image: "{{ $.Values.image.cli.repository }}" + - name: ebs-indy-node-keys + mountPath: /keys +{{- if eq .Values.settings.addOrg true }} + - name: "add-org" + image: {{ .Values.image.cli }} stdin: true tty: true command: ["sh", "-c"] @@ -187,89 +169,87 @@ spec: cat init_node_txn.log; env: - name: POOL_GENESIS_PATH - value: /var/lib/indy/genesis/{{ $.Values.network.name }}/pool_transactions_genesis + value: /var/lib/indy/genesis/{{ .Values.settings.network }}/pool_transactions_genesis - name: INDY_NETWORK_NAME - value: "{{ $.Values.network.name }}" + value: {{ .Values.settings.network }} - name: INDY_NODE_NAME - value: "{{ $.Values.node.name }}" + value: {{ .Release.Name }} - name: INDY_NODE_IP - value: "{{ $.Values.node.publicIp }}" + value: "{{ .Values.settings.node.publicIp }}" - name: INDY_NODE_PORT - value: "{{ $.Values.node.ambassadorPort }}" + value: "{{ .Values.settings.node.externalPort }}" - name: INDY_CLIENT_IP - value: "{{ $.Values.client.publicIp }}" + value: "{{ .Values.settings.client.publicIp }}" - name: INDY_CLIENT_PORT - value: "{{ $.Values.client.ambassadorPort }}" + value: "{{ .Values.settings.client.externalPort }}" volumeMounts: - name: ebs-indy-node-keys mountPath: /var/lib/indy/keys - - name: {{ $.Values.organization.name }}-domain-transactions-genesis - mountPath: /var/lib/indy/genesis/{{ $.Values.network.name }}/domain_transactions_genesis + - name: domain-transactions-genesis + mountPath: /var/lib/indy/genesis/{{ .Values.settings.network }}/domain_transactions_genesis subPath: domain_transactions_genesis - - name: {{ $.Values.organization.name }}-pool-transactions-genesis - mountPath: /var/lib/indy/genesis/{{ $.Values.network.name }}/pool_transactions_genesis + - name: pool-transactions-genesis + mountPath: /var/lib/indy/genesis/{{ .Values.settings.network }}/pool_transactions_genesis subPath: pool_transactions_genesis {{- end }} containers: - - name: "{{ $.Values.image.indyNode.name }}" - image: "{{ $.Values.image.indyNode.repository }}" + - name: indy-node + image: {{ .Values.image.indyNode.repository }}:{{ .Values.image.indyNode.tag }} imagePullPolicy: IfNotPresent ports: - - containerPort: {{ $.Values.node.port }} - - containerPort: {{ $.Values.client.port }} + - containerPort: {{ .Values.settings.node.port }} 
+ - containerPort: {{ .Values.settings.client.port }} env: - name: INDY_NODE_NAME - value: "{{ $.Values.vault.nodeId }}" + value: {{ .Release.Name }} - name: INDY_NODE_IP - value: "{{ $.Values.node.ip }}" + value: "{{ .Values.settings.node.ip }}" - name: INDY_NODE_PORT - value: "{{ $.Values.node.port }}" + value: "{{ .Values.settings.node.port }}" - name: INDY_CLIENT_IP - value: "{{ $.Values.client.ip }}" + value: "{{ .Values.settings.client.ip }}" - name: INDY_CLIENT_PORT - value: "{{ $.Values.client.port }}" + value: "{{ .Values.settings.client.port }}" - name: INDY_NETWORK_NAME - value: "{{ $.Values.network.name }}" + value: {{ .Values.settings.network }} volumeMounts: - name: ebs-indy-node-data mountPath: /var/lib/indy/data - name: ebs-indy-node-keys mountPath: /var/lib/indy/keys - - name: {{ $.Values.metadata.name }}-config + - name: {{ .Release.Name }}-config mountPath: /etc/indy/indy_config.py subPath: indy_config.py - - name: {{ $.Values.organization.name }}-domain-transactions-genesis - mountPath: /var/lib/indy/genesis/{{ $.Values.network.name }}/domain_transactions_genesis + - name: domain-transactions-genesis + mountPath: /var/lib/indy/genesis/{{ .Values.settings.network }}/domain_transactions_genesis subPath: domain_transactions_genesis - - name: {{ $.Values.organization.name }}-pool-transactions-genesis - mountPath: /var/lib/indy/genesis/{{ $.Values.network.name }}/pool_transactions_genesis + - name: pool-transactions-genesis + mountPath: /var/lib/indy/genesis/{{ .Values.settings.network }}/pool_transactions_genesis subPath: pool_transactions_genesis - imagePullSecrets: - - name: "{{ $.Values.image.pullSecret }}" volumes: - - name: {{ $.Values.metadata.name }}-config + - name: {{ .Release.Name }}-config configMap: - name: {{ $.Values.metadata.name }}-config - - name: {{ $.Values.organization.name }}-domain-transactions-genesis + name: {{ .Release.Name }}-config + - name: domain-transactions-genesis configMap: - name: {{ $.Values.organization.name }}-dtg - - name: {{ $.Values.organization.name }}-pool-transactions-genesis + name: dtg + - name: pool-transactions-genesis configMap: - name: {{ $.Values.organization.name }}-ptg + name: ptg volumeClaimTemplates: - metadata: name: ebs-indy-node-data spec: accessModes: ["ReadWriteOnce"] - storageClassName: "{{ $.Values.storage.data.storageClassName }}" + storageClassName: storage-{{ .Release.Name }} resources: requests: - storage: "{{ $.Values.storage.data.storagesize }}" + storage: "{{ .Values.storage.data }}" - metadata: name: ebs-indy-node-keys spec: accessModes: ["ReadWriteOnce"] - storageClassName: "{{ $.Values.storage.data.storageClassName }}" + storageClassName: storage-{{ .Release.Name }} resources: requests: - storage: "{{ $.Values.storage.keys.storagesize }}" + storage: "{{ .Values.storage.keys }}" diff --git a/platforms/hyperledger-indy/charts/indy-node/values.yaml b/platforms/hyperledger-indy/charts/indy-node/values.yaml index 52bced2a12d..9bc8a83b349 100644 --- a/platforms/hyperledger-indy/charts/indy-node/values.yaml +++ b/platforms/hyperledger-indy/charts/indy-node/values.yaml @@ -3,190 +3,86 @@ # # SPDX-License-Identifier: Apache-2.0 ############################################################################################## - # Default values for indy-node. # This is a YAML-formatted file. # Declare variables to be passed into your templates. - -metadata: - #Provide the namespace for organization's peer - #Eg. namespace: bevel - namespace: - - #Provide the name for indy-node release - #Eg. 
name: indy-node - name: - -#Provide the number of indy-node replicas -#Eg. replicas: 1 -replicas: - -network: - #Provide the name for network - #Eg. name: bevel - name: - -organization: - #Provide the name for organization - #Eg. name: provider - name: - -# add_new_org is true when adding new validator node to existing network -add_new_org: false +global: + serviceAccountName: vault-auth + cluster: + provider: azure # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # set to true to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) + # This section contains the proxy ports. + proxy: + # Mention the proxy provider. Currently ambassador or none is supported. + # Eg. provider: ambassador + provider: ambassador + +# Override necessary Subchart values +storage: + #Provide the storage size for storage for keys + #Eg. keys: 512Mi + keys: "512Mi" + #Provide the storage size for storage for data + #Eg. data: 5Gi + data: "4Gi" + # NOTE: when you set this to Retain, the volume WILL persist after the chart is delete and you need to manually delete it + reclaimPolicy: "Delete" # choose from: Delete | Retain + volumeBindingMode: Immediate # choose from: Immediate | WaitForFirstConsumer + allowedTopologies: + enabled: false image: - initContainer: - #Provide the image name for the indy-node init container - #Eg. name: indy-node - name: - - #Provide the image repository for the indy-node init container - #Eg. repository: alpine:3.9.4 - repository: - cli: - #Provide the image name for the indy-ledger-txn container - #Eg. name: indy-ledger-txn - name: - - #Provide the image repository for the indy-ledger-txn container - #Eg. repository: alpine:3.9.4 - repository: - - indyNode: - #Provide the name for the indy node - #Eg. name: indy-node - name: - - #Provide the image name for the indy-node container - #Eg. repository: xxxx.accenture.com/uisp/indy-node-1.9.2:SR1.0.0-SNAPSHOT - repository: - #Provide the image pull secret of image #Eg. pullSecret: regcred pullSecret: + #Provide the image repository for the init container + #Eg. initContainer: ghcr.io/hyperledger/bevel-alpine-ext:latest + initContainer: ghcr.io/hyperledger/bevel-alpine-ext:latest + #Provide the image repository for the indy-ledger-txn container + #Eg. cli: ghcr.io/hyperledger/bevel-indy-ledger-txn:latest + cli: ghcr.io/hyperledger/bevel-indy-ledger-txn:latest + indyNode: + #Provide the image name for the indy-node container + #Eg. repository: ghcr.io/hyperledger/bevel-indy-node + repository: ghcr.io/hyperledger/bevel-indy-node + #Provide the image version for the indy-node container + #Eg. tag: 1.12.6 + tag: 1.12.6 -node: - #Provide the node name - #Eg. name: indy-node - name: - - #Provide the node ip - #Eg. ip: 0.0.0.0 - ip: - - #Provide the node ip - #Eg. ip: 0.0.0.0 - publicIp: - - #Provide the node port - #Eg. port: 9752 - port: - - #Provide the node port - #Eg. port: 15911 - ambassadorPort: - -client: - #Provide the client ip - #Eg. ip: 0.0.0.0 - ip: - - #Provide the client port - #Eg. port: 9752 - port: - - #Provide the client ip - #Eg. ip: 0.0.0.0 - publicIp: - - #Provide the client port - #Eg. port: 15912 - ambassadorPort: - -service: - type: - #Provide type of service (NodePort/ClusterIp) - ports: - #Provide the service node port - #Eg. nodePort: 9711 - nodePort: - - #Provide the service node target port - #Eg. nodeTargetPort: 9711 - nodeTargetPort: - - #Provide the service client port - #Eg. 
clientPort: 9712 - clientPort: - - #Provide the service client target port - #Eg. clientTargetPort: 9712 - clientTargetPort: - -configmap: - #Provide the domain genesis - domainGenesis: - - #Provide the pool genesis - poolGenesis: - -#Provide annotations for ambassador service configuration -#Only use HTTPS as HTTP and HTTPS don't work together ( https://github.com/datawire/ambassador/issues/1000 ) -#Eg. -# annotations: |- -# apiVersion: ambassador/v2 -# kind: TCPMapping -# name: indy-node-node-mapping -# port: 9711 -# service: custodian-blockchain-indy:9712 -# apiVersion: ambassador/v2 -# kind: TCPMapping -# name: indy-node-client-mapping -# port: 9712 -# service: custodian-blockchain-indy:9712 -ambassador: - -vault: - #Provide the vault server address - #Eg. address: http://54.226.163.39:8200 - address: - - #Provide the service account name for vault - #Eg. serviceaccountname: vault-auth-provider-agent-app - serviceAccountName: - - #Provide the key path for vault - #Eg. keyPath: /keys/udisp/keys/indy-node - keyPath: - - #Provide the authpath - #Eg. authpath: kubernetes-bevel-provider-steward-1-auth - auth_path: - - #Provide the indy-node node Id - #Eg. nodeId: indy-node - nodeId: - - #Provide the indy-node role - #Eg. role: ro - role: - - -storage: - keys: - #Provide the storage size for storage for keys - #Eg. storagesize: 512Mi - storagesize: - - #Provide the storageClassName for storage for keys - #Eg. storageClassName: ebs - storageClassName: - - data: - #Provide the storage size for storage for data - #Eg. storagesize: 5Gi - storagesize: - - #Provide the storageClassName for storage for data - #Eg. storageClassName: ebs - storageClassName: +settings: + #Provide the name for network + #Eg. network: bevel + network: bevel + # addOrg is true when adding new validator node to existing network + addOrg: false + #Provide type of service (NodePort or ClusterIP) + serviceType: ClusterIP + node: + #Provide the node ip + #Eg. ip: 0.0.0.0 + ip: 0.0.0.0 + #Provide the node ip + #Eg. publicIp: 192.168.2.1 + publicIp: + #Provide the node port + #Eg. port: 9711 + port: 9711 + # Mention the external port configured on proxy. + # NOTE: Make sure that the port is enabled and not already used. + # Eg. externalPort: 15011 + externalPort: 15011 + + client: + #Provide the client ip + #Eg. ip: 0.0.0.0 + ip: 0.0.0.0 + #Provide the client ip + #Eg. ip: 192.168.2.1 + publicIp: + #Provide the client port + #Eg. port: 9752 + port: 9712 + # Mention the external port configured on proxy. + # NOTE: Make sure that the port is enabled and not already used. + # Eg. externalPort: 15012 + externalPort: 15012 diff --git a/platforms/hyperledger-indy/charts/indy-pool-genesis/Chart.yaml b/platforms/hyperledger-indy/charts/indy-pool-genesis/Chart.yaml deleted file mode 100644 index 222456797a6..00000000000 --- a/platforms/hyperledger-indy/charts/indy-pool-genesis/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "hyperledger-indy: indy-pool-genesis" -name: indy-pool-genesis -version: 1.0.0 - diff --git a/platforms/hyperledger-indy/charts/indy-pool-genesis/README.md b/platforms/hyperledger-indy/charts/indy-pool-genesis/README.md deleted file mode 100644 index 07325fbbe96..00000000000 --- a/platforms/hyperledger-indy/charts/indy-pool-genesis/README.md +++ /dev/null @@ -1,159 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) -[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# indy-pool-genesis - -- [indy-pool-genesis Helm Chart](#indy-pool-genesis-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Verification](#verification) -- [Updating the Deployment](#updating-the-deployment) -- [Deletion](#deletion) -- [Contributing](#contributing) -- [License](#license) - - -## indy-pool-genesis Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/develop/platforms/hyperledger-indy/charts/indy-pool-genesis) helps to deploy indy pool genesis job. - - -## Prerequisites ---- -Before deploying the Helm chart, make sure to have the following prerequisites: - -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm installed. - - -## Chart Structure ---- -The structure of the Helm chart is as follows: - -``` -indy-pool-genesis/ - |- templates/ - |- _helpers.tpl - |- job.yaml - |- Chart.yaml - |- README.md - |- values.yaml -``` - -- `templates/`: This directory contains the template files for generating Kubernetes resources. -- `job.yaml`: This file provides information about the kubernetes job -- `_helpers.tpl`: Contains custom label definitions used in other templates. -- `Chart.yaml`: Provides metadata about the chart, such as its name, version, and description. -- `README.md`: This file provides information and instructions about the Helm chart. -- `values.yaml`: Contains the default configuration values for the chart. It includes configuration for the metadata, image, node, Vault, etc. - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-indy/charts/indy-pool-genesis/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. 
Here are some important configuration options: - -## Parameters ---- -### metadata - -| Name | Description | Default Value | -| ----------------| ----------------------------------------------- | ------------- | -| namespace | Provide the namespace for organization's peer | bevel | -| name | Provide the name for indy-pool-genesis release | indy-pool-genesis | - -### organization - -| Name | Description | Default Value | -| -------- | ----------------------------------- | ------------- | -| name | Provide the name for organization | provider | -| configmap | | | -| poolGenesis | Provide the poolGenesis | poolGenesis | - - - -## Deployment ---- - -To deploy the indy-pool-genesis Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-indy/charts/indy-pool-genesis/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install the chart: - ``` - $ helm repo add bevel https://hyperledger.github.io/bevel/ - $ helm install ./indy-pool-genesis - ``` -Replace `` with the desired name for the release. - -This will deploy the indy auth job to the Kubernetes cluster based on the provided configurations. - - - -## Verification ---- - -To verify the jobs, we can use the following command: -``` -$ kubectl get jobs -n -``` -Replace `` with the actual namespace where the job was created. The command will display information about the jobs. - - - -## Updating the job ---- - -If we need to update the job with new configurations or changes, modify the same [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/hyperledger-indy/charts/indy-pool-genesis/values.yaml) file with the desired changes and run the following Helm command: -``` -$ helm upgrade ./indy-pool-genesis -``` -Replace `` with the name of the release. This command will apply the changes to the job , ensuring the job is up to date. - - - -## Deletion ---- - -To delete the jobs and associated resources, run the following Helm command: -``` -$ helm uninstall -``` -Replace `` with the name of the release. This command will remove all the resources created by the Helm chart. - - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [INDY authorization job Helm Chart](https://github.com/hyperledger/bevel/tree/develop/platforms/hyperledger-indy/charts/indy-pool-genesis), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - - -## License - -This chart is licensed under the Apache v2.0 license. - -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-``` diff --git a/platforms/hyperledger-indy/charts/indy-pool-genesis/templates/_helpers.tpl b/platforms/hyperledger-indy/charts/indy-pool-genesis/templates/_helpers.tpl deleted file mode 100644 index d43c09d8cef..00000000000 --- a/platforms/hyperledger-indy/charts/indy-pool-genesis/templates/_helpers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} diff --git a/platforms/hyperledger-indy/charts/indy-pool-genesis/values.yaml b/platforms/hyperledger-indy/charts/indy-pool-genesis/values.yaml deleted file mode 100644 index 4df1b43f59e..00000000000 --- a/platforms/hyperledger-indy/charts/indy-pool-genesis/values.yaml +++ /dev/null @@ -1,32 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Default values for indy-pool-genesis. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -metadata: - #Provide the namespace for organization's peer - #Eg. namespace: bevel - namespace: - - #Provide the name for indy-pool-genesis release - #Eg. name: indy-pool-genesis - name: - -organization: - #Provide the name for organization - #Eg. name: provider - name: - - configmap: - #Provide the poolGenesis - #Eg. poolGenesis: |- - #{"reqSignature":{},"txn":{"data":{"data":{"alias":"provider-steward-1","blskey":"HHoREgUbA932WS2b6UjuUqLCGftPwp5WHvRGHRhA4QrsbV56qf13u7v6JZSRLV3cw4voQ8tqevf8dikmacwtsNh6Lr7KTPJfYuYcZd3iM7RhV1KkMKy6xkYgHEz9HeKwS5EtmWbuK2hU5ADjjJyj8gHvyxGV45GtSyuv4SXXhk28og","blskey_pop":"QuvE3nzfx3Jb81H2HVB37TABzg7h9zUKu2cWD3EupQ9YCo3JTh1BQsY65WdEw9uwrywRgcfXRAkFf3t35JFXhi5eYKB42qpmcWqFi3XQ1aorXo1jz2WnX1rNJeRozTnsxTMMNABRr69exxNFbgH6z3pA44vgyeFQAR68Wd69LNrGsy","client_ip":"3.221.78.194","client_port":19712,"node_ip":"3.221.78.194","node_port":19711,"services":["VALIDATOR"]},"dest":"Ad2wXywwt8NiBDXhQU6am2CVHbHyYRRY38HCxATE7pzz"},"metadata":{"from":"JerLtFwVmp8f4LS6tdTDwA"},"type":"0"},"txnMetadata":{"seqNo":1,"txnId":"917292505e1e4063b0e6b30d73c131649e5a16271d3c96c3284751a66ef1ff50"},"ver":"1"} - #{"reqSignature":{},"txn":{"data":{"data":{"alias":"provider-steward-2","blskey":"2GaTCgC6qmGopiCGxirPWVQDT6odF2QXzwrPAFQZFvtrBTS8GNo5LHNq5iuP8KrXSXfV6RFJTpqyHzAxuCiU5CMGEGao4zTgQj3HvWhrZ7KUJDbLUgVcdyGv3sjwaU37oAQyKUv3gX8BKf6qwMyM9VAG1BqR8VRNXgQjNfV279FGV2h","blskey_pop":"R1VtRJprMMV7YrYaYGforT1sckiUZ2UzDTDSnTGKon611Mnv8VQiNXrW4W4eFsJBtU774PDNgvWbNbZd9Vujq8Tng8RgAfzLmbNeGjtze55X71Rf2EVG3swUyuTBoLChHfCh9VBogHZC18UJZxPvHwHE2kDn4shGcFWLfxNKqKHBzF","client_ip":"3.221.78.194","client_port":19722,"node_ip":"3.221.78.194","node_port":19721,"services":["VALIDATOR"]},"dest":"2pkLP55RVqjwPPZvRyrMXNasNxkGfvUuHs2sXgpvjgLv"},"metadata":{"from":"4M286TT2qVTSWn2i7d6Ggg"},"type":"0"},"txnMetadata":{"seqNo":2,"txnId":"aecd2adbe0d6c4c0ef6e3f78503bddfa8e6078f030381ae895bae9d0da84124f"},"ver":"1"} - 
#{"reqSignature":{},"txn":{"data":{"data":{"alias":"partner-steward-1","blskey":"3kpJ27VSF4mDKxkeTGa7CdvwGjFrJTT53MH7KaKsWgbAke38fznDhGCpe5QapkEhyoxb2xvCPfDRS3Nnu5GiRZ9GxEdZLsKkYGVkjCA4TysscXUeq6jKZnKkpZG2PMKM9YtfLjzF36tXHmXrvweZcAq4bUnhEckn2iJSuorK6ZyGmmZ","blskey_pop":"RFRVgKNbHe6EF2L419bgs8Yaws1NwYchPLKYFgdMi2bBZRiL1uDdqtpELrW5Q2TvXfKGDjChTvDiR8mCZLBDgxD54a8gzt5CNzPtkBrv96u8jAVj1LLLzx7T3Z6YuHvtSYKh4Bs83Uag8TkdDnrrcsgRdnER7jGhmq758z8Z1tb4MD","client_ip":"3.221.78.194","client_port":19732,"node_ip":"3.221.78.194","node_port":19731,"services":["VALIDATOR"]},"dest":"EZN4GQMvFhUv7jqDbf3Q7aow9Yb7JcKgidfSTR8zbsp5"},"metadata":{"from":"Rsn88jsgAGSyABaB8b73V4"},"type":"0"},"txnMetadata":{"seqNo":3,"txnId":"62a426a55142745dcd68456212d322c8b63682274fcdffd10c3d274e0f87469b"},"ver":"1"} - #{"reqSignature":{},"txn":{"data":{"data":{"alias":"partner-steward-2","blskey":"24sdVVdpMotejPMswqfuNks1tZUy2d6aB2Gxb74QXuP7LTWfxYcQUeedyL3J1zADDAf7Mj5oZfxupvV1yJrYwHscP6biY9sJVSMYYaEmdFjsy6LWYP5Gtja4uyBiYksXB3rzbp39cj4HLwwrxPrSAx4Ar6TRvEWGerYQBhXWBNYvR2W","blskey_pop":"Qkn8sg6RPLGihnap32vmLURjdA4z9CjjwmDadcdBrwRVLb6FSAbnNynkVkK9vXBYZti4xLBB6rtktZVwrVm6k3thZQZehMbEeEgpX148D5v7Z8aLz2AkhtJUZKeNtn6S2umhRTHgon6Fs4YxRRFYR3uVFMot6r1i4unYZeP6JChvD5","client_ip":"3.221.78.194","client_port":19742,"node_ip":"3.221.78.194","node_port":19741,"services":["VALIDATOR"]},"dest":"53HWJHMUDFEaVwRLk8awK9meoatqZrqiyNsJqHP3M6sN"},"metadata":{"from":"8QrUxhXHb7v63D2PPwdZr3"},"type":"0"},"txnMetadata":{"seqNo":4,"txnId":"bf525e539b7e0d3e0eeeb2a075198dde681f56ce7d724aca0ebf6d606289a523"},"ver":"1"} - poolGenesis: diff --git a/platforms/hyperledger-indy/charts/indy-register-identity/Chart.yaml b/platforms/hyperledger-indy/charts/indy-register-identity/Chart.yaml new file mode 100644 index 00000000000..7778aa5dcbc --- /dev/null +++ b/platforms/hyperledger-indy/charts/indy-register-identity/Chart.yaml @@ -0,0 +1,26 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +apiVersion: v1 +name: indy-register-identity +description: "Hyperledger Indy: Indy Ledger Script for registering new identities" +version: 1.1.0 +appVersion: latest +keywords: + - bevel + - identity + - indy + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/hyperledger-indy/charts/indy-register-identity/README.md b/platforms/hyperledger-indy/charts/indy-register-identity/README.md new file mode 100644 index 00000000000..b2ee0f648f0 --- /dev/null +++ b/platforms/hyperledger-indy/charts/indy-register-identity/README.md @@ -0,0 +1,102 @@ +[//]: # (##############################################################################################) +[//]: # (Copyright Accenture. All Rights Reserved.) +[//]: # (SPDX-License-Identifier: Apache-2.0) +[//]: # (##############################################################################################) + +# indy-register-identity + +This chart is a component of Hyperledger Bevel. The indy-register-identity chart registers a new Identiy for an existing Indy network; it should be executed by a `trustee`. 
See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for more details. + +> **Important**: The public key files for the new identity should already be placed in `files` before installing this chart. Check **Prerequisites**. + +## TL;DR + +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install endorser-registration bevel/indy-register-identity +``` + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +If Hashicorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ + +Before running indy-register-identity, the public key information for the endorser/identity should be saved in the `files` directory. For example, given an endorser called `university-endorser`, run the following commands to save the public key info. + +> **Important**: The [indy-key-mgmt](../indy-key-mgmt/README.md) chart generates these keys, so should be installed with matching endorser name before this chart. + +```bash +cd files +# endorser files are in university-ns namespace +endorser_namespace=university-ns +endorser_name=university-endorser +kubectl --namespace $endorser_namespace get secret $endorser_name-identity-public -o jsonpath='{.data.value}' | base64 -d | jq '.["did"]'> $endorser_name-did.json +kubectl --namespace $endorser_namespace get secret $endorser_name-node-public-verif-keys -o jsonpath='{.data.value}' | base64 -d | jq '.["verification-key"]' > $endorser_name-verkey.json + +``` + +## Installing the Chart +To install the chart with the release name `endorser-registration`: + +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install endorser-registration bevel/indy-register-identity +``` + +The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `endorser-registration` deployment: + +```bash +helm uninstall endorser-registration +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters +### Image + +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `image.cli` | Indy Cli image repository and tag | `ghcr.io/hyperledger/bevel-indy-ledger-txn:latest` | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | + +### Settings + +| Name | Description | Default Value | +|--------|---------|-------------| +|`network` | Network Name for Indy | `bevel` | +| `admin` | Trustee name who is running the registration, ensure the chart is installed on this trustee namespace | `authority-trustee` | +| `newIdentity.name` | Name of the new identity | `university-endorser` | +| `newIdentity.role` | Role of the new identity | `ENDORSER` | + +## License + +This chart is licensed under the Apache v2.0 license. + +Copyright © 2024 Accenture + +### Attribution + +This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: + +``` +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +``` diff --git a/platforms/hyperledger-indy/charts/indy-register-identity/files/readme.txt b/platforms/hyperledger-indy/charts/indy-register-identity/files/readme.txt new file mode 100644 index 00000000000..672160e2f49 --- /dev/null +++ b/platforms/hyperledger-indy/charts/indy-register-identity/files/readme.txt @@ -0,0 +1 @@ +This is a dummy file. Place the public key files in this folder. \ No newline at end of file diff --git a/platforms/hyperledger-indy/charts/indy-register-identity/templates/_helpers.tpl b/platforms/hyperledger-indy/charts/indy-register-identity/templates/_helpers.tpl new file mode 100644 index 00000000000..a51345d5c81 --- /dev/null +++ b/platforms/hyperledger-indy/charts/indy-register-identity/templates/_helpers.tpl @@ -0,0 +1,28 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "indy-register-identity.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "indy-register-identity.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "indy-register-identity.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/platforms/hyperledger-indy/charts/indy-pool-genesis/templates/configmap.yaml b/platforms/hyperledger-indy/charts/indy-register-identity/templates/configmap.yaml similarity index 54% rename from platforms/hyperledger-indy/charts/indy-pool-genesis/templates/configmap.yaml rename to platforms/hyperledger-indy/charts/indy-register-identity/templates/configmap.yaml index cb98b4bae52..14248b6a03e 100644 --- a/platforms/hyperledger-indy/charts/indy-pool-genesis/templates/configmap.yaml +++ b/platforms/hyperledger-indy/charts/indy-register-identity/templates/configmap.yaml @@ -3,16 +3,17 @@ # # SPDX-License-Identifier: Apache-2.0 ############################################################################################## - +--- apiVersion: v1 kind: ConfigMap metadata: - name: {{ $.Values.organization.name }}-ptg - namespace: {{ $.Values.metadata.namespace }} + name: {{ .Release.Name }}-keys + namespace: {{ .Release.Namespace }} labels: - app.kubernetes.io/name: {{ $.Values.organization.name }}-ptg - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} app.kubernetes.io/instance: {{ .Release.Name }} data: - pool_transactions_genesis: | - {{ $.Values.configmap.poolGenesis | nindent 6 }} + did: | +{{ .Files.Get (printf "files/%s-did.json" .Values.newIdentity.name) | replace "\"" "" | indent 4 }} + verkey: | +{{ .Files.Get (printf "files/%s-verkey.json" .Values.newIdentity.name) | replace "\"" "" | indent 4 }} + diff --git a/platforms/hyperledger-indy/charts/indy-register-identity/templates/job.yaml b/platforms/hyperledger-indy/charts/indy-register-identity/templates/job.yaml new file mode 100644 index 00000000000..ccc30ff64ab --- /dev/null +++ b/platforms/hyperledger-indy/charts/indy-register-identity/templates/job.yaml @@ -0,0 +1,85 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/hook-delete-policy: "hook-succeeded" + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: indy-register-identity-job + app.kubernetes.io/component: register-identity-job + app.kubernetes.io/part-of: {{ include "indy-register-identity.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +spec: + backoffLimit: 7 + template: + metadata: + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: indy-register-identity-job + app.kubernetes.io/component: register-identity-job + app.kubernetes.io/part-of: {{ include "indy-register-identity.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + spec: + restartPolicy: OnFailure + imagePullSecrets: + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} + volumes: + - name: {{ .Release.Name }}-ptg + configMap: + name: ptg + - name: identity-keys + configMap: + name: {{ .Release.Name }}-keys + - name: admin-public + secret: + secretName: {{ .Values.admin }}-identity-public + - name: admin-private + secret: + secretName: {{ .Values.admin }}-identity-private + containers: + - name: register-endorser + image: {{ .Values.image.cli }} + stdin: true + tty: true + command: ["sh", "-c"] + args: + - | + + #apt-get install curl -y + + ADMIN_SEED=$(cat /data/admin/private/value | jq -r ".seed") + ADMIN_DID=$(cat /data/admin/public/value | jq -r ".did") + IDENTITY_DID=$(cat /data/identity/did) + IDENTITY_VERKEY=$(cat /data/identity/verkey) + echo "Running ledger Transaction Script..."; + + ./home/indy-ledger.sh $ADMIN_DID $ADMIN_SEED $IDENTITY_DID $IDENTITY_ROLE $IDENTITY_VERKEY $POOL_GENESIS_PATH; + env: + - name: IDENTITY_ROLE + value: "{{ .Values.newIdentity.role }}" + - name: POOL_GENESIS_PATH + value: /var/lib/indy/genesis/{{ .Values.network }}/pool_transactions_genesis + volumeMounts: + - name: {{ .Release.Name }}-ptg + mountPath: /var/lib/indy/genesis/{{ .Values.network }}/pool_transactions_genesis + subPath: pool_transactions_genesis + - name: identity-keys + mountPath: /data/identity + - name: admin-public + mountPath: /data/admin/public + - name: admin-private + mountPath: /data/admin/private diff --git a/platforms/hyperledger-indy/charts/indy-register-identity/values.yaml b/platforms/hyperledger-indy/charts/indy-register-identity/values.yaml new file mode 100644 index 00000000000..396eafc2043 --- /dev/null +++ b/platforms/hyperledger-indy/charts/indy-register-identity/values.yaml @@ -0,0 +1,30 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Default values for indy-ledger-txn. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +image: + # Provide the image repository for the indy-ledger-txn container + #Eg. cli: ghcr.io/hyperledger/bevel-indy-ledger-txn:latest + cli: ghcr.io/hyperledger/bevel-indy-ledger-txn:latest + #Provide the image pull secret of image + #Eg. pullSecret: regcred + pullSecret: + +# Provide the name for network +#Eg. network: bevel +network: bevel +# Provide the admin trustee for this network, ensure the job is run on trustee namespace +#Eg. admin: authority-trustee +admin: authority-trustee +newIdentity: + # Provide the new identity name + #Eg. name: university-endorser + name: university-endorser + # Provide the new identity role + #Eg. 
role: ENDORSER + role: ENDORSER diff --git a/platforms/hyperledger-indy/charts/values/noproxy-and-novault/authority-keys.yaml b/platforms/hyperledger-indy/charts/values/noproxy-and-novault/authority-keys.yaml new file mode 100644 index 00000000000..0f9de2cdf04 --- /dev/null +++ b/platforms/hyperledger-indy/charts/values/noproxy-and-novault/authority-keys.yaml @@ -0,0 +1,17 @@ +--- +#helm install member-2 -f values/noproxy-and-novault/txnode-sec.yml -n carrier-bes besu-node +global: + serviceAccountName: bevel-auth + cluster: + provider: azure # choose from: minikube | aws | azure | gcp + cloudNativeServices: false + vault: + type: kubernetes + network: indy + proxy: + provider: none + +settings: + removeKeysOnDelete: true + identities: + trustee: authority-trustee diff --git a/platforms/hyperledger-indy/charts/values/noproxy-and-novault/genesis-sec.yaml b/platforms/hyperledger-indy/charts/values/noproxy-and-novault/genesis-sec.yaml new file mode 100644 index 00000000000..1f5393dc799 --- /dev/null +++ b/platforms/hyperledger-indy/charts/values/noproxy-and-novault/genesis-sec.yaml @@ -0,0 +1,14 @@ +#helm install genesis -f values/noproxy-and-novault/genesis.yaml -n supplychain-bes besu-genesis +global: + serviceAccountName: bevel-auth + vault: + type: kubernetes + cluster: + provider: azure # choose from: minikube | aws | azure | gcp + cloudNativeServices: false + +settings: + # Flag to ensure the genesis configmaps are removed on helm uninstall + removeGenesisOnDelete: true + # Flag to copy domain and pool genesis from files for secondary members + secondaryGenesis: true diff --git a/platforms/hyperledger-indy/charts/values/noproxy-and-novault/genesis.yaml b/platforms/hyperledger-indy/charts/values/noproxy-and-novault/genesis.yaml new file mode 100644 index 00000000000..b27e6db78e9 --- /dev/null +++ b/platforms/hyperledger-indy/charts/values/noproxy-and-novault/genesis.yaml @@ -0,0 +1,30 @@ +#helm install genesis -f values/noproxy-and-novault/genesis.yaml -n supplychain-bes besu-genesis +global: + serviceAccountName: bevel-auth + vault: + type: kubernetes + cluster: + provider: azure # choose from: minikube | aws | azure | gcp + cloudNativeServices: false + +settings: + # Flag to ensure the genesis configmaps are removed on helm uninstall + removeGenesisOnDelete: true + # Flag to copy domain and pool genesis from files for secondary members + secondaryGenesis: false + # Provide the steward details by following trustee tree as per example below + trustees: + - name: authority-trustee + stewards: + - name: university-steward-1 # Steward name + publicIp: "node-ip" # Steward public IP Address /Kubernetes API IP for noproxy + nodePort: 30011 # Node external port + clientPort: 30012 # Client external port + - name: university-steward-2 + publicIp: "node-ip" + nodePort: 30021 + clientPort: 30022 + - name: university-steward-3 + publicIp: "node-ip" + nodePort: 30031 + clientPort: 30032 diff --git a/platforms/hyperledger-indy/charts/values/noproxy-and-novault/steward.yaml b/platforms/hyperledger-indy/charts/values/noproxy-and-novault/steward.yaml new file mode 100644 index 00000000000..854014b2880 --- /dev/null +++ b/platforms/hyperledger-indy/charts/values/noproxy-and-novault/steward.yaml @@ -0,0 +1,31 @@ +--- +#helm install validator-1 -f values/noproxy-and-novault/validator.yml -n supplychain-bes besu-node +#helm upgrade validator-1 -f values/noproxy-and-novault/validator.yml -n supplychain-bes besu-node +global: + serviceAccountName: bevel-auth + cluster: + provider: azure # choose from: 
minikube | aws | azure | gcp + cloudNativeServices: false + proxy: + provider: none + +storage: + keys: "512Mi" + data: "4Gi" + +image: + indyNode: + repository: ghcr.io/hyperledger/bevel-indy-node + tag: 1.12.6 + +settings: + serviceType: NodePort + node: + publicIp: "node-ip" + port: 30011 + externalPort: 30011 + + client: + publicIp: "node-ip" + port: 30012 + externalPort: 30012 diff --git a/platforms/hyperledger-indy/charts/values/noproxy-and-novault/university-keys.yaml b/platforms/hyperledger-indy/charts/values/noproxy-and-novault/university-keys.yaml new file mode 100644 index 00000000000..1b7609b3262 --- /dev/null +++ b/platforms/hyperledger-indy/charts/values/noproxy-and-novault/university-keys.yaml @@ -0,0 +1,22 @@ +--- +#helm install member-2 -f values/noproxy-and-novault/txnode-sec.yml -n carrier-bes besu-node +global: + serviceAccountName: bevel-auth + cluster: + provider: azure # choose from: minikube | aws | azure | gcp + cloudNativeServices: false + vault: + type: kubernetes + network: indy + proxy: + provider: none + +settings: + removeKeysOnDelete: true + identities: + trustee: + endorser: university-endorser + stewards: + - university-steward-1 + - university-steward-2 + - university-steward-3 diff --git a/platforms/hyperledger-indy/charts/values/proxy-and-vault/authority-keys.yaml b/platforms/hyperledger-indy/charts/values/proxy-and-vault/authority-keys.yaml new file mode 100644 index 00000000000..0f953a18f27 --- /dev/null +++ b/platforms/hyperledger-indy/charts/values/proxy-and-vault/authority-keys.yaml @@ -0,0 +1,31 @@ +--- +#helm install member-2 -f values/noproxy-and-novault/txnode-sec.yml -n carrier-bes besu-node +global: + serviceAccountName: vault-auth + cluster: + provider: azure # choose from: minikube | aws | azure | gcp + cloudNativeServices: false + #Provide the kubernetes host url + #Eg. kubernetesUrl: https://10.3.8.5:8443 + kubernetesUrl: "https://kubernetes.url" + vault: + type: hashicorp + #Provide the vault role used. + role: vault-role + #Provide the network type + network: indy + #Provide the vault server address + address: "http://vault.url:8200" + #Provide the vault authPath configured to be used. + authPath: authority + #Provide the secret engine. + secretEngine: secretsv2 + #Provide the vault path where the secrets will be stored + secretPrefix: "data/authority" + proxy: + provider: ambassador + +settings: + removeKeysOnDelete: true + identities: + trustee: authority-trustee diff --git a/platforms/hyperledger-indy/charts/values/proxy-and-vault/genesis-sec.yaml b/platforms/hyperledger-indy/charts/values/proxy-and-vault/genesis-sec.yaml new file mode 100644 index 00000000000..dfa2da45175 --- /dev/null +++ b/platforms/hyperledger-indy/charts/values/proxy-and-vault/genesis-sec.yaml @@ -0,0 +1,26 @@ +#helm install genesis -f values/noproxy-and-novault/genesis.yaml -n supplychain-bes besu-genesis +global: + serviceAccountName: vault-auth + cluster: + provider: azure # choose from: minikube | aws | azure | gcp + cloudNativeServices: false + vault: + type: hashicorp + #Provide the vault role used. + role: vault-role + #Provide the network type + network: indy + #Provide the vault server address + address: "http://vault.url:8200" + #Provide the vault authPath configured to be used. + authPath: university + #Provide the secret engine. 
+ secretEngine: secretsv2 + #Provide the vault path where the secrets will be stored + secretPrefix: "data/university" + +settings: + # Flag to ensure the genesis configmaps are removed on helm uninstall + removeGenesisOnDelete: true + # Flag to copy domain and pool genesis from files for secondary members + secondaryGenesis: true diff --git a/platforms/hyperledger-indy/charts/values/proxy-and-vault/genesis.yaml b/platforms/hyperledger-indy/charts/values/proxy-and-vault/genesis.yaml new file mode 100644 index 00000000000..4e2b2c14605 --- /dev/null +++ b/platforms/hyperledger-indy/charts/values/proxy-and-vault/genesis.yaml @@ -0,0 +1,46 @@ +#helm install genesis -f values/noproxy-and-novault/genesis.yaml -n supplychain-bes besu-genesis +global: + serviceAccountName: vault-auth + cluster: + provider: azure # choose from: minikube | aws | azure | gcp + cloudNativeServices: false + vault: + type: hashicorp + #Provide the vault role used. + role: vault-role + #Provide the network type + network: indy + #Provide the vault server address + address: "http://vault.url:8200" + #Provide the vault authPath configured to be used. + authPath: authority + #Provide the secret engine. + secretEngine: secretsv2 + #Provide the vault path where the secrets will be stored + secretPrefix: "data/authority" + +settings: + # Flag to ensure the genesis configmaps are removed on helm uninstall + removeGenesisOnDelete: true + # Flag to copy domain and pool genesis from files for secondary members + secondaryGenesis: false + # Provide the steward details by following trustee tree as per example below + trustees: + - name: authority-trustee + stewards: + - name: university-steward-1 # Steward name + publicIp: "ambassador-public-ip" # Steward public IP Address /Kubernetes API IP for noproxy + nodePort: 15011 # Client external port + clientPort: 15012 # Node external port + - name: university-steward-2 + publicIp: "ambassador-public-ip" + nodePort: 15021 + clientPort: 15022 + - name: university-steward-3 + publicIp: "ambassador-public-ip" + nodePort: 15031 + clientPort: 15032 + - name: university-steward-4 + publicIp: "ambassador-public-ip" + nodePort: 15041 + clientPort: 15042 diff --git a/platforms/hyperledger-indy/charts/values/proxy-and-vault/steward.yaml b/platforms/hyperledger-indy/charts/values/proxy-and-vault/steward.yaml new file mode 100644 index 00000000000..6b75829c302 --- /dev/null +++ b/platforms/hyperledger-indy/charts/values/proxy-and-vault/steward.yaml @@ -0,0 +1,31 @@ +--- +#helm install validator-1 -f values/noproxy-and-novault/validator.yml -n supplychain-bes besu-node +#helm upgrade validator-1 -f values/noproxy-and-novault/validator.yml -n supplychain-bes besu-node +global: + serviceAccountName: vault-auth + cluster: + provider: azure # choose from: minikube | aws | azure | gcp + cloudNativeServices: false + proxy: + provider: ambassador + +storage: + keys: "512Mi" + data: "4Gi" + +image: + indyNode: + repository: ghcr.io/hyperledger/bevel-indy-node + tag: 1.12.6 + +settings: + serviceType: ClusterIP + node: + publicIp: "ambassador-public-ip" + port: 9711 + externalPort: 15011 + + client: + publicIp: "ambassador-public-ip" + port: 9712 + externalPort: 15012 diff --git a/platforms/hyperledger-indy/charts/values/proxy-and-vault/university-keys.yaml b/platforms/hyperledger-indy/charts/values/proxy-and-vault/university-keys.yaml new file mode 100644 index 00000000000..c012cc29319 --- /dev/null +++ b/platforms/hyperledger-indy/charts/values/proxy-and-vault/university-keys.yaml @@ -0,0 +1,38 @@ +--- 
+#helm install member-2 -f values/noproxy-and-novault/txnode-sec.yml -n carrier-bes besu-node +global: + serviceAccountName: vault-auth + cluster: + provider: azure # choose from: minikube | aws | azure | gcp + cloudNativeServices: false + #Provide the kubernetes host url + #Eg. kubernetesUrl: https://10.3.8.5:8443 + kubernetesUrl: "https://kubernetes.url" + vault: + type: hashicorp + #Provide the vault role used. + role: vault-role + #Provide the network type + network: indy + #Provide the vault server address + address: "http://vault.url:8200" + #Provide the vault authPath configured to be used. + authPath: university + #Provide the secret engine. + secretEngine: secretsv2 + #Provide the vault path where the secrets will be stored + secretPrefix: "data/university" + proxy: + provider: ambassador + +settings: + removeKeysOnDelete: true + identities: + trustee: + endorser: university-endorser + stewards: + - university-steward-1 + - university-steward-2 + - university-steward-3 + - university-steward-4 + \ No newline at end of file diff --git a/platforms/hyperledger-indy/configuration/cleanup.yaml b/platforms/hyperledger-indy/configuration/cleanup.yaml index a0b5da8760c..92b70722c2e 100644 --- a/platforms/hyperledger-indy/configuration/cleanup.yaml +++ b/platforms/hyperledger-indy/configuration/cleanup.yaml @@ -13,17 +13,19 @@ no_log: "{{ no_ansible_log | default(false) }}" tasks: # Cleanup all organizations' vault indy crypto - - name: Cleanup Vault indy crypto + - name: "Clean up Vault indy crypto" include_role: name: clean/vault vars: - organization: "{{ organizationItem.name | lower }}" - organization_ns: "{{ organization }}-ns" - services: "{{ organizationItem.services }}" - acount: "{{ organization }}-admin-vault-auth" - vault: "{{ organizationItem.vault }}" - role: "rw" - auth_path: "kubernetes-{{ organization }}" + org_name: "{{ org.name | lower }}" + org_ns: "{{ org_name }}-ns" + services: "{{ org.services }}" + vault: "{{ org.vault }}" loop: "{{ network['organizations'] }}" loop_control: - loop_var: organizationItem + loop_var: org + + # Clean up helpers directory + - name: "Clean up helpers directory" + include_role: + name: clean/local_directories diff --git a/platforms/hyperledger-indy/configuration/deploy-network.yaml b/platforms/hyperledger-indy/configuration/deploy-network.yaml index d39a207dbc5..bb2a555fdb7 100644 --- a/platforms/hyperledger-indy/configuration/deploy-network.yaml +++ b/platforms/hyperledger-indy/configuration/deploy-network.yaml @@ -4,10 +4,11 @@ # SPDX-License-Identifier: Apache-2.0 ############################################################################################## -######################### +############################################################################################## # Playbook to create deployment files for namespaces, service account and clusterrolebinding # Playbook arguments: complete network.yaml -######################### +############################################################################################## +--- - hosts: ansible_provisioners gather_facts: no no_log: "{{ no_ansible_log | default(false) }}" @@ -24,204 +25,100 @@ name: check/validation # Create namespaces for organizations - - name: 'Create namespace' + - name: "Create namespace" include_role: name: create/namespace vars: - component_name: "{{ organizationItem.name | lower }}-ns" - component_type_name: "{{ organizationItem.type | lower }}" - kubernetes: "{{ organizationItem.k8s }}" - release_dir: 
"{{playbook_dir}}/../../../{{organizationItem.gitops.release_dir}}/{{ organizationItem.name | lower }}" + component_name: "{{ org.name | lower }}-ns" + component_type_name: "{{ org.type | lower }}" + kubernetes: "{{ org.k8s }}" + release_dir: "{{playbook_dir}}/../../../{{org.gitops.release_dir}}/{{ org.name | lower }}" loop: "{{ network['organizations'] }}" loop_control: - loop_var: organizationItem - when: organizationItem.org_status is not defined or organizationItem.org_status == 'new' + loop_var: org - # Create service accounts - - name: 'Create service accounts' + # Create necessary Kubernetes secrets for each organization + - name: "Create k8s secrets" include_role: - name: create/serviceaccount/main + name: create/secrets vars: - component_ns: "{{ organizationItem.name | lower }}-ns" - organization: "{{ organizationItem.name | lower }}" - component_type_name: "{{ organization }}" - services: "{{ organizationItem.services }}" - gitops: "{{ organizationItem.gitops }}" - kubernetes: "{{ organizationItem.k8s }}" + component_ns: "{{ org.name | lower }}-ns" + kubernetes: "{{ org.k8s }}" + vault: "{{ org.vault }}" loop: "{{ network['organizations'] }}" loop_control: - loop_var: organizationItem - when: organizationItem.org_status is not defined or organizationItem.org_status == 'new' + loop_var: org - # Create StorageClass - - name: Create Storage Class + # Generate keys for each nodes + - name: "Generate keys" include_role: - name: "{{ playbook_dir }}/../../../platforms/shared/configuration/roles/setup/storageclass" + name: setup/generate-keys vars: org_name: "{{ org.name | lower }}" - sc_name: "{{ org_name }}-bevel-storageclass" - region: "{{ org.k8s.region | default('eu-west-1') }}" + stewards: "{{ org.services.stewards }}" + cloud_provider: "{{ org.cloud_provider | lower }}" + vault: "{{ org.vault }}" + kubernetes: "{{ org.k8s }}" + component_type: "generate-keys" + component_ns: "{{ org_name }}-ns" + component_name: "{{ org_name }}-keys" + values_dir: "{{playbook_dir}}/../../../{{org.gitops.release_dir}}" + charts_dir: "{{ org.gitops.chart_source }}" loop: "{{ network['organizations'] }}" loop_control: loop_var: org - when: org.org_status is not defined or org.org_status == 'new' - - # Admin K8S auth - - name: Admin K8S auth - include_role: - name: setup/vault_kubernetes - vars: - organization: "{{ organizationItem.name | lower }}" - component_ns: "{{ organizationItem.name | lower }}-ns" - component_name: "{{ organization }}-bevel-ac-vault-auth" - component_type: "GetServiceAccount" - vault: "{{ organizationItem.vault }}" - auth_path: "kubernetes-{{ organization }}-admin-auth" - kubernetes: "{{ organizationItem.k8s }}" - loop: "{{ network['organizations'] }}" - loop_control: - loop_var: organizationItem - when: organizationItem.org_status is not defined or organizationItem.org_status == 'new' - - # Generate auth job - - name: 'Generate auth job' - include_role: - name: setup/auth_job - vars: - organization: "{{ organizationItem.name | lower }}" - component_ns: "{{ organizationItem.name | lower }}-ns" - component_name: "{{ organization }}" - services: "{{ organizationItem.services }}" - kubernetes: "{{ organizationItem.k8s }}" - vault: "{{ organizationItem.vault }}" - gitops: "{{ organizationItem.gitops }}" - loop: "{{ network['organizations'] }}" - loop_control: - loop_var: organizationItem - when: organizationItem.org_status is not defined or organizationItem.org_status == 'new' - - # Get Vault AC Token via Service Account - - name: Get Vault AC Token via Service Account - 
include_role: - name: check/k8_component - vars: - organization: "{{ organizationItem.name | lower }}" - component_ns: "{{ organizationItem.name | lower }}-ns" - component_name: "{{ organization }}-bevel-ac-vault-auth" - component_type: "GetServiceAccount" - vault: "{{ organizationItem.vault }}" - kubernetes: "{{ organizationItem.k8s }}" - loop: "{{ network['organizations'] }}" - loop_control: - loop_var: organizationItem - # Generate indy crypto and insert into Vault - - name: 'Generate indy crypto and insert into Vault' + # Get each node keys for the Genesis setup + - name: "Get keys for the Genesis setup" include_role: - name: setup/crypto + name: setup/genesis-node-keys vars: - organization: "{{ organizationItem.name | lower }}" - component_ns: "{{ organizationItem.name | lower }}-ns" - component_name: "{{ organization }}" - services: "{{ organizationItem.services }}" - kubernetes: "{{ organizationItem.k8s }}" - vault: "{{ organizationItem.vault }}" - gitops: "{{ organizationItem.gitops }}" - vault_ac_token: "{{ ac_vault_tokens[organization] }}" + component_ns: "{{ org.name | lower }}-ns" + kubernetes: "{{ org.k8s }}" loop: "{{ network['organizations'] }}" loop_control: - loop_var: organizationItem - when: organizationItem.org_status is not defined or organizationItem.org_status == 'new' - - # Create and deploy domain genesis - - name: 'Create domain genesis' - include_role: - name: setup/domain_genesis - - # Create and deploy pool genesis - - name: 'Create pool genesis' - include_role: - name: setup/pool_genesis + loop_var: org - # Add new Trustees via existing Trustee - - name: "Add New Trustees via existing Trustee" + # Install Genesis + - name: "Install Genesis" include_role: - name: setup/trustees - vars: - new_org_query: "organizations[?org_status=='new']" - neworg: "{{ network | json_query(new_org_query) | first }}" - organization: "{{ organizationItem.name | lower }}" - component_ns: "{{ organizationItem.name | lower }}-ns" - component_name: "{{ organization }}" - kubernetes: "{{ organizationItem.k8s }}" - gitops: "{{ organizationItem.gitops }}" - vault: "{{ organizationItem.vault }}" - loop: "{{ network['organizations'] }}" - loop_control: - loop_var: organizationItem - when: - - (add_new_org|bool and add_new_org_network_trustee_present|bool) - - (organizationItem.org_status is not defined or organizationItem.org_status == 'existing') + name: setup/genesis - # Add new Stewards via existing Trustee - - name: "Add New Stewards via existing Trustee" + # Install Steward nodes + - name: Install Steward nodes include_role: name: setup/stewards vars: - new_org_query: "organizations[?org_status=='new']" - neworg: "{{ network | json_query(new_org_query) | first }}" - organization: "{{ organizationItem.name | lower }}" - component_ns: "{{ organizationItem.name | lower }}-ns" - component_name: "{{ organization }}" - kubernetes: "{{ organizationItem.k8s }}" - gitops: "{{ organizationItem.gitops }}" - vault: "{{ organizationItem.vault }}" + org_name: "{{ org.name | lower }}" + cloud_provider: "{{ org.cloud_provider | lower }}" + kubernetes: "{{ org.k8s }}" + component_ns: "{{ org_name }}-ns" + component_type: "stewards" + values_dir: "{{playbook_dir}}/../../../{{org.gitops.release_dir}}" + charts_dir: "{{ org.gitops.chart_source }}" loop: "{{ network['organizations'] }}" loop_control: - loop_var: organizationItem - when: - - (add_new_org|bool and add_new_org_network_trustee_present|bool) - - (organizationItem.org_status is not defined or organizationItem.org_status == 'existing') + 
loop_var: org - # Deploy all other nodes - - name: 'Deploy nodes' + # Install Endorser node + - name: "Install Endorser node" include_role: - name: setup/node + name: setup/endorser vars: - organization: "{{ organizationItem.name | lower }}" - sc_name: "{{ organization }}-bevel-storageclass" - component_ns: "{{ organizationItem.name | lower }}-ns" - services: "{{ organizationItem.services }}" - kubernetes: "{{ organizationItem.k8s }}" - vault: "{{ organizationItem.vault }}" - gitops: "{{ organizationItem.gitops }}" - genesis: "{{ network.genesis }}" + org_name: "{{ org.name | lower }}" + endorser: "{{ org.services.endorser.name | lower }}" + trustee: "{{ org.services.trustee.name | lower }}" + kubernetes: "{{ org.k8s }}" + component_name: "{{ endorser }}" + component_ns: "{{ org_name }}-ns" + values_dir: "{{ playbook_dir }}/../../../{{ org.gitops.release_dir }}/{{ org_name }}/build" + charts_dir: "{{ org.gitops.chart_source }}" loop: "{{ network['organizations'] }}" loop_control: - loop_var: organizationItem + loop_var: org when: - - (organizationItem.type == 'peer') - - (organizationItem.org_status is not defined or organizationItem.org_status == 'new') - - (not add_new_org|bool or (add_new_org|bool and add_new_org_new_nyms_on_ledger_present|bool)) + - (org.services.endorser is defined) and (org.services.endorser.name | length > 0) - # Create and deploy Endorser Identities - - name: 'Create Endorser Identities' - include_role: - name: setup/endorsers - vars: - organization: "{{ organizationItem.name | lower }}" - component_ns: "{{ organizationItem.name | lower }}-ns" - kubernetes: "{{ organizationItem.k8s }}" - gitops: "{{ organizationItem.gitops }}" - vault: "{{ organizationItem.vault }}" - loop: "{{ network['organizations'] }}" - loop_control: - loop_var: organizationItem - when: - - (organizationItem.type == 'peer') - - (organizationItem.org_status is not defined or organizationItem.org_status == 'new') - - (not add_new_org|bool or (add_new_org|bool and add_new_org_new_nyms_on_ledger_present|bool)) - # These variables can be overriden from the command line vars: install_os: "linux" # Default to linux OS diff --git a/platforms/hyperledger-indy/configuration/roles/check/k8_component/tasks/main.yaml b/platforms/hyperledger-indy/configuration/roles/check/k8_component/tasks/main.yaml index bbba207ce33..efe8ccdf9a6 100644 --- a/platforms/hyperledger-indy/configuration/roles/check/k8_component/tasks/main.yaml +++ b/platforms/hyperledger-indy/configuration/roles/check/k8_component/tasks/main.yaml @@ -28,7 +28,7 @@ role: "ro" shell: | secret="{{ service_account }}-token" - kube_token=$(kubectl --kubeconfig={{ kubernetes.config_file }} -n {{ component_ns }} get secret ${secret} -o jsonpath="{.data.token}" | base64 --decode) + kube_token="$(KUBECONFIG={{ kubernetes.config_file }} kubectl get secret ${secret} -n {{ component_ns }} -o go-template={% raw %}'{{ .data.token }}'{% endraw %} | base64 -d)" vault_token=$(curl --request POST --data '{"jwt": "'"$kube_token"'", "role": "{{ role }}"}' {{ vault.url }}/v1/auth/kubernetes-{{ organization }}-bevel-ac-auth/login | jq -j '.auth.client_token') echo $vault_token register: token_output diff --git a/platforms/hyperledger-indy/configuration/roles/check/validation/tasks/check_count.yaml b/platforms/hyperledger-indy/configuration/roles/check/validation/tasks/check_count.yaml index 3f90de962ae..10131d9f5b6 100644 --- a/platforms/hyperledger-indy/configuration/roles/check/validation/tasks/check_count.yaml +++ 
b/platforms/hyperledger-indy/configuration/roles/check/validation/tasks/check_count.yaml @@ -4,40 +4,23 @@ # SPDX-License-Identifier: Apache-2.0 ############################################################################################## -# Reset counters -- name: Reset counters +# Counting the number of steward nodes +- name: "Count steward nodes" set_fact: - trustee_count=0 - steward_count=0 - endorser_count=0 + total_stewards: "{{ total_stewards | int + 1 }}" + loop: "{{ org.services.stewards }}" + loop_control: + loop_var: stewards + when: (stewards is defined) and (stewards | length > 0) -# Counting Genesis Stewards -- name: "Counting Genesis Stewards" +# Counting the number of trustee nodes +- name: "Count trustee nodes" set_fact: - steward_count={{ steward_count|default(0)|int + 1 }} - total_stewards={{ total_stewards|default(0)|int + 1 }} - loop: "{{ stewards }}" + total_trustee: "{{ total_trustee | int + 1 }}" + when: (org.services.trustee is defined) and (org.services.trustee.name | length > 0) -# Counting trustees per Org -- name: "Counting trustees per Org" +# Counting the number of endorser nodes +- name: "Count endorser nodes" set_fact: - trustee_count={{ trustee_count|default(0)|int + 1 }} - total_trustees={{ total_trustees|default(0)|int + 1 }} - loop: "{{ trustees }}" - -# Print error and end playbook if trustee count limit fails -- name: Print error and end playbook if trustee count limit fails - debug: msg="The trustee count is {{ trustee_count }}. There should be max 1 trustee per organization." - failed_when: trustee_count|int > 1 - -# Counting Endorsers -- name: "Counting Endorsers" - set_fact: - endorser_count={{ endorser_count|default(0)|int + 1 }} - loop: "{{ endorsers }}" - -# Print error abd end playbook if endorser count limit fails -- name: Print error abd end playbook if endorser count limit fails - debug: msg="The endorser count is {{ endorser_count }}. There should be max 1 endorser per organization." - failed_when: endorser_count|int > 1 - when: endorser_count is defined + total_endorser: "{{ total_endorser | int + 1 }}" + when: (org.services.endorser is defined) and (org.services.endorser.name | length > 0) diff --git a/platforms/hyperledger-indy/configuration/roles/check/validation/tasks/main.yaml b/platforms/hyperledger-indy/configuration/roles/check/validation/tasks/main.yaml index aa4835adeaa..da3abda8212 100644 --- a/platforms/hyperledger-indy/configuration/roles/check/validation/tasks/main.yaml +++ b/platforms/hyperledger-indy/configuration/roles/check/validation/tasks/main.yaml @@ -6,36 +6,43 @@ ############################################################################################## # This role checks for validation of network.yaml -# Conditions to be checked -# At least 4 genesis stewards -# Max 1 trustee per org -# Max 1 endorser per org -# At least one trustee per network.yaml +# Conditions to be checked: +# - Exactly 1 trustee is required per organization. +# - Up to 1 endorser is allowed per organization. +# - At least 4 stewards are required collectively across the entire Indy network. 
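+#
+# For reference, a hedged sketch of the organizations[].services shape in network.yaml that
+# these checks expect; the names below are the samples used elsewhere in this change set and
+# a single organization is assumed to host at least four stewards so the network-wide
+# minimum is met:
+#
+#   services:
+#     trustee:
+#       name: authority-trustee       # exactly one trustee per organization
+#     endorser:
+#       name: university-endorser     # at most one endorser per organization
+#     stewards:                       # at least 4 stewards across the whole network
+#       - name: university-steward-1
+#       - name: university-steward-2
+#       - name: university-steward-3
+#       - name: university-steward-4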
############################################################################################## # Set variables - name: Set counters set_fact: total_stewards=0 - total_trustees=0 + total_trustee=0 + total_endorser=0 + organization_count="{{ network['organizations'] | length }}" -# Check Validation -- name: "Check Validation" +# Loop through each organization to count nodes +- name: Count nodes include_tasks: check_count.yaml vars: - trustees: "{{ organizationItem.services.trustees|default([]) }}" - endorsers: "{{ organizationItem.services.endorsers|default([]) }}" - stewards: "{{ organizationItem.services.stewards|default([]) }}" + peers: "{{ org.services.peers | default([]) }}" loop: "{{ network['organizations'] }}" loop_control: - loop_var: organizationItem + loop_var: org -# Print error and end playbook if genesis steward count limit fails -- name: Print error and end playbook if genesis steward count limit fails - debug: msg="The total genesis steward count is {{ total_stewards }}. There should be at least 4 genesis stewards (in case of a fully Hyperledger Bevel-managed cluster)." - failed_when: not add_new_org and total_stewards|int < 4 +# Stop execution if total trustee is not equal to 1 +- name: "Stop execution if total trustee is not equal to 1" + fail: + msg: "Exactly 1 trustee is required per organization." + when: (total_trustee | int) != (organization_count | int) -# Print error and end playbook if total trustee count limit fails -- name: Print error and end playbook if total trustee count limit fails - debug: msg="The total trustee count is {{ total_trustees }}. There should be at least 1 trustee per network (in case of a fully Hyperledger Bevel-managed cluster)." - failed_when: not add_new_org and total_trustees|int < 1 +# Stop execution if total endorser is not equal to 1 +- name: "Stop execution if total endorser is not equal to 1" + fail: + msg: "Up to 1 endorser is allowed per organization." + when: (total_endorser | int) > (organization_count | int) + +# Stop execution if total stewards are less than 4 +- name: Stop execution if total stewards are less than 4 + fail: + msg: "At least 4 stewards are required collectively across the entire Indy network." 
+ when: (total_stewards | int) < 4 diff --git a/platforms/hyperledger-indy/configuration/roles/clean/local_directories/tasks/main.yaml b/platforms/hyperledger-indy/configuration/roles/clean/local_directories/tasks/main.yaml new file mode 100644 index 00000000000..113569f1da5 --- /dev/null +++ b/platforms/hyperledger-indy/configuration/roles/clean/local_directories/tasks/main.yaml @@ -0,0 +1,33 @@ +# Find and delete .json files in platforms/hyperledger-indy/charts/indy-genesis/files directory +- name: "Find .json files in indy-genesis files directory" + find: + paths: "{{ playbook_dir }}/../../hyperledger-indy/charts/indy-genesis/files" + patterns: "*.json" + register: genesis_files_to_delete + +# Delete .json files in indy-genesis files directory +- name: "Delete .json files in indy-genesis files directory" + file: + path: "{{ item.path }}" + state: absent + loop: "{{ genesis_files_to_delete.files }}" + +# Find and delete .json files in platforms/hyperledger-indy/charts/indy-register-identity/files directory +- name: "Find .json files in indy-register-identity files directory" + find: + paths: "{{ playbook_dir }}/../../hyperledger-indy/charts/indy-register-identity/files" + patterns: "*.json" + register: register_files_to_delete + +# Delete .json files in indy-register-identity files directory +- name: "Delete .json files in indy-register-identity files directory" + file: + path: "{{ item.path }}" + state: absent + loop: "{{ register_files_to_delete.files }}" + +# Delete the build directory in platforms/hyperledger-indy/configuration +- name: "Remove build directory from configuration" + file: + path: "{{ playbook_dir }}/../../hyperledger-indy/configuration/build" + state: absent diff --git a/platforms/hyperledger-indy/configuration/roles/clean/vault/tasks/delete_node_keys.yaml b/platforms/hyperledger-indy/configuration/roles/clean/vault/tasks/delete_node_keys.yaml new file mode 100644 index 00000000000..7d1f0436b98 --- /dev/null +++ b/platforms/hyperledger-indy/configuration/roles/clean/vault/tasks/delete_node_keys.yaml @@ -0,0 +1,19 @@ +# Delete keys from HashiCorp Vault +- name: "Delete keys for {{ node_name }} in {{ org_name }} organization from Vault" + shell: | + vault kv delete {{ vault.secret_path | default('secretsv2') }}/{{ org_name }}/{{ node_type }}/{{ node_name }}/client/private/private_keys + vault kv delete {{ vault.secret_path | default('secretsv2') }}/{{ org_name }}/{{ node_type }}/{{ node_name }}/client/private/sig_keys + vault kv delete {{ vault.secret_path | default('secretsv2') }}/{{ org_name }}/{{ node_type }}/{{ node_name }}/client/public/public_keys + vault kv delete {{ vault.secret_path | default('secretsv2') }}/{{ org_name }}/{{ node_type }}/{{ node_name }}/client/public/verif_keys + vault kv delete {{ vault.secret_path | default('secretsv2') }}/{{ org_name }}/{{ node_type }}/{{ node_name }}/identity/private + vault kv delete {{ vault.secret_path | default('secretsv2') }}/{{ org_name }}/{{ node_type }}/{{ node_name }}/identity/public + vault kv delete {{ vault.secret_path | default('secretsv2') }}/{{ org_name }}/{{ node_type }}/{{ node_name }}/node/private/bls_keys + vault kv delete {{ vault.secret_path | default('secretsv2') }}/{{ org_name }}/{{ node_type }}/{{ node_name }}/node/private/private_keys + vault kv delete {{ vault.secret_path | default('secretsv2') }}/{{ org_name }}/{{ node_type }}/{{ node_name }}/node/private/sig_keys + vault kv delete {{ vault.secret_path | default('secretsv2') }}/{{ org_name }}/{{ node_type }}/{{ node_name }}/node/public/bls_keys + 
vault kv delete {{ vault.secret_path | default('secretsv2') }}/{{ org_name }}/{{ node_type }}/{{ node_name }}/node/public/public_keys + vault kv delete {{ vault.secret_path | default('secretsv2') }}/{{ org_name }}/{{ node_type }}/{{ node_name }}/node/public/verif_keys + environment: + VAULT_ADDR: "{{ vault.url }}" + VAULT_TOKEN: "{{ vault.root_token }}" + ignore_errors: true diff --git a/platforms/hyperledger-indy/configuration/roles/clean/vault/tasks/delete_policy_auth.yaml b/platforms/hyperledger-indy/configuration/roles/clean/vault/tasks/delete_policy_auth.yaml new file mode 100644 index 00000000000..95fb4f0ebc6 --- /dev/null +++ b/platforms/hyperledger-indy/configuration/roles/clean/vault/tasks/delete_policy_auth.yaml @@ -0,0 +1,29 @@ +# Remove Policies of trustees +- name: Remove Policies of trustees + environment: + vault_token: "{{ vault.root_token }}" + shell: | + validateVaultResponse () { + if [ ${1} != 204 ]; then + echo "ERROR: Unable to retrieve. Http status: ${1}" + exit 1 + fi + } + response_status=$(curl -o /dev/null -s -w "%{http_code}\n" --header "X-Vault-Token: ${vault_token}" --request DELETE {{ vault.url }}/v1/sys/policy/bevel-vault-mgmt-{{ org_name }}-keys-{{ org_ns }}-policy) + validateVaultResponse ${response_status} + ignore_errors: true + +# Remove Kubernetes Authentication Methods of organizations +- name: Remove Kubernetes Authentication Methods of {{ org_name }} + environment: + vault_token: "{{ vault.root_token }}" + shell: | + validateVaultResponse () { + if [ ${1} != 204 ]; then + echo "ERROR: Unable to retrieve. Http status: ${1}" + exit 1 + fi + } + response_status=$(curl -o /dev/null -s -w "%{http_code}\n" --header "X-Vault-Token: ${vault_token}" --request DELETE {{ vault.url }}/v1/sys/auth/{{ org_name }}) + validateVaultResponse ${response_status} + ignore_errors: true diff --git a/platforms/hyperledger-indy/configuration/roles/clean/vault/tasks/main.yaml b/platforms/hyperledger-indy/configuration/roles/clean/vault/tasks/main.yaml index 2eae964156e..38f2d696771 100644 --- a/platforms/hyperledger-indy/configuration/roles/clean/vault/tasks/main.yaml +++ b/platforms/hyperledger-indy/configuration/roles/clean/vault/tasks/main.yaml @@ -9,166 +9,34 @@ ############################################################################################## --- -# Remove Indy Crypto -- name: Remove Indy Crypto of {{ organization }} - environment: - vault_token: "{{ vault.root_token }}" - shell: | - validateVaultResponse () { - if [ ${1} != 200 ]; then - echo "ERROR: Unable to retrieve. Http status: ${1}" - exit 1 - fi - } - # Check if vault URL is valid - response_status=$(curl -o /dev/null -s -w "%{http_code}\n" {{ vault.url }}/ui/) - validateVaultResponse ${response_status} - - curl --header "X-Vault-Token: ${vault_token}" --request DELETE {{ vault.url }}/v1/sys/mounts/{{ organization }} - -# Remove Policies of trustees -- name: Remove Policies of trustees - environment: - vault_token: "{{ vault.root_token }}" - shell: | - validateVaultResponse () { - if [ ${1} != 204 ]; then - echo "ERROR: Unable to retrieve. 
Http status: ${1}" - exit 1 - fi - } - response_status=$(curl -o /dev/null -s -w "%{http_code}\n" --header "X-Vault-Token: ${vault_token}" --request DELETE {{ vault.url }}/v1/sys/policy/{{ organization }}-{{ serviceItem.name }}-ro) - validateVaultResponse ${response_status} - loop: "{{ services.trustees }}" - loop_control: - loop_var: serviceItem - when: services.trustees is defined - -# Remove Policies of stewards -- name: Remove Policies of stewards - environment: - vault_token: "{{ vault.root_token }}" - shell: | - validateVaultResponse () { - if [ ${1} != 204 ]; then - echo "ERROR: Unable to retrieve. Http status: ${1}" - exit 1 - fi - } - response_status=$(curl -o /dev/null -s -w "%{http_code}\n" --header "X-Vault-Token: ${vault_token}" --request DELETE {{ vault.url }}/v1/sys/policy/{{ organization }}-{{ serviceItem.name }}-ro) - validateVaultResponse ${response_status} - loop: "{{ services.stewards }}" - loop_control: - loop_var: serviceItem - when: services.stewards is defined - -# Remove Policies of endorsers -- name: Remove Policies of endorsers - environment: - vault_token: "{{ vault.root_token }}" - shell: | - validateVaultResponse () { - if [ ${1} != 204 ]; then - echo "ERROR: Unable to retrieve. Http status: ${1}" - exit 1 - fi - } - response_status=$(curl -o /dev/null -s -w "%{http_code}\n" --header "X-Vault-Token: ${vault_token}" --request DELETE {{ vault.url }}/v1/sys/policy/{{ organization }}-{{ serviceItem.name }}-ro) - validateVaultResponse ${response_status} - loop: "{{ services.endorsers }}" - loop_control: - loop_var: serviceItem - when: services.endorsers is defined - -# Remove Policies of organization -- name: Remove Policies of {{ organization }} - environment: - vault_token: "{{ vault.root_token }}" - shell: | - validateVaultResponse () { - if [ ${1} != 204 ]; then - echo "ERROR: Unable to retrieve. Http status: ${1}" - exit 1 - fi - } - - response_status=$(curl -o /dev/null -s -w "%{http_code}\n" --header "X-Vault-Token: ${vault_token}" --request DELETE {{ vault.url }}/v1/sys/policy/{{ organization }}-bevel-ac-ro) - validateVaultResponse ${response_status} - response_status=$(curl -o /dev/null -s -w "%{http_code}\n" --header "X-Vault-Token: ${vault_token}" --request DELETE {{ vault.url }}/v1/sys/policy/{{ organization }}-admin-rw) - validateVaultResponse ${response_status} - -# Remove Kubernetes Authentication Methods of organizations -- name: Remove Kubernetes Authentication Methods of {{ organization }} +# Delete keys associated with trustee nodes +- name: Delete trustee keys + include_tasks: delete_node_keys.yaml vars: - auth_path: "kubernetes-{{ organization }}" - environment: - vault_token: "{{ vault.root_token }}" - shell: | - validateVaultResponse () { - if [ ${1} != 204 ]; then - echo "ERROR: Unable to retrieve. 
Http status: ${1}" - exit 1 - fi - } - response_status=$(curl -o /dev/null -s -w "%{http_code}\n" --header "X-Vault-Token: ${vault_token}" --request DELETE {{ vault.url }}/v1/sys/auth/{{ auth_path }}-admin-auth) - validateVaultResponse ${response_status} - response_status=$(curl -o /dev/null -s -w "%{http_code}\n" --header "X-Vault-Token: ${vault_token}" --request DELETE {{ vault.url }}/v1/sys/auth/{{ auth_path }}-bevel-ac-auth) - validateVaultResponse ${response_status} - when: vault.root_token is defined + node_name: "{{ org.services.trustee.name | lower }}" + node_type: "trustees" + when: (org.services.trustee is defined) and (org.services.trustee.name | length > 0) -# Remove Kubernetes Authentication Methods of trustees -- name: Remove Kubernetes Authentication Methods of {{ organization }} of trustees - environment: - vault_token: "{{ vault.root_token }}" - shell: | - validateVaultResponse () { - if [ ${1} != 204 ]; then - echo "ERROR: Unable to retrieve. Http status: ${1}" - exit 1 - fi - } - auth_path="kubernetes-{{ organization }}-{{ serviceItem.name }}-auth" - response_status=$(curl -o /dev/null -s -w "%{http_code}\n" --header "X-Vault-Token: ${vault_token}" --request DELETE {{ vault.url }}/v1/sys/auth/${auth_path}) - validateVaultResponse ${response_status} - loop: "{{ services.trustees }}" - loop_control: - loop_var: serviceItem - when: vault.root_token is defined and services.trustees is defined +# Delete keys associated with endorser nodes +- name: Delete endorser keys + include_tasks: delete_node_keys.yaml + vars: + node_name: "{{ org.services.endorser.name | lower }}" + node_type: "endorsers" + when: (org.services.endorser is defined) and (org.services.endorser.name | length > 0) -# Remove Kubernetes Authentication Methods of stewards -- name: Remove Kubernetes Authentication Methods of {{ organization }} of stewards - environment: - vault_token: "{{ vault.root_token }}" - shell: | - validateVaultResponse () { - if [ ${1} != 204 ]; then - echo "ERROR: Unable to retrieve. Http status: ${1}" - exit 1 - fi - } - auth_path="kubernetes-{{ organization }}-{{ serviceItem.name }}-auth" - response_status=$(curl -o /dev/null -s -w "%{http_code}\n" --header "X-Vault-Token: ${vault_token}" --request DELETE {{ vault.url }}/v1/sys/auth/${auth_path}) - validateVaultResponse ${response_status} - loop: "{{ services.stewards }}" +# Delete keys associated with steward nodes +- name: Delete steward keys + include_tasks: delete_node_keys.yaml + vars: + node_name: "{{ stewards.name | lower }}" + node_type: "stewards" + loop: "{{ org.services.stewards }}" loop_control: - loop_var: serviceItem - when: vault.root_token is defined and services.stewards is defined + loop_var: stewards + when: (stewards is defined) and (stewards | length > 0) -# Remove Kubernetes Authentication Methods of endorsers -- name: Remove Kubernetes Authentication Methods of {{ organization }} of endorsers - environment: - vault_token: "{{ vault.root_token }}" - shell: | - validateVaultResponse () { - if [ ${1} != 204 ]; then - echo "ERROR: Unable to retrieve. 
Http status: ${1}" - exit 1 - fi - } - auth_path="kubernetes-{{ organization }}-{{ serviceItem.name }}-auth" - response_status=$(curl -o /dev/null -s -w "%{http_code}\n" --header "X-Vault-Token: ${vault_token}" --request DELETE {{ vault.url }}/v1/sys/auth/${auth_path}) - validateVaultResponse ${response_status} - loop: "{{ services.endorsers }}" - loop_control: - loop_var: serviceItem - when: vault.root_token is defined and services.endorsers is defined +# Delete Organization policy and auth engine +- name: "Delete Organization {{ org_name }} policy and auth engine" + include_tasks: delete_policy_auth.yaml + when: vault.root_token is defined diff --git a/platforms/hyperledger-indy/configuration/roles/create/helm_component/auth_job/templates/auth_job.tpl b/platforms/hyperledger-indy/configuration/roles/create/helm_component/auth_job/templates/auth_job.tpl index 183d1e7ccfa..c609ca6c98c 100644 --- a/platforms/hyperledger-indy/configuration/roles/create/helm_component/auth_job/templates/auth_job.tpl +++ b/platforms/hyperledger-indy/configuration/roles/create/helm_component/auth_job/templates/auth_job.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }}-{{ identity_name }}-auth-job diff --git a/platforms/hyperledger-indy/configuration/roles/create/helm_component/crypto/templates/crypto-generate.tpl b/platforms/hyperledger-indy/configuration/roles/create/helm_component/crypto/templates/crypto-generate.tpl index 8b692cddd0c..0417e1c68cd 100644 --- a/platforms/hyperledger-indy/configuration/roles/create/helm_component/crypto/templates/crypto-generate.tpl +++ b/platforms/hyperledger-indy/configuration/roles/create/helm_component/crypto/templates/crypto-generate.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }}-{{ identity_name }} diff --git a/platforms/hyperledger-indy/configuration/roles/create/helm_component/domain_genesis/templates/domain_genesis.tpl b/platforms/hyperledger-indy/configuration/roles/create/helm_component/domain_genesis/templates/domain_genesis.tpl index 8951d125941..804e13447a7 100644 --- a/platforms/hyperledger-indy/configuration/roles/create/helm_component/domain_genesis/templates/domain_genesis.tpl +++ b/platforms/hyperledger-indy/configuration/roles/create/helm_component/domain_genesis/templates/domain_genesis.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/hyperledger-indy/configuration/roles/create/helm_component/ledger_txn/templates/ledger-txn.tpl b/platforms/hyperledger-indy/configuration/roles/create/helm_component/ledger_txn/templates/ledger-txn.tpl index ad6ad3a64c4..ba4d16b0af4 100644 --- a/platforms/hyperledger-indy/configuration/roles/create/helm_component/ledger_txn/templates/ledger-txn.tpl +++ b/platforms/hyperledger-indy/configuration/roles/create/helm_component/ledger_txn/templates/ledger-txn.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }}-{{ identity_name }}-transaction diff --git a/platforms/hyperledger-indy/configuration/roles/create/helm_component/node/templates/node.tpl b/platforms/hyperledger-indy/configuration/roles/create/helm_component/node/templates/node.tpl index 58017acc7b7..8e70131cc45 100644 --- 
a/platforms/hyperledger-indy/configuration/roles/create/helm_component/node/templates/node.tpl +++ b/platforms/hyperledger-indy/configuration/roles/create/helm_component/node/templates/node.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} @@ -40,18 +40,15 @@ spec: name: {{ component_name }} repository: {{ network.docker.url }}/bevel-indy-node:{{ network.version }} node: - name: {{ component_name }} + name: {{ stewardItem.name }} ip: 0.0.0.0 publicIp: {{ stewardItem.publicIp }} port: {{ stewardItem.node.port }} - targetPort: {{ stewardItem.node.targetPort }} ambassadorPort: {{ stewardItem.node.ambassador }} client: - name: {{ component_name }} publicIp: {{ stewardItem.publicIp }} ip: 0.0.0.0 port: {{ stewardItem.client.port }} - targetPort: {{ stewardItem.client.targetPort }} ambassadorPort: {{ stewardItem.client.ambassador }} service: {% if organizationItem.cloud_provider != 'minikube' %} @@ -102,3 +99,4 @@ spec: keys: storagesize: 3Gi storageClassName: {{ sc_name }} + diff --git a/platforms/hyperledger-indy/configuration/roles/create/helm_component/peer/tasks/main.yaml b/platforms/hyperledger-indy/configuration/roles/create/helm_component/peer/tasks/main.yaml new file mode 100644 index 00000000000..e48cdedcb61 --- /dev/null +++ b/platforms/hyperledger-indy/configuration/roles/create/helm_component/peer/tasks/main.yaml @@ -0,0 +1,24 @@ +# Ensure the required dir exists +- name: "Ensure {{ values_dir }}/{{ org_name }} dir exists" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/directory" + vars: + path: "{{ values_dir }}/{{ org_name }}" + +# Generate the HelmRelease value file for the given component type +- name: Generate the HelmRelease value file + template: + src: "{{ dlt_templates[component_type] }}" + dest: "{{ values_dir }}/{{ org_name }}/{{ component_name }}.yaml" + +############################################################################################ +# Test the value file for syntax errors / missing values +# This is done by calling the helm_lint role and passing the value file parameter +# When a new helm_component is added, changes should be made in helm_lint role as well +- name: Helm lint + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/helm_lint" + vars: + helmtemplate_type: "{{ component_type }}" + chart_path: "{{ charts_dir }}" + value_file: "{{ values_dir }}/{{ org_name }}/{{ component_name }}.yaml" diff --git a/platforms/hyperledger-indy/configuration/roles/create/helm_component/peer/templates/generate_genesis.tpl b/platforms/hyperledger-indy/configuration/roles/create/helm_component/peer/templates/generate_genesis.tpl new file mode 100644 index 00000000000..f0886271502 --- /dev/null +++ b/platforms/hyperledger-indy/configuration/roles/create/helm_component/peer/templates/generate_genesis.tpl @@ -0,0 +1,53 @@ +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: "{{ component_name }}" + annotations: + fluxcd.io/automated: "false" + namespace: "{{ component_ns }}" +spec: + releaseName: "{{ component_name }}" + interval: 1m + chart: + spec: + interval: 1m + chart: "{{ charts_dir }}/indy-genesis" + sourceRef: + kind: GitRepository + name: flux-{{ network.env.type }} + namespace: flux-{{ network.env.type }} + values: + global: + serviceAccountName: vault-auth + cluster: + provider: "{{ cloud_provider }}" + cloudNativeServices: false + kubernetesUrl: "{{ kubernetes_server }}" + vault:
type: hashicorp + role: vault-role + network: indy + address: "{{ vault.url }}" + authPath: "{{ org_name }}" + secretEngine: secretsv2 + secretPrefix: "data/{{ org_name }}" + proxy: + provider: ambassador + image: + alpineutils: "{{ network.docker.url }}/bevel-alpine-ext:latest" + settings: + removeKeysOnDelete: true + secondaryGenesis: {{ secondaryGenesis }} +{% if (not secondaryGenesis) and (trustee_name is defined) %} + trustees: + - name: "{{ trustee_name }}" +{% if steward_list is defined %} + stewards: +{% for steward in steward_list %} + - name: {{ steward.name }} + publicIp: {{ steward.publicIp }} + nodePort: {{ steward.nodePort }} + clientPort: {{ steward.clientPort }} +{% endfor %} +{% endif %} +{% endif %} diff --git a/platforms/hyperledger-indy/configuration/roles/create/helm_component/peer/templates/generate_keys.tpl b/platforms/hyperledger-indy/configuration/roles/create/helm_component/peer/templates/generate_keys.tpl new file mode 100644 index 00000000000..7d64eaf4b67 --- /dev/null +++ b/platforms/hyperledger-indy/configuration/roles/create/helm_component/peer/templates/generate_keys.tpl @@ -0,0 +1,52 @@ +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: "{{ component_name }}" + annotations: + fluxcd.io/automated: "false" + namespace: "{{ component_ns }}" +spec: + releaseName: "{{ component_name }}" + interval: 1m + chart: + spec: + interval: 1m + chart: "{{ charts_dir }}/indy-key-mgmt" + sourceRef: + kind: GitRepository + name: flux-{{ network.env.type }} + namespace: flux-{{ network.env.type }} + values: + global: + serviceAccountName: vault-auth + cluster: + provider: "{{ cloud_provider }}" + cloudNativeServices: false + kubernetesUrl: "{{ kubernetes_server }}" + vault: + type: hashicorp + role: vault-role + network: indy + address: "{{ vault.url }}" + authPath: "{{ org_name }}" + secretEngine: secretsv2 + secretPrefix: "data/{{ org_name }}" + proxy: + provider: ambassador + image: + alpineutils: "{{ network.docker.url }}/bevel-indy-key-mgmt:1.12.6" + settings: + removeKeysOnDelete: true + identities: +{% if trustee_name %} + trustee: "{{ trustee_name }}" +{% endif %} +{% if endorser_name %} + endorser: "{{ endorser_name }}" +{% endif %} +{% if steward_list %} + stewards: +{% for steward in steward_list %} + - "{{ steward }}" +{% endfor %} +{% endif %} diff --git a/platforms/hyperledger-indy/configuration/roles/create/helm_component/peer/templates/stewards.tpl b/platforms/hyperledger-indy/configuration/roles/create/helm_component/peer/templates/stewards.tpl new file mode 100644 index 00000000000..e3ac494f6f7 --- /dev/null +++ b/platforms/hyperledger-indy/configuration/roles/create/helm_component/peer/templates/stewards.tpl @@ -0,0 +1,50 @@ +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: "{{ component_name }}" + annotations: + fluxcd.io/automated: "false" + namespace: "{{ component_ns }}" +spec: + releaseName: "{{ component_name }}" + interval: 1m + chart: + spec: + interval: 1m + chart: "{{ charts_dir }}/indy-node" + sourceRef: + kind: GitRepository + name: flux-{{ network.env.type }} + namespace: flux-{{ network.env.type }} + values: + global: + serviceAccountName: vault-auth + cluster: + provider: "{{ cloud_provider }}" + cloudNativeServices: false + proxy: + provider: ambassador + storage: + keys: "512Mi" + data: "4Gi" + reclaimPolicy: "Delete" + volumeBindingMode: Immediate + allowedTopologies: + enabled: false + image: + initContainer: "{{ network.docker.url }}/bevel-alpine-ext:latest" + cli: "{{ 
network.docker.url }}/bevel-indy-ledger-txn:latest" + indyNode: + repository: "{{ network.docker.url }}/bevel-indy-node" + tag: 1.12.6 + settings: + network: bevel + serviceType: ClusterIP + node: + publicIp: {{ node_public_ip }} + port: {{ node_port }} + externalPort: {{ node_external_port }} + client: + publicIp: {{ client_public_ip }} + port: {{ client_port }} + externalPort: {{ client_external_port }} diff --git a/platforms/r3-corda/configuration/roles/setup/springboot_services/vars/main.yaml b/platforms/hyperledger-indy/configuration/roles/create/helm_component/peer/vars/main.yaml similarity index 74% rename from platforms/r3-corda/configuration/roles/setup/springboot_services/vars/main.yaml rename to platforms/hyperledger-indy/configuration/roles/create/helm_component/peer/vars/main.yaml index 8445331d9c6..80c755ef91e 100644 --- a/platforms/r3-corda/configuration/roles/setup/springboot_services/vars/main.yaml +++ b/platforms/hyperledger-indy/configuration/roles/create/helm_component/peer/vars/main.yaml @@ -5,4 +5,6 @@ ############################################################################################## dlt_templates: - web: web.tpl \ No newline at end of file + generate-keys: generate_keys.tpl + generate-genesis: generate_genesis.tpl + stewards: stewards.tpl diff --git a/platforms/hyperledger-indy/configuration/roles/create/helm_component/pool_genesis/templates/pool_genesis.tpl b/platforms/hyperledger-indy/configuration/roles/create/helm_component/pool_genesis/templates/pool_genesis.tpl index 745c0e0f226..4b05ca13394 100644 --- a/platforms/hyperledger-indy/configuration/roles/create/helm_component/pool_genesis/templates/pool_genesis.tpl +++ b/platforms/hyperledger-indy/configuration/roles/create/helm_component/pool_genesis/templates/pool_genesis.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/hyperledger-indy/configuration/roles/create/namespace/tasks/main.yaml b/platforms/hyperledger-indy/configuration/roles/create/namespace/tasks/main.yaml index 001241ccc72..19ecefdf744 100644 --- a/platforms/hyperledger-indy/configuration/roles/create/namespace/tasks/main.yaml +++ b/platforms/hyperledger-indy/configuration/roles/create/namespace/tasks/main.yaml @@ -35,5 +35,5 @@ name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" vars: GIT_DIR: "{{ playbook_dir }}/../../../" - gitops: "{{ organizationItem.gitops }}" + gitops: "{{ org.gitops }}" msg: "[ci skip] Pushing deployment files for namespace" diff --git a/platforms/hyperledger-indy/configuration/roles/create/secrets/tasks/main.yaml b/platforms/hyperledger-indy/configuration/roles/create/secrets/tasks/main.yaml new file mode 100644 index 00000000000..cc31dd73c32 --- /dev/null +++ b/platforms/hyperledger-indy/configuration/roles/create/secrets/tasks/main.yaml @@ -0,0 +1,32 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Wait for namespace to be created by flux +- name: "Wait for the namespace {{ component_ns }} to be created" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/k8_component" + vars: + component_type: "Namespace" + component_name: "{{ component_ns }}" + type: "retry" + +# Create the vault roottoken secret +- name: "Create vault token secret" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/shared_k8s_secrets" + vars: + namespace: "{{ component_ns }}" + check: "token_secret" + +# Create the docker pull credentials for image registry +- name: "Create docker credentials secret" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/shared_k8s_secrets" + vars: + namespace: "{{ component_ns }}" + check: "docker_credentials" + when: + - network.docker.username is defined diff --git a/platforms/hyperledger-indy/configuration/roles/setup/endorser/tasks/endorser_keys.yaml b/platforms/hyperledger-indy/configuration/roles/setup/endorser/tasks/endorser_keys.yaml new file mode 100644 index 00000000000..f93e9a74075 --- /dev/null +++ b/platforms/hyperledger-indy/configuration/roles/setup/endorser/tasks/endorser_keys.yaml @@ -0,0 +1,41 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Get endorser public identity secret +- name: "Get endorser public identity secret" + k8s_info: + kind: Secret + name: "{{ endorser }}-identity-public" + namespace: "{{ component_ns }}" + kubeconfig: "{{ kubernetes.config_file }}" + register: endorser_identity_public + +# Extract and save the endorser's DID to a JSON file +- name: "Extract and save endorser DID to a JSON file" + copy: + content: "{{ endorser_identity_public.resources[0].data.value | b64decode | from_json | json_query('did') }}" + dest: "{{ files_dir }}/{{ endorser }}-did.json" + vars: + files_dir: "{{ playbook_dir }}/../../hyperledger-indy/charts/indy-register-identity/files" + when: endorser_identity_public is defined and endorser_identity_public.resources[0].data.value is defined + +# Get endorser node public verification keys secret +- name: "Get endorser node public verification keys secret" + k8s_info: + kind: Secret + name: "{{ endorser }}-node-public-verif-keys" + namespace: "{{ component_ns }}" + kubeconfig: "{{ kubernetes.config_file }}" + register: endorser_node_public_verif_keys + +# Extract and save the endorser's verification key to a JSON file +- name: "Extract and save the endorser's verification key to a JSON file" + copy: + content: "{{ endorser_node_public_verif_keys.resources[0].data.value | b64decode | from_json | json_query('\"verification-key\"') }}" + dest: "{{ files_dir }}/{{ endorser }}-verkey.json" + vars: + files_dir: "{{ playbook_dir }}/../../hyperledger-indy/charts/indy-register-identity/files" + when: endorser_node_public_verif_keys is defined and endorser_node_public_verif_keys.resources[0].data.value is defined diff --git a/platforms/hyperledger-indy/configuration/roles/setup/endorser/tasks/main.yaml b/platforms/hyperledger-indy/configuration/roles/setup/endorser/tasks/main.yaml new file mode 100644 index 00000000000..fc0d4e0d4ab --- /dev/null +++ 
b/platforms/hyperledger-indy/configuration/roles/setup/endorser/tasks/main.yaml @@ -0,0 +1,24 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Get Endorser keys +- name: "Get Endorser keys" + include_tasks: endorser_keys.yaml + +# Deploy endorser node +- name: "Deploy endorser node" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/job_component" + vars: + type: "indy_endorser" + +# Check if endorser job is completed +- name: "Check if endorser job is completed" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" + vars: + component_type: Job + namespace: "{{ component_ns }}" diff --git a/platforms/hyperledger-indy/configuration/roles/setup/endorsers/tasks/main.yaml b/platforms/hyperledger-indy/configuration/roles/setup/endorsers/tasks/main.yaml deleted file mode 100644 index ab771d7c78f..00000000000 --- a/platforms/hyperledger-indy/configuration/roles/setup/endorsers/tasks/main.yaml +++ /dev/null @@ -1,49 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -################################################################################################### -# This role creates the deployment files for endorsers and pushes them to repository -################################################################################################### - -# Wait for namespace creation for identities - - name: "Wait for namespace creation for identities" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/k8_component" - vars: - component_type: "Namespace" - component_name: "{{ component_ns }}" - type: "retry" - -# Create image pull secrets - - name: "Create image pull secret for identities" - include_role: - name: create/imagepullsecret - -# Create Deployment files for new Identities - - name: "Create Deployment files" - include_tasks: nested_main.yaml - vars: - component_type: "identity" - component_name: "{{ organizationItem.name }}" - indy_version: "indy-{{ network.version }}" - release_dir: "{{playbook_dir}}/../../../{{organizationItem.gitops.release_dir}}/{{ organizationItem.name | lower }}" - newIdentity: "{{ organizationItem.services.endorsers }}" - component_ns: "{{ organizationItem.name | lower }}-ns" - org_vault_url: "{{ organizationItem.vault.url}}" - when: organizationItem is defined and organizationItem.services.endorsers is defined - -# Wait until identities are creating - - name: "Wait until identities are creating" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" - vars: - component_type: "Job" - namespace: "{{ component_ns }}" - component_name: "{{ organizationItem.name }}-{{ endorserItem.name }}-transaction" - loop: "{{ organizationItem.services.endorsers }}" - when: organizationItem is defined and organizationItem.services.endorsers is defined - loop_control: - loop_var: endorserItem diff --git a/platforms/hyperledger-indy/configuration/roles/setup/endorsers/tasks/nested_main.yaml 
b/platforms/hyperledger-indy/configuration/roles/setup/endorsers/tasks/nested_main.yaml deleted file mode 100644 index fbc71bacc26..00000000000 --- a/platforms/hyperledger-indy/configuration/roles/setup/endorsers/tasks/nested_main.yaml +++ /dev/null @@ -1,131 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# This Selects the Admin Identity for an organization from Network yaml. -# If trustee is present the first trustee will be the admin -# If Steward is present the first steward will be the admin -# If both trustee and steward are not present in a particular organization, -# the first global admin will be the trustee for this organization. ---- -- name: Select Admin Identity for Organisation {{ component_name }} - shell: | - selectedAdmin="" - first_global_admin="" - admin_Org="" - global_Org="" - admin_url="" - global_url="" - global_type="" - admin_type="" - {% if network['organizations'] is defined %} - {% for organization in network['organizations'] %} - first_admin_in_org="" - {% if organization.services.trustees is defined %} - {% for trustee in organization.services.trustees %} - if [ -z "$first_admin_in_org" ] - then - if [ {{ organization.name }} == "{{ component_name }}" ] - then - first_admin_in_org="{{ trustee.name }}" - admin_Org="{{ organization.name }}" - admin_url="{{ organization.vault.url }}" - admin_type="trustees" - fi - fi - if [ -z "$first_global_admin" ] - then - first_global_admin="{{ trustee.name }}" - global_Org="{{ organization.name }}" - global_url="{{ organization.vault.url }}" - global_type="trustees" - fi - {% endfor %} - {% endif %} - {% if organization.services.stewards is defined %} - {% for steward in organization.services.stewards %} - if [ -z "$first_admin_in_org" ] - then - if [ {{ organization.name }} == "{{ component_name }}" ] - then - first_admin_in_org="{{ steward.name }}" - admin_Org="{{ organization.name }}" - admin_url="{{ organization.vault.url }}" - admin_type="stewards" - fi - fi - if [ -z "$first_global_admin" ] - then - first_global_admin="{{ steward.name }}" - global_Org="{{ organization.name }}" - global_url="{{ organization.vault.url }}" - global_type="stewards" - fi - {% endfor %} - {% endif %} - {% endfor %} - {% endif %} - - if [ ! -z "$first_admin_in_org" ] - then - selectedAdmin="${first_admin_in_org}" - adminUrl="${admin_url}" - adminOrg="${admin_Org}" - admin_type="${admin_type}" - else - selectedAdmin="${first_global_admin}" - adminUrl="${global_url}" - adminOrg="${global_Org}" - admin_type="${global_type}" - fi - rm -rf admin.yaml - echo "selectedAdmin: ${selectedAdmin}" >> admin.yaml - echo "adminUrl: ${adminUrl}" >> admin.yaml - echo "adminOrg: ${adminOrg}" >> admin.yaml - echo "type: ${admin_type}" >> admin.yaml - register: admin_file - -#---------------------------------------------------------------------------------------------- -- name: "Inserting file into Variable" - include_vars: - file: admin.yaml - name: admin_var - -#---------------------------------------------------------------------------------------------- -# Create Deployment files for new Identities -- name: "Calling Helm Release Development Role..." 
- include_role: - name: create/helm_component/ledger_txn - vars: - component_type: "identity" - component_name: "{{ organizationItem.name }}" - indy_version: "indy-{{ network.version }}" - release_dir: "{{playbook_dir}}/../../../{{organizationItem.gitops.release_dir}}/{{ organizationItem.name | lower }}" - component_ns: "{{ organizationItem.name | lower }}-ns" - newIdentityName: "{{ newIdentityItem.name }}" - newIdentityRole: "ENDORSER" - adminIdentityName: "{{ admin_var.selectedAdmin }}" - admin_component_name: "{{ admin_var.adminOrg }}" - admin_org_vault_url: "{{ admin_var.adminUrl }}" - new_org_vault_url: "{{ organizationItem.vault.url}}" - new_component_name: "{{ component_name }}" - admin_type: "{{ admin_var.type }}" - identity_type: "endorsers" - loop: "{{ newIdentity }}" - loop_control: - loop_var: newIdentityItem - when: newIdentity is defined - -- name: "Delete file" - shell: | - rm admin.yaml -# --------------------------------------------------------------------- -# push the created deployment files to repository -- name: "Push the created deployment files to repository" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" - vars: - GIT_DIR: "{{ playbook_dir }}/../../../" - msg: "[ci skip] Pushing deployment files" diff --git a/platforms/hyperledger-indy/configuration/roles/setup/generate-keys/tasks/main.yaml b/platforms/hyperledger-indy/configuration/roles/setup/generate-keys/tasks/main.yaml new file mode 100644 index 00000000000..e5c9dc3a183 --- /dev/null +++ b/platforms/hyperledger-indy/configuration/roles/setup/generate-keys/tasks/main.yaml @@ -0,0 +1,56 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Initialize variables for trustee, endorser, and stewards +- name: "Initialize trustee, endorser and stewards variables" + set_fact: + trustee_name: "{{ org.services.trustee.name | default('') }}" + endorser_name: "{{ org.services.endorser.name | default('') }}" + steward_list: [] + +# Add stewards to the steward list +- name: "Add stewards to the steward list" + set_fact: + steward_list: "{{ steward_list + [stewards_item.name] }}" + loop: "{{ stewards }}" + loop_control: + loop_var: stewards_item + ignore_errors: true + +# Gather Kubernetes cluster information +- name: Gather Kubernetes cluster information + community.kubernetes.k8s_cluster_info: + kubeconfig: "{{ kubernetes.config_file }}" + register: cluster_info + +# Set the Kubernetes server URL fact +- name: Set kubernetes_server_url fact + set_fact: + kubernetes_server_url: "{{ cluster_info.connection.host }}" + +# Generate the HR file for the specified organization +- name: "Generate HR file for {{ org_name }} organization" + include_role: + name: create/helm_component/peer + vars: + kubernetes_server: "{{ kubernetes_server_url }}" + +# Push the created deployment files to repository +- name: "Push the created deployment files to repository" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" + vars: + GIT_DIR: "{{ playbook_dir }}/../../../" + msg: "[ci skip] Pushing key management job files for {{ component_ns }}" + gitops: "{{ org.gitops }}" + +# Check if the job is completed +- name: "Check if {{ component_name }} job is completed in the {{ org_name }} organization" + include_role: + name: "{{ playbook_dir 
}}/../../shared/configuration/roles/check/helm_component" + vars: + component_type: Job + namespace: "{{ component_ns }}" diff --git a/platforms/hyperledger-indy/configuration/roles/setup/genesis-node-keys/tasks/main.yaml b/platforms/hyperledger-indy/configuration/roles/setup/genesis-node-keys/tasks/main.yaml new file mode 100644 index 00000000000..b8030a3f871 --- /dev/null +++ b/platforms/hyperledger-indy/configuration/roles/setup/genesis-node-keys/tasks/main.yaml @@ -0,0 +1,22 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Retrieve Trustee's keys if Trustee service is defined +- name: "Retrieve Trustee's keys" + include_tasks: trustee_keys.yaml + when: + - org.services.trustee is defined + - org.services.trustee.name | length > 0 + +# Retrieve Steward's keys for each steward in the list of stewards if stewards are defined +- name: "Retrieve Steward's keys" + include_tasks: steward_keys.yaml + loop: "{{ org.services.stewards }}" + loop_control: + loop_var: steward + when: + - steward is defined + - steward | length > 0 diff --git a/platforms/hyperledger-indy/configuration/roles/setup/genesis-node-keys/tasks/steward_keys.yaml b/platforms/hyperledger-indy/configuration/roles/setup/genesis-node-keys/tasks/steward_keys.yaml new file mode 100644 index 00000000000..a68683bb5d2 --- /dev/null +++ b/platforms/hyperledger-indy/configuration/roles/setup/genesis-node-keys/tasks/steward_keys.yaml @@ -0,0 +1,68 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Get steward public identity secret +- name: "Get steward public identity secret" + k8s_info: + kind: Secret + name: "{{ steward.name }}-identity-public" + namespace: "{{ component_ns }}" + kubeconfig: "{{ kubernetes.config_file }}" + register: steward_identity_public + +# Extract and save steward DID to a JSON file +- name: "Extract and save steward DID to a JSON file" + copy: + content: "{{ steward_identity_public.resources[0].data.value | b64decode | from_json | json_query('did') }}" + dest: "{{ files_dir }}/{{ steward.name }}-did.json" + vars: + files_dir: "{{ playbook_dir }}/../../hyperledger-indy/charts/indy-genesis/files" + when: steward_identity_public is defined and steward_identity_public.resources[0].data.value is defined + +# Get steward node public verif keys +- name: "Get steward node public verif keys" + k8s_info: + kind: Secret + name: "{{ steward.name }}-node-public-verif-keys" + namespace: "{{ component_ns }}" + kubeconfig: "{{ kubernetes.config_file }}" + register: steward_node_public_verif_keys + +# Extract and save the steward's verification key to a JSON file +- name: "Extract and save the steward's verification key to a JSON file" + copy: + content: "{{ steward_node_public_verif_keys.resources[0].data.value | b64decode | from_json | json_query('\"verification-key\"') }}" + dest: "{{ files_dir }}/{{ steward.name }}-verkey.json" + vars: + files_dir: "{{ playbook_dir }}/../../hyperledger-indy/charts/indy-genesis/files" + when: steward_node_public_verif_keys is defined and steward_node_public_verif_keys.resources[0].data.value is defined + +# Get steward's node public BLS keys +- name: "Get steward's node public BLS keys" + k8s_info: + kind: Secret + name: "{{ steward.name }}-node-public-bls-keys" + namespace: "{{ component_ns }}" + kubeconfig: "{{ kubernetes.config_file }}" + register: steward_node_public_bls_keys + +# Extract and save the steward's BLS POP to a JSON file +- name: "Extract and save the steward's BLS POP to a JSON file" + copy: + content: "{{ steward_node_public_bls_keys.resources[0].data.value | b64decode | from_json | json_query('\"bls-key-pop\"') }}" + dest: "{{ files_dir }}/{{ steward.name }}-blspop.json" + vars: + files_dir: "{{ playbook_dir }}/../../hyperledger-indy/charts/indy-genesis/files" + when: steward_node_public_bls_keys is defined and steward_node_public_bls_keys.resources[0].data.value is defined + +# Extract and save the steward's BLS public key to a JSON file +- name: "Extract and save the steward's BLS public key to a JSON file" + copy: + content: "{{ steward_node_public_bls_keys.resources[0].data.value | b64decode | from_json | json_query('\"bls-public-key\"') }}" + dest: "{{ files_dir }}/{{ steward.name }}-blspub.json" + vars: + files_dir: "{{ playbook_dir }}/../../hyperledger-indy/charts/indy-genesis/files" + when: steward_node_public_bls_keys is defined and steward_node_public_bls_keys.resources[0].data.value is defined diff --git a/platforms/hyperledger-indy/configuration/roles/setup/genesis-node-keys/tasks/trustee_keys.yaml b/platforms/hyperledger-indy/configuration/roles/setup/genesis-node-keys/tasks/trustee_keys.yaml new file mode 100644 index 00000000000..dbe0c679490 --- /dev/null +++ b/platforms/hyperledger-indy/configuration/roles/setup/genesis-node-keys/tasks/trustee_keys.yaml @@ -0,0 +1,41 @@ 
+############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Get trustee public identity secret +- name: "Get trustee public identity secret" + k8s_info: + kind: Secret + name: "{{ org.services.trustee.name }}-identity-public" + namespace: "{{ component_ns }}" + kubeconfig: "{{ kubernetes.config_file }}" + register: trustee_identity_public_secret + +# Extract and save trustee DID to a JSON file +- name: "Extract and save trustee DID to a JSON file" + copy: + content: "{{ trustee_identity_public_secret.resources[0].data.value | b64decode | from_json | json_query('did') }}" + dest: "{{ files_dir }}/{{ org.services.trustee.name }}-did.json" + vars: + files_dir: "{{ playbook_dir }}/../../hyperledger-indy/charts/indy-genesis/files" + when: trustee_identity_public_secret is defined and trustee_identity_public_secret.resources[0].data.value is defined + +# Get trustee node public verif keys +- name: "Get trustee node public verif keys" + k8s_info: + kind: Secret + name: "{{ org.services.trustee.name }}-node-public-verif-keys" + namespace: "{{ component_ns }}" + kubeconfig: "{{ kubernetes.config_file }}" + register: trustee_node_public_verif_keys + +# Extract and save the trustee's verification key to a JSON file +- name: "Extract and save the trustee's verification key to a JSON file" + copy: + content: "{{ trustee_node_public_verif_keys.resources[0].data.value | b64decode | from_json | json_query('\"verification-key\"') }}" + dest: "{{ files_dir }}/{{ org.services.trustee.name }}-verkey.json" + vars: + files_dir: "{{ playbook_dir }}/../../hyperledger-indy/charts/indy-genesis/files" + when: trustee_node_public_verif_keys is defined and trustee_node_public_verif_keys.resources[0].data.value is defined diff --git a/platforms/quorum/configuration/roles/create/crypto/ibft/tasks/main.yaml b/platforms/hyperledger-indy/configuration/roles/setup/genesis/tasks/main.yaml similarity index 50% rename from platforms/quorum/configuration/roles/create/crypto/ibft/tasks/main.yaml rename to platforms/hyperledger-indy/configuration/roles/setup/genesis/tasks/main.yaml index 452646df471..f36a00cfc33 100644 --- a/platforms/quorum/configuration/roles/create/crypto/ibft/tasks/main.yaml +++ b/platforms/hyperledger-indy/configuration/roles/setup/genesis/tasks/main.yaml @@ -4,9 +4,11 @@ # SPDX-License-Identifier: Apache-2.0 ############################################################################################## -# Create crypto material for each peer with IBFT consensus -- name: Create crypto material for each peer with IBFT consensus - include_tasks: nested_main.yaml - loop: "{{ peers }}" - loop_control: - loop_var: peer +# Primary genesis setup +- name: "Primary genesis setup" + include_tasks: primary_genesis.yaml + +# Secondary genesis setup if there are multiple organizations +- name: "Secondary genesis Setup" + include_tasks: secondary_genesis.yaml + when: network['organizations'] | length > 1 diff --git a/platforms/hyperledger-indy/configuration/roles/setup/genesis/tasks/primary_genesis.yaml b/platforms/hyperledger-indy/configuration/roles/setup/genesis/tasks/primary_genesis.yaml new file mode 100644 index 00000000000..e92de1006d5 --- /dev/null +++ b/platforms/hyperledger-indy/configuration/roles/setup/genesis/tasks/primary_genesis.yaml @@ -0,0 +1,66 @@ 
+############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Initialize variable and list +- name: "Initialize variable and list" + set_fact: + trustee_list: [] + steward_list: [] + +# Store Trustee, Endorser and Stewards info +- name: "Store Trustee, Endorser and Stewards info" + include_tasks: primary_genesis_peers.yaml + vars: + org_name: "{{ org.name | lower }}" + stewards: "{{ org.services.stewards }}" + loop: "{{ network['organizations'] }}" + loop_control: + loop_var: org + +# Gather Kubernetes cluster information +- name: Gather Kubernetes cluster information + community.kubernetes.k8s_cluster_info: + kubeconfig: "{{ network['organizations'][0].k8s.config_file }}" + register: cluster_info + +# Set the Kubernetes server URL fact +- name: Set kubernetes_server_url fact + set_fact: + kubernetes_server_url: "{{ cluster_info.connection.host }}" + +# Install primary genesis +- name: "Install primary genesis" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/job_component" + vars: + type: "indy_genesis" + org: "{{ network['organizations'] | first }}" + org_name: "{{ org.name | lower }}" + stewards: "{{ org.services.stewards }}" + cloud_provider: "{{ org.cloud_provider | lower }}" + vault: "{{ org.vault }}" + kubernetes_server: "{{ kubernetes_server_url }}" + kubernetes: "{{ org.k8s }}" + component_type: "generate-genesis" + component_ns: "{{ org_name }}-ns" + component_name: "{{ org_name }}-genesis" + secondaryGenesis: false + values_dir: "{{ playbook_dir }}/../../../{{ org.gitops.release_dir }}/{{ org_name }}/build" + charts_dir: "{{ org.gitops.chart_source }}" + +# Check if primary genesis job is completed +- name: "Check if primary genesis job is completed" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" + vars: + org: "{{ network['organizations'] | first }}" + org_name: "{{ org.name | lower }}" + component_name: "{{ org_name }}-genesis" + component_type: Job + component_ns: "{{ org.name | lower }}-ns" + namespace: "{{ component_ns }}" + kubernetes: "{{ org.k8s }}" diff --git a/platforms/hyperledger-indy/configuration/roles/setup/genesis/tasks/primary_genesis_peers.yaml b/platforms/hyperledger-indy/configuration/roles/setup/genesis/tasks/primary_genesis_peers.yaml new file mode 100644 index 00000000000..849461468b3 --- /dev/null +++ b/platforms/hyperledger-indy/configuration/roles/setup/genesis/tasks/primary_genesis_peers.yaml @@ -0,0 +1,20 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved.
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Store trustee's name if it is not already set +- name: "Store trustee's name" + set_fact: + trustee_list: "{{ trustee_list + [org.services.trustee.name] }}" + when: (org.services.trustee is defined) and (org.services.trustee.name | length > 0) + +# Add each steward's details (name, public IP, node port, client port) to the steward_list +- name: "Maintain each steward's node info" + set_fact: + steward_list: "{{ steward_list + [{'name': stewards_item.name, 'publicIp': stewards_item.publicIp, 'nodePort': stewards_item.node.ambassador, 'clientPort': stewards_item.client.ambassador}] }}" + loop: "{{ stewards }}" + loop_control: + loop_var: stewards_item + ignore_errors: true diff --git a/platforms/hyperledger-indy/configuration/roles/setup/genesis/tasks/secondary_genesis.yaml b/platforms/hyperledger-indy/configuration/roles/setup/genesis/tasks/secondary_genesis.yaml new file mode 100644 index 00000000000..e2820cea0a2 --- /dev/null +++ b/platforms/hyperledger-indy/configuration/roles/setup/genesis/tasks/secondary_genesis.yaml @@ -0,0 +1,60 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Initialize variables for the first organization +- name: "Initialize variables for the first organization" + set_fact: + first_org_name: "{{ network.organizations[0].name | lower }}" + first_org_kubernetes: "{{ network.organizations[0].k8s }}" + +# Retrieve the ConfigMap for domain transactions genesis for the first organization +- name: "Get domain transactions genesis ConfigMap" + community.kubernetes.k8s_info: + api_version: v1 + kind: ConfigMap + name: dtg + namespace: "{{ first_org_name }}-ns" + kubeconfig: "{{ first_org_kubernetes.config_file }}" + register: dtg_configmap + +# Retrieve the ConfigMap for pool transactions genesis for the first organization +- name: "Get pool transactions genesis ConfigMap" + community.kubernetes.k8s_info: + api_version: v1 + kind: ConfigMap + name: ptg + namespace: "{{ first_org_name }}-ns" + kubeconfig: "{{ first_org_kubernetes.config_file }}" + register: ptg_configmap + +# Save the domain transactions genesis content to a file +- name: "Save domain transactions genesis to file" + copy: + content: "{{ dtg_configmap.resources[0].data.domain_transactions_genesis }}" + dest: "{{ playbook_dir }}/../../hyperledger-indy/charts/indy-genesis/files/domain_transactions_genesis.json" + when: dtg_configmap.resources[0].data.domain_transactions_genesis is defined + +# Save the pool transactions genesis content to a file +- name: "Save pool transactions genesis to file" + copy: + content: "{{ ptg_configmap.resources[0].data.pool_transactions_genesis }}" + dest: "{{ playbook_dir }}/../../hyperledger-indy/charts/indy-genesis/files/pool_transactions_genesis.json" + when: ptg_configmap.resources[0].data.pool_transactions_genesis is defined + +# Generate secondary genesis HR files for the remaining organizations +- name: "Generate secondary genesis HR file for the remaining organization" + include_tasks: secondary_genesis_orgs.yaml + vars: + org_name: "{{ org.name | lower }}" + component_name: "{{ org_name }}-genesis" + component_ns: "{{ org_name }}-ns" + component_type: "generate-genesis" + cloud_provider: "{{ 
org.cloud_provider | lower }}" + kubernetes: "{{ org.k8s }}" + vault: "{{ org.vault }}" + loop: "{{ network['organizations'][1:] }}" # Skip the first organization + loop_control: + loop_var: org diff --git a/platforms/hyperledger-indy/configuration/roles/setup/genesis/tasks/secondary_genesis_orgs.yaml b/platforms/hyperledger-indy/configuration/roles/setup/genesis/tasks/secondary_genesis_orgs.yaml new file mode 100644 index 00000000000..1e3d3a34b66 --- /dev/null +++ b/platforms/hyperledger-indy/configuration/roles/setup/genesis/tasks/secondary_genesis_orgs.yaml @@ -0,0 +1,35 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Gather Kubernetes cluster information +- name: Gather cluster info + community.kubernetes.k8s_cluster_info: + kubeconfig: "{{ kubernetes.config_file }}" + register: cluster_info + +# Set the Kubernetes server URL fact +- name: Set kubernetes_server_url fact + set_fact: + kubernetes_server_url: "{{ cluster_info.connection.host }}" + +# Install the secondary genesis component for the specified organization +- name: "Install secondary genesis for the {{ org_name }} organization" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/job_component" + vars: + type: "indy_genesis" + kubernetes_server: "{{ kubernetes_server_url }}" + secondaryGenesis: true + values_dir: "{{ playbook_dir }}/../../../{{ org.gitops.release_dir }}/{{ org_name }}/build" + charts_dir: "{{ org.gitops.chart_source }}" + +# Check if the secondary genesis job for the specified organization is completed +- name: "Check if the secondary genesis job for {{ org_name }} is completed" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" + vars: + component_type: Job + namespace: "{{ component_ns }}" diff --git a/platforms/hyperledger-indy/configuration/roles/setup/stewards/tasks/main.yaml b/platforms/hyperledger-indy/configuration/roles/setup/stewards/tasks/main.yaml index f7ef830772c..7d966e3a485 100644 --- a/platforms/hyperledger-indy/configuration/roles/setup/stewards/tasks/main.yaml +++ b/platforms/hyperledger-indy/configuration/roles/setup/stewards/tasks/main.yaml @@ -4,45 +4,10 @@ # SPDX-License-Identifier: Apache-2.0 ############################################################################################## -################################################################################################### -# This role creates the deployment files for stewards and pushes them to repository -################################################################################################### - -# Wait for namespace creation for identities - - name: "Wait for namespace creation for identities" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/k8_component" - vars: - component_type: "Namespace" - component_name: "{{ component_ns }}" - type: "retry" - -# Create image pull secrets - - name: "Create image pull secret for identities" - include_role: - name: create/imagepullsecret - -# Create Deployment files for new Identities - - name: "Create Deployment files" - include_tasks: nested_main.yaml - vars: - component_type: "identity" - component_name: "{{ organizationItem.name }}" - indy_version: "indy-{{ network.version }}" - release_dir:
"{{playbook_dir}}/../../../{{organizationItem.gitops.release_dir}}/{{ organizationItem.name | lower }}" - newIdentity: "{{ neworg.services.stewards }}" - org_vault_url: "{{ organizationItem.vault.url }}" - when: organizationItem is defined and organizationItem.services.stewards is defined - -# Wait until identities are creating - - name: "Wait until identities are creating" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" - vars: - component_type: "Job" - namespace: "{{ component_ns }}" - component_name: "{{ organizationItem.name }}-{{ stewardItem.name }}-transaction" - loop: "{{ neworg.services.stewards }}" - when: neworg is defined and neworg.services.stewards is defined - loop_control: - loop_var: stewardItem +# Deploy Steward nodes +- name: "Deploy Steward nodes" + include_tasks: nested.yaml + loop: "{{ org.services.stewards }}" + loop_control: + loop_var: steward + when: steward is defined and steward | length > 0 diff --git a/platforms/hyperledger-indy/configuration/roles/setup/stewards/tasks/nested.yaml b/platforms/hyperledger-indy/configuration/roles/setup/stewards/tasks/nested.yaml new file mode 100644 index 00000000000..b0a778572b4 --- /dev/null +++ b/platforms/hyperledger-indy/configuration/roles/setup/stewards/tasks/nested.yaml @@ -0,0 +1,38 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Deploy Steward's node +- name: "Deploy {{ steward.name }} node in the {{ org.name }} organization" + include_role: + name: create/helm_component/peer + vars: + node_public_ip: "{{ steward.publicIp }}" + node_port: "{{ steward.node.port | int }}" + node_external_port: "{{ steward.node.ambassador | int }}" + client_public_ip: "{{ steward.publicIp }}" + client_port: "{{ steward.client.port | int }}" + client_external_port: "{{ steward.client.ambassador | int }}" + component_name: "{{ steward.name | lower }}" + +# Push the created deployment files to repository +- name: "Push the created deployment files to repository" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" + vars: + GIT_DIR: "{{ playbook_dir }}/../../../" + msg: "[ci skip] Pushing key management job files for {{ component_ns }}" + gitops: "{{ org.gitops }}" + +# Check if Steward's node is running +- name: "Check if {{ steward.name }} node is running in the {{ org.name }} organization" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" + vars: + component_type: Pod + component_name: "{{ steward.name | lower }}" + label_selectors: + - app = {{ component_name }} + namespace: "{{ component_ns }}" diff --git a/platforms/hyperledger-indy/configuration/roles/setup/vault_kubernetes/tasks/main.yaml b/platforms/hyperledger-indy/configuration/roles/setup/vault_kubernetes/tasks/main.yaml index 1772a393d0b..6a97048382c 100644 --- a/platforms/hyperledger-indy/configuration/roles/setup/vault_kubernetes/tasks/main.yaml +++ b/platforms/hyperledger-indy/configuration/roles/setup/vault_kubernetes/tasks/main.yaml @@ -61,7 +61,7 @@ VAULT_ADDR: "{{ vault.url }}" VAULT_TOKEN: "{{ vault.root_token }}" when: not vault_auth_status # Only when auth_path is NOT in the output of auth_list - ignore_errors: yes + ignore_errors: true 
############################################################################################ # This task get the certificate for the cluster mentioned in k8 secret @@ -90,7 +90,7 @@ VAULT_ADDR: "{{ vault.url }}" VAULT_TOKEN: "{{ vault.root_token }}" register: vault_policy_result - ignore_errors: yes + ignore_errors: true ##################################################################################################################### # This task creates the access policy for organizations diff --git a/platforms/hyperledger-indy/configuration/samples/network-indy-newnode-to-baf-network.yaml b/platforms/hyperledger-indy/configuration/samples/network-indy-newnode-to-baf-network.yaml index c6cfc9d2b97..b9d765389ba 100644 --- a/platforms/hyperledger-indy/configuration/samples/network-indy-newnode-to-baf-network.yaml +++ b/platforms/hyperledger-indy/configuration/samples/network-indy-newnode-to-baf-network.yaml @@ -21,14 +21,13 @@ network: env: type: indy # tag for the environment. Important to run multiple flux on single cluster proxy: ambassador # value has to be 'ambassador' as 'haproxy' has not been implemented for Indy - proxy_namespace: "ambassador" # Namespace for the proxy - ambassadorPorts: - # Specify a list of individual ports to use - ports: [15010, 15023, 15024, 15025, 15033, 15034, 15035, 15043, 15044, 15045] - # Alternatively, specify a range of ports to use all ports within the specified range - # portRange: - # from: 15010 # Starting port of the range - # to: 15045 # Ending port of the range + proxy_namespace: "ambassador" + # These ports are enabled per cluster, so if you have multiple clusters you do not need so many ports + # This sample uses a single cluster, so we have to open 3 ports for each Node. These ports are again specified for each organization below + ambassadorPorts: # Any additional Ambassador ports can be given here, this is valid only if proxy='ambassador' + portRange: # For a range of ports + from: 15010 + to: 15052 loadBalancerSourceRanges: # (Optional) Default value is '0.0.0.0/0', this value can be changed to any other IP adres or list (comma-separated without spaces) of IP adresses, this is valid only if proxy='ambassador' retry_count: 40 # Retry count for the checks external_dns: enabled # Should be enabled if using external-dns for automatic route configuration @@ -37,7 +36,7 @@ network: # Please ensure all required images are built and stored in this registry. # Do not check-in docker_password. docker: - url: "index.docker.io/hyperledgerlabs" + url: "ghcr.io/hyperledger" username: "docker_username" password: "docker_password" @@ -100,10 +99,6 @@ network: # Services maps to the pods that will be deployed on the k8s cluster # This sample has trustee, 2 stewards and endoorser services: - trustees: - - trustee: - name: university-trustee - genesis: true stewards: - steward: name: university-steward-1 @@ -157,18 +152,6 @@ network: port: 9720 targetPort: 9720 ambassador: 9720 # Port for ambassador service - endorsers: - - endorser: - name: university-endorser - full_name: Some Decentralized Identity Mobile Services Partner - avatar: http://university.com/avatar.png - # public endpoint will be {{ endorser.name}}.{{ external_url_suffix}}:{{endorser.server.httpPort}} - # Eg. 
In this sample http://university-endorser.indy.blockchaincloudpoc.com:15033/ - # For minikube: http://>:15033 - server: - httpPort: 15033 - apiPort: 15034 - webhookPort: 15035 - organization: name: bank type: peer @@ -216,10 +199,6 @@ network: # Services maps to the pods that will be deployed on the k8s cluster # This sample has trustee, 2 stewards and endoorser services: - trustees: - - trustee: - name: bank-trustee - genesis: true stewards: - steward: name: bank-steward-1 @@ -234,8 +213,3 @@ network: port: 9712 targetPort: 9712 ambassador: 9712 # Port for ambassador service - endorsers: - - endorser: - name: bank-endorser - full_name: Some Decentralized Identity Mobile Services Provider - avatar: http://bank.com/avatar.png diff --git a/platforms/hyperledger-indy/configuration/samples/network-indy-newnode-to-non-baf-network.yaml b/platforms/hyperledger-indy/configuration/samples/network-indy-newnode-to-non-baf-network.yaml index 47796592ec0..8c3c692d7a8 100644 --- a/platforms/hyperledger-indy/configuration/samples/network-indy-newnode-to-non-baf-network.yaml +++ b/platforms/hyperledger-indy/configuration/samples/network-indy-newnode-to-non-baf-network.yaml @@ -20,14 +20,13 @@ network: env: type: indy # tag for the environment. Important to run multiple flux on single cluster proxy: ambassador # value has to be 'ambassador' as 'haproxy' has not been implemented for Indy - proxy_namespace: "ambassador" # Namespace for the proxy - ambassadorPorts: - # Specify a list of individual ports to use - ports: [15010, 15023, 15024, 15025, 15033, 15034, 15035, 15043, 15044, 15045] - # Alternatively, specify a range of ports to use all ports within the specified range - # portRange: - # from: 15010 # Starting port of the range - # to: 15045 # Ending port of the range + proxy_namespace: "ambassador" + # These ports are enabled per cluster, so if you have multiple clusters you do not need so many ports + # This sample uses a single cluster, so we have to open 3 ports for each Node. These ports are again specified for each organization below + ambassadorPorts: # Any additional Ambassador ports can be given here, this is valid only if proxy='ambassador' + portRange: # For a range of ports + from: 15010 + to: 15052 loadBalancerSourceRanges: # (Optional) Default value is '0.0.0.0/0', this value can be changed to any other IP adres or list (comma-separated without spaces) of IP adresses, this is valid only if proxy='ambassador' retry_count: 40 # Retry count for the checks external_dns: enabled # Should be enabled if using external-dns for automatic route configuration @@ -36,7 +35,7 @@ network: # Please ensure all required images are built and stored in this registry. # Do not check-in docker_password. 
docker: - url: "index.docker.io/hyperledgerlabs" + url: "ghcr.io/hyperledger" username: "docker_username" password: "docker_password" @@ -113,8 +112,3 @@ network: port: 9712 targetPort: 9712 ambassador: 9712 # Port for ambassador service - endorsers: - - endorser: - name: bank-endorser - full_name: Some Decentralized Identity Mobile Services Provider - avatar: http://bank.com/avatar.png diff --git a/platforms/hyperledger-indy/configuration/samples/network-indyv3-aries.yaml b/platforms/hyperledger-indy/configuration/samples/network-indyv3-aries.yaml index 0a169ab5886..ea7ada93189 100644 --- a/platforms/hyperledger-indy/configuration/samples/network-indyv3-aries.yaml +++ b/platforms/hyperledger-indy/configuration/samples/network-indyv3-aries.yaml @@ -14,21 +14,19 @@ network: # Network level configuration specifies the attributes required for each organization # to join an existing network. type: indy - version: 1.12.6 # Supported versions 1.11.0, 1.12.1 & 1.12.6 + version: 1.12.1 # Supported versions 1.11.0 and 1.12.1 #Environment section for Kubernetes setup env: type: "bevel" # tag for the environment. Important to run multiple flux on single cluster - proxy: ambassador # value has to be 'ambassador' as 'haproxy' has not been implemented for Indy - proxy_namespace: "ambassador" # Namespace for the proxy - # Must be different from all stward ambassador ports specified in the rest of this network yaml - ambassadorPorts: # Any additional Ambassador ports can be given here, this is valid only if proxy='ambassador' - # Specify a list of individual ports to use - ports: [15010,15023,15024,15025,15033,15034,15035,15043,15044,15045] # Each Client Agent uses 3 ports # Indy does not use a port range as it creates an NLB, and only necessary ports should be opened - # Alternatively, specify a range of ports to use all ports within the specified range - # portRange: - # from: 15010 # Starting port of the range - # to: 15045 # Ending port of the range + proxy: ambassador # value has to be 'ambassador' as 'haproxy' has not been implemented for Indy + proxy_namespace: "ambassador" + # These ports are enabled per cluster, so if you have multiple clusters you do not need so many ports + # This sample uses a single cluster, so we have to open 3 ports for each Node. These ports are again specified for each organization below + ambassadorPorts: # Any additional Ambassador ports can be given here, this is valid only if proxy='ambassador' + portRange: # For a range of ports + from: 15010 + to: 15052 loadBalancerSourceRanges: # (Optional) Default value is '0.0.0.0/0', this value can be changed to any other IP adres or list (comma-separated without spaces) of IP adresses, this is valid only if proxy='ambassador' retry_count: 20 # Retry count for the checks external_dns: enabled # Should be enabled if using external-dns for automatic route configuration @@ -37,7 +35,7 @@ network: # Please ensure all required images are built and stored in this registry. # Do not check-in docker_password. docker: - url: "index.docker.io/hyperledgerlabs" + url: "ghcr.io/hyperledger" username: "docker_username" password: "docker_password" @@ -69,7 +67,6 @@ network: publicIps: ["1.1.1.1", "2.2.2.2"] # List of all public IP addresses of each availability zone from all organizations in the same k8s cluster azure: node_resource_group: "MC_myResourceGroup_myCluster_westeurope" - # Kubernetes cluster deployment variables. The config file path has to be provided in case # the cluster has already been created. 
k8s: @@ -99,8 +96,7 @@ network: # Services maps to the pods that will be deployed on the k8s cluster # This sample has trustee services: - trustees: - - trustee: + trustee: name: authority-trustee genesis: true server: @@ -124,7 +120,6 @@ network: publicIps: ["192.168.99.173"] # List of all public IP addresses of each availability zone from all organizations in the same k8s cluster azure: node_resource_group: "MC_myResourceGroup_myCluster_westeurope" - # Kubernetes cluster deployment variables. The config file path has to be provided in case # the cluster has already been created. k8s: @@ -154,6 +149,12 @@ network: # Services maps to the pods that will be deployed on the k8s cluster # This sample has trustee, 4 stewards and endorser services: + trustee: + name: university-trustee + genesis: true + server: + port: 8000 + ambassador: 15010 stewards: - steward: name: university-steward-1 @@ -207,8 +208,7 @@ network: port: 15742 targetPort: 15742 ambassador: 15742 # Port for ambassador service - endorsers: - - endorser: + endorser: name: university-endorser full_name: Faber university of the Demo. avatar: http://faber.com/avatar.png @@ -218,4 +218,4 @@ network: server: httpPort: 15033 apiPort: 15034 - webhookPort: 15035 + webhookPort: 15035 diff --git a/platforms/hyperledger-indy/configuration/samples/network-indyv3.yaml b/platforms/hyperledger-indy/configuration/samples/network-indyv3.yaml index 1bfd1806dc5..cd33f02c6e3 100644 --- a/platforms/hyperledger-indy/configuration/samples/network-indyv3.yaml +++ b/platforms/hyperledger-indy/configuration/samples/network-indyv3.yaml @@ -5,53 +5,57 @@ ############################################################################################## --- -# yaml-language-server: $schema=../../../../platforms/network-schema.json -# This is a sample configuration file for hyperledger indy which can reused for a sample indy network of 9 nodes. -# It has 3 organizations: -# 1. organization "authority" with 1 trustee -# 2. organization "provider" with 1 trustee, 2 stewards and 1 endorser -# 3. organization "partner" with 1 trustee, 2 stewards and 1 endorser +############################################################################################## +# Network Configuration File for HyperLedger-Indy Distributed Ledger Technology (DLT) Platform + +## Overview +# This configuration file is intended for deploying a HyperLedger-Indy platform. +# The deployment must adhere to the following network rules: +# - Exactly 1 trustee is required per organization. +# - Up to 1 endorser is allowed per organization. +# - At least 4 stewards are required collectively across the entire Indy network. + +## Sample Configuration +# This sample configuration file demonstrates a HyperLedger-Indy network with four organizations: +# - Organization 1: Contains only the Trustee. +# - Organization 2: Contains one Trustee, two Stewards, and one Endorser. +# - Organization 3: Contains one Trustee, two Stewards, and one Endorser. +# - Organization 4: Contains one Trustee and one Endorser. + +## Customization +# We can customize this configuration to include any number of organizations. +# However, it is imperative to comply with the network rules mentioned in the Overview section. +############################################################################################## network: # Network level configuration specifies the attributes required for each organization # to join an existing network. 
type: indy - version: 1.12.6 # Supported versions 1.11.0, 1.12.1 & 1.12.6 + version: 1.12.1 # Supported versions 1.11.0 and 1.12.1 #Environment section for Kubernetes setup env: - type: "dev" # tag for the environment. Important to run multiple flux on single cluster - proxy: ambassador # value has to be 'ambassador' as 'haproxy' has not been implemented for Indy - proxy_namespace: "ambassador" # Namespace for the proxy - # Must be different from all other ports specified in the rest of this network yaml - ambassadorPorts: # Any additional Ambassador ports can be given here, this is valid only if proxy='ambassador' - # Specify a list of individual ports to use - ports: [15010,15023,15024,15025,15033,15034,15035,15043,15044,15045] # Each Client Agent uses 3 ports # Indy does not use a port range as it creates an NLB, and only necessary ports should be opened - # Alternatively, specify a range of ports to use all ports within the specified range - # portRange: - # from: 15010 # Starting port of the range - # to: 15045 # Ending port of the range - loadBalancerSourceRanges: # (Optional) Default value is '0.0.0.0/0', this value can be changed to any other IP adres or list (comma-separated without spaces) of IP adresses, this is valid only if proxy='ambassador' - retry_count: 20 # Retry count for the checks - external_dns: enabled # Should be enabled if using external-dns for automatic route configuration + type: "dev" # Environment tag, useful for running multiple instances on a single cluster + proxy: ambassador # Must be 'ambassador' as 'haproxy' is not implemented for Indy + proxy_namespace: "ambassador" # Namespace for the proxy + # These ports are enabled per cluster, so if you have multiple clusters you do not need so many ports + # This sample uses a single cluster, so we have to open 3 ports for each Node. These ports are again specified for each organization below + ambassadorPorts: # Any additional Ambassador ports can be given here, this is valid only if proxy='ambassador' + portRange: # Range of ports for Ambassador + from: 15010 + to: 15052 + loadBalancerSourceRanges: # (Optional) Default value is '0.0.0.0/0', this value can be changed to any other IP adres or list (comma-separated without spaces) of IP adresses, this is valid only if proxy='ambassador' + retry_count: 20 # Retry count for the checks + external_dns: enabled # Should be enabled if using external-dns for automatic route configuration # Docker registry details where images are stored. This will be used to create k8s secrets # Please ensure all required images are built and stored in this registry. # Do not check-in docker_password. docker: - url: "index.docker.io/hyperledgerlabs" + url: "ghcr.io/hyperledger" username: "docker_username" password: "docker_password" - # It's used as the Indy network name (has impact e.g. on paths where the Indy nodes look for crypto files on their local filesystem) - name: bevel - - # Information about pool transaction genesis and domain transactions genesis - genesis: - state: absent # must be absent when network is created from scratch - pool: /path/to/pool_transactions_genesis # path where pool_transactions_genesis will be stored locally - domain: /path/to/domain_transactions_genesis # path where domain_transactions_genesis will be stored locally - # Allows specification of one or many organizations that will be connecting to a network. organizations: # Specification for the 1st organization. 
Each organization maps to a VPC and a separate k8s cluster @@ -59,7 +63,7 @@ network: name: authority type: peer external_url_suffix: indy.blockchaincloudpoc.com # Provide the external dns suffix. Only used when Indy webserver/Clients are deployed. - cloud_provider: aws-baremetal # Values can be 'aws-baremetal', 'aws' or 'minikube' + cloud_provider: aws # Supported values: 'aws-baremetal' | 'aws' | 'azure' | 'gcp' | 'minikube' | aws: access_key: "aws_access_key" # AWS Access key @@ -101,8 +105,7 @@ network: # Services maps to the pods that will be deployed on the k8s cluster # This sample has trustee services: - trustees: - - trustee: + trustee: name: authority-trustee genesis: true server: @@ -111,9 +114,9 @@ network: # Specification for the 2nd organization. Each organization maps to a VPC and a separate k8s cluster - organization: - name: provider + name: university type: peer - cloud_provider: aws + cloud_provider: aws # Supported values: 'aws-baremetal' | 'aws' | 'azure' | 'gcp' | 'minikube' | external_url_suffix: indy.blockchaincloudpoc.com # Provide the external dns suffix. Only used when Indy webserver/Clients are deployed. aws: @@ -156,13 +159,15 @@ network: # Services maps to the pods that will be deployed on the k8s cluster # This sample has trustee, 2 stewards and endoorser services: - trustees: - - trustee: - name: provider-trustee + trustee: + name: university-trustee genesis: true + server: + port: 8001 + ambassador: 15011 stewards: - steward: - name: provider-steward-1 + name: university-steward-1 type: VALIDATOR genesis: true publicIp: 3.221.78.194 # IP address of current organization in current availability zone @@ -175,46 +180,45 @@ network: targetPort: 9712 ambassador: 9712 # Port for ambassador service - steward: - name: provider-steward-2 + name: university-steward-2 type: VALIDATOR genesis: true - publicIp: 3.221.78.194 # IP address of current organization in current availability zone + publicIp: 108.142.59.4 # 3.221.78.194 # IP address of current organization in current availability zone node: port: 9721 targetPort: 9721 - ambassador: 9721 # Port for ambassador service + ambassador: 9721 # Port for ambassador service client: port: 9722 targetPort: 9722 - ambassador: 9722 # Port for ambassador service - endorsers: - - endorser: - name: provider-endorser - full_name: Some Decentralized Identity Mobile Services Provider - avatar: http://provider.com/avatar.png + ambassador: 9722 # Port for ambassador service + endorser: + name: university-endorser + full_name: Some Decentralized Identity Mobile Services Partner + avatar: http://partner.com/avatar.png # public endpoint will be {{ endorser.name}}.{{ external_url_suffix}}:{{endorser.server.httpPort}} - # Eg. In this sample http://provider-endorser.indy.blockchaincloudpoc.com:15023/ - # For minikube: http://>:15023 + # Eg. In this sample http://partner-endorser.indy.blockchaincloudpoc.com:15012/ + # For minikube: http://>:15012 server: - httpPort: 15023 - apiPort: 15024 - webhookPort: 15025 - - # Specification for the 3rd organization. Each organization maps to a VPC and a separate k8s cluster + httpPort: 15012 + apiPort: 15013 + webhookPort: 15014 + + # Specification for the 3nd organization. Each organization maps to a VPC and a separate k8s cluster - organization: - name: partner + name: provider type: peer - cloud_provider: aws + cloud_provider: aws # Supported values: 'aws-baremetal' | 'aws' | 'azure' | 'gcp' | 'minikube' | external_url_suffix: indy.blockchaincloudpoc.com # Provide the external dns suffix. 
Only used when Indy webserver/Clients are deployed. - + aws: access_key: "aws_access_key" # AWS Access key secret_key: "aws_secret_key" # AWS Secret key encryption_key: "encryption_key_id" # AWS encryption key. If present, it's used as the KMS key id for K8S storage class encryption. zone: "availability_zone" # AWS availability zone region: "region" # AWS region - - publicIps: ["3.221.78.194"] # List of all public IP addresses of each availability zone from all organizations in the same k8s cluster + + publicIps: ["3.221.78.194"] # List of all public IP addresses of each availability zone from all organizations in the same k8s cluster azure: node_resource_group: "MC_myResourceGroup_myCluster_westeurope" @@ -234,59 +238,125 @@ network: # Do not check-in git_access_token gitops: git_protocol: "https" # Option for git over https or ssh - git_url: "https://github.com//bevel.git" # Gitops https or ssh url for flux value files - branch: "develop" # Git branch where release is being made - release_dir: "platforms/hyperledger-indy/releases/dev" # Relative Path in the Git repo for flux sync per environment. - chart_source: "platforms/hyperledger-indy/charts" # Relative Path where the Helm charts are stored in Git repo - git_repo: "github.com//bevel.git" # Gitops git repository URL for git push - username: "git_username" # Git Service user who has rights to check-in in all branches - password: "git_access_token" # Git Server user password - email: "git@email.com" # Email to use in git config - private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) + git_url: "https://github.com//bevel.git" # Gitops https or ssh url for flux value files + branch: "develop" # Git branch where release is being made + release_dir: "platforms/hyperledger-indy/releases/dev" # Relative Path in the Git repo for flux sync per environment. 
+ chart_source: "platforms/hyperledger-indy/charts" # Relative Path where the Helm charts are stored in Git repo + git_repo: "github.com//bevel.git" # Gitops git repository URL for git push + username: "git_username" # Git Service user who has rights to check-in in all branches + password: "git_access_token" # Git Server user password + email: "git@email.com" # Email to use in git config + private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) # Services maps to the pods that will be deployed on the k8s cluster # This sample has trustee, 2 stewards and endoorser services: - trustees: - - trustee: - name: partner-trustee + trustee: + name: provider-trustee genesis: true + server: + port: 8002 + ambassador: 15021 stewards: - steward: - name: partner-steward-1 + name: provider-steward-1 type: VALIDATOR genesis: true - publicIp: 3.221.78.194 # IP address of current organization in current availability zone + publicIp: 3.221.78.194 # IP address of the ambassador proxy node: port: 9731 targetPort: 9731 - ambassador: 9731 # Port for ambassador service + ambassador: 9721 # Port for ambassador service client: port: 9732 targetPort: 9732 - ambassador: 9732 # Port for ambassador service + ambassador: 9722 # Port for ambassador service - steward: - name: partner-steward-2 + name: provider-steward-2 type: VALIDATOR genesis: true - publicIp: 3.221.78.194 # IP address of current organization in current availability zone + publicIp: 3.221.78.194 # IP address of the ambassador proxy node: port: 9741 targetPort: 9741 - ambassador: 9741 # Port for ambassador service + ambassador: 9721 # Port for ambassador service client: port: 9742 targetPort: 9742 - ambassador: 9742 # Port for ambassador service - endorsers: - - endorser: + ambassador: 9722 # Port for ambassador service + endorser: + name: provider-endorser + full_name: Some Decentralized Identity Mobile Services Provider + avatar: http://provider.com/avatar.png + # public endpoint will be {{ endorser.name}}.{{ external_url_suffix}}:{{endorser.server.httpPort}} + # Eg. In this sample http://provider-endorser.indy.blockchaincloudpoc.com:15022/ + # For minikube: http://>:15022 + server: + httpPort: 15022 + apiPort: 15023 + webhookPort: 15024 + + # Specification for the 4th organization. Each organization maps to a VPC and a separate k8s cluster + - organization: + name: partner + type: peer + cloud_provider: aws # Supported values: 'aws-baremetal' | 'aws' | 'azure' | 'gcp' | 'minikube' | + external_url_suffix: indy.blockchaincloudpoc.com # Provide the external dns suffix. Only used when Indy webserver/Clients are deployed. + + aws: + access_key: "aws_access_key" # AWS Access key + secret_key: "aws_secret_key" # AWS Secret key + encryption_key: "encryption_key_id" # AWS encryption key. If present, it's used as the KMS key id for K8S storage class encryption. + zone: "availability_zone" # AWS availability zone + region: "region" # AWS region + + publicIps: ["3.221.78.194"] # List of all public IP addresses of each availability zone from all organizations in the same k8s cluster + azure: + node_resource_group: "MC_myResourceGroup_myCluster_westeurope" + + # Kubernetes cluster deployment variables. The config file path has to be provided in case + # the cluster has already been created. + k8s: + config_file: "/path/to/cluster_config" + context: "kubernetes-admin@kubernetes" + + # Hashicorp Vault server address and root-token. Vault should be unsealed. 
+ # Do not check-in root_token + vault: + url: "vault_addr" + root_token: "vault_root_token" + + # Git Repo details which will be used by GitOps/Flux. + # Do not check-in git_access_token + gitops: + git_protocol: "https" # Option for git over https or ssh + git_url: "https://github.com//bevel.git" # Gitops https or ssh url for flux value files + branch: "develop" # Git branch where release is being made + release_dir: "platforms/hyperledger-indy/releases/dev" # Relative Path in the Git repo for flux sync per environment. + chart_source: "platforms/hyperledger-indy/charts" # Relative Path where the Helm charts are stored in Git repo + git_repo: "github.com//bevel.git" # Gitops git repository URL for git push + username: "git_username" # Git Service user who has rights to check-in in all branches + password: "git_access_token" # Git Server user password + email: "git@email.com" # Email to use in git config + private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) + + # Services maps to the pods that will be deployed on the k8s cluster + # This sample has trustee, 2 stewards and endoorser + services: + trustee: + name: partner-trustee + genesis: true + server: + port: 8004 + ambassador: 15031 + endorser: name: partner-endorser full_name: Some Decentralized Identity Mobile Services Partner avatar: http://partner.com/avatar.png # public endpoint will be {{ endorser.name}}.{{ external_url_suffix}}:{{endorser.server.httpPort}} - # Eg. In this sample http://partner-endorser.indy.blockchaincloudpoc.com:15033/ - # For minikube: http://>:15033 + # Eg. In this sample http://partner-endorser.indy.blockchaincloudpoc.com:15032/ + # For minikube: http://>:15032 server: - httpPort: 15033 - apiPort: 15034 - webhookPort: 15035 + httpPort: 15032 + apiPort: 15033 + webhookPort: 15034 diff --git a/platforms/hyperledger-indy/configuration/samples/network-minikube-aries.yaml b/platforms/hyperledger-indy/configuration/samples/network-minikube-aries.yaml index 026efe9594f..c15a5649884 100644 --- a/platforms/hyperledger-indy/configuration/samples/network-minikube-aries.yaml +++ b/platforms/hyperledger-indy/configuration/samples/network-minikube-aries.yaml @@ -27,7 +27,7 @@ network: # Please ensure all required images are built and stored in this registry. # Do not check-in docker_password. docker: - url: "index.docker.io/hyperledgerlabs" + url: "ghcr.io/hyperledger" username: "docker_username" password: "docker_password" @@ -80,8 +80,7 @@ network: # Services maps to the pods that will be deployed on the k8s cluster # This sample has trustee services: - trustees: - - trustee: + trustee: name: authority-trustee genesis: true server: @@ -179,8 +178,7 @@ network: port: 15742 targetPort: 15742 ambassador: 15742 - endorsers: - - endorser: + endorser: name: university-endorser full_name: Faber university of the Demo. avatar: http://faber.com/avatar.png diff --git a/platforms/hyperledger-indy/configuration/samples/network-minikube.yaml b/platforms/hyperledger-indy/configuration/samples/network-minikube.yaml index af0f2e66cc1..ec9a7f41d83 100644 --- a/platforms/hyperledger-indy/configuration/samples/network-minikube.yaml +++ b/platforms/hyperledger-indy/configuration/samples/network-minikube.yaml @@ -24,7 +24,7 @@ network: # Please ensure all required images are built and stored in this registry. # Do not check-in docker_password. 
docker: - url: "index.docker.io/hyperledgerlabs" + url: "ghcr.io/hyperledger" username: "docker_username" password: "docker_password" @@ -77,8 +77,7 @@ network: # Services maps to the pods that will be deployed on the k8s cluster # This sample has trustee services: - trustees: - - trustee: + trustee: name: authority-trustee genesis: true server: @@ -150,8 +149,7 @@ network: port: 15722 targetPort: 15722 ambassador: 15722 - endorsers: - - endorser: + endorser: name: provider-endorser full_name: Some Decentralized Identity Mobile Services Provider avatar: http://provider.com/avatar.png @@ -228,15 +226,3 @@ network: port: 15742 targetPort: 15742 ambassador: 15742 - endorsers: - - endorser: - name: partner-endorser - full_name: Some Decentralized Identity Mobile Services Partner - avatar: http://partner.com/avatar.png - # public endpoint will be {{ endorser.name}}.{{ external_url_suffix}}:{{endorser.server.httpPort}} - # Eg. In this sample http://provider-endorser.indy.blockchaincloudpoc.com:15033/ - # For minikube: http://>:15033 - server: - httpPort: 15033 - apiPort: 15034 - webhookPort: 15035 diff --git a/platforms/hyperledger-indy/images/indy-key-mgmt/Dockerfile b/platforms/hyperledger-indy/images/indy-key-mgmt/Dockerfile index 28d620f33fd..2b8eefccf1e 100644 --- a/platforms/hyperledger-indy/images/indy-key-mgmt/Dockerfile +++ b/platforms/hyperledger-indy/images/indy-key-mgmt/Dockerfile @@ -4,7 +4,7 @@ USER root ARG ROCKS_DB_VERSION=5.8.8 ARG LIBINDY_CRYPTO_VERSION=0.4.5 -ARG INDY_NODE_VERSION=1.12.1 +ARG INDY_NODE_VERSION=1.12.6 ENV VIRTUALENVWRAPPER_PYTHON=/usr/bin/python3 \ WORKON_HOME=$HOME/.virtualenvs \ @@ -45,7 +45,7 @@ RUN apt-get update && \ RUN add-apt-repository "deb http://us.archive.ubuntu.com/ubuntu xenial main universe" && \ apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys CE7709D068DB5E88 && \ - add-apt-repository "deb https://repo.sovrin.org/deb xenial master" && \ + add-apt-repository "deb https://repo.sovrin.org/deb xenial stable" && \ add-apt-repository "deb https://repo.sovrin.org/sdk/deb xenial stable" && \ apt-get update @@ -66,7 +66,7 @@ RUN apt-get install -y libindy libindy-crypto=${LIBINDY_CRYPTO_VERSION} RUN mkdir -p ${INDY_HOME} WORKDIR ${INDY_HOME} COPY config/indy_config.py ${INDY_CONFIG_DIR} -RUN /bin/bash -c "source /usr/local/bin/virtualenvwrapper.sh; mkvirtualenv ${NETWORK_NAME}; workon ${NETWORK_NAME}; pip3 install Cython==0.29.36" +RUN /bin/bash -c "source /usr/local/bin/virtualenvwrapper.sh; mkvirtualenv ${NETWORK_NAME}; workon ${NETWORK_NAME}; pip3 install Cython==0.29.36 certifi==2021.10.8 pyzmq" RUN /bin/bash -c "source /usr/local/bin/virtualenvwrapper.sh; workon ${NETWORK_NAME}; pip3 install indy-node==${INDY_NODE_VERSION}" RUN /bin/bash -c "source /usr/local/bin/virtualenvwrapper.sh; workon ${NETWORK_NAME}; pip3 install flake8 requests" diff --git a/platforms/hyperledger-indy/images/indy-key-mgmt/README.md b/platforms/hyperledger-indy/images/indy-key-mgmt/README.md index 97be12c578d..1174d589b83 100644 --- a/platforms/hyperledger-indy/images/indy-key-mgmt/README.md +++ b/platforms/hyperledger-indy/images/indy-key-mgmt/README.md @@ -17,12 +17,12 @@ docker build -t /bevel-indy-key-mgmt:1.12.1 . When you would like to use older version, then override build arguments.
Example for use version 1.11.0: ```bash -docker build --build-arg INDY_NODE_VERSION=v1.9.2 -t /bevel-indy-key-mgmt:1.9.2 . +docker build --build-arg INDY_NODE_VERSION=1.12.6 -t /bevel-indy-key-mgmt:1.12.6 . ``` #### Build arguments with default values - ROCKS_DB_VERSION=5.8.8 - LIBINDY_CRYPTO_VERSION=0.4.5 - - INDY_NODE_VERSION=1.12.1 + - INDY_NODE_VERSION=1.12.6 ## How to use @@ -45,12 +45,12 @@ docker run -it --rm -e VAULT_TOKEN= /bevel-indy-key-mgmt Insert to vault: ```bash -docker run -it --rm -e VAULT_TOKEN="s.ev8ehHRFYgluTkVDYFH7X5vE" ghcr.io/hyperledger/bevel-indy-key-mgmt:1.12.1 generate_identity my-identity provider.stewards vault http://host.docker.internal:8200 +docker run -it --rm -e VAULT_TOKEN="s.ev8ehHRFYgluTkVDYFH7X5vE" ghcr.io/hyperledger/bevel-indy-key-mgmt:1.12.6 generate_identity my-identity provider.stewards vault http://host.docker.internal:8200 ``` Print on console: ```bash -docker run -it --rm ghcr.io/hyperledger/bevel-indy-key-mgmt:1.12.1 bash -c "generate_identity my-identity provider.stewards | jq" +docker run -it --rm ghcr.io/hyperledger/bevel-indy-key-mgmt:1.12.6 bash -c "generate_identity my-identity provider.stewards | jq" ``` > You could use `| jq` for smooth printing of JSON diff --git a/platforms/hyperledger-indy/images/indy-key-mgmt/src/identity_crypto_generator.py b/platforms/hyperledger-indy/images/indy-key-mgmt/src/identity_crypto_generator.py index 231519cadf1..91ff7042f22 100644 --- a/platforms/hyperledger-indy/images/indy-key-mgmt/src/identity_crypto_generator.py +++ b/platforms/hyperledger-indy/images/indy-key-mgmt/src/identity_crypto_generator.py @@ -6,6 +6,7 @@ import ujson as json import base58 import re +import html from crypto.bls.bls_crypto import BlsGroupParamsLoader from crypto.bls.bls_factory import BlsFactoryCrypto @@ -278,29 +279,27 @@ def path_iteration(dictionary, path): paths.append({path: {key: value}}) return paths return path_iteration(dictionary, '') +def prevent_injections(input_string): + # Regex pattern to prevent SQL injection + sql_injection_pattern = re.compile(r"\b(?:SELECT|INSERT|UPDATE|DELETE|DROP|UNION|CREATE|ALTER|EXEC|--)\b", re.IGNORECASE) + # Regex pattern to prevent HTML injection + html_injection_pattern = re.compile(r"<[a-z][\s\S]*>", re.IGNORECASE) -class IdentityCreator: - - def prevent_injections(input_string): - # Regex pattern to prevent SQL injection - sql_injection_pattern = re.compile(r"\b(?:SELECT|INSERT|UPDATE|DELETE|DROP|UNION|CREATE|ALTER|EXEC|--)\b", re.IGNORECASE) + # Check for SQL injection + if sql_injection_pattern.search(input_string): + raise ValueError("Invalid input. Detected potential SQL injection attempt.") - # Regex pattern to prevent HTML injection - html_injection_pattern = re.compile(r"<[a-z][\s\S]*>", re.IGNORECASE) + # Check for HTML injection + if html_injection_pattern.search(input_string): + raise ValueError("Invalid input. Detected potential HTML injection attempt.") - # Check for SQL injection - if sql_injection_pattern.search(input_string): - raise ValueError("Invalid input. Detected potential SQL injection attempt.") + # HTML escape the input + escaped_string = html.escape(input_string) - # Check for HTML injection - if html_injection_pattern.search(input_string): - raise ValueError("Invalid input. 
Detected potential HTML injection attempt.") + return escaped_string - # HTML escape the input - escaped_string = html.escape(input_string) - - return escaped_string +class IdentityCreator: @classmethod def process(cls): diff --git a/platforms/hyperledger-indy/images/indy-key-mgmt/src/identity_crypto_generator_v2.py b/platforms/hyperledger-indy/images/indy-key-mgmt/src/identity_crypto_generator_v2.py index 5337f786495..2d04855680e 100644 --- a/platforms/hyperledger-indy/images/indy-key-mgmt/src/identity_crypto_generator_v2.py +++ b/platforms/hyperledger-indy/images/indy-key-mgmt/src/identity_crypto_generator_v2.py @@ -6,6 +6,7 @@ import ujson as json import base58 import re +import html from crypto.bls.bls_crypto import BlsGroupParamsLoader from crypto.bls.bls_factory import BlsFactoryCrypto @@ -286,28 +287,27 @@ def path_iteration(dictionary, path): return paths return path_iteration(dictionary, '') +def prevent_injections(input_string): + # Regex pattern to prevent SQL injection + sql_injection_pattern = re.compile(r"\b(?:SELECT|INSERT|UPDATE|DELETE|DROP|UNION|CREATE|ALTER|EXEC|--)\b", re.IGNORECASE) -class IdentityCreatorV2: - - def prevent_injections(input_string): - # Regex pattern to prevent SQL injection - sql_injection_pattern = re.compile(r"\b(?:SELECT|INSERT|UPDATE|DELETE|DROP|UNION|CREATE|ALTER|EXEC|--)\b", re.IGNORECASE) + # Regex pattern to prevent HTML injection + html_injection_pattern = re.compile(r"<[a-z][\s\S]*>", re.IGNORECASE) - # Regex pattern to prevent HTML injection - html_injection_pattern = re.compile(r"<[a-z][\s\S]*>", re.IGNORECASE) + # Check for SQL injection + if sql_injection_pattern.search(input_string): + raise ValueError("Invalid input. Detected potential SQL injection attempt.") - # Check for SQL injection - if sql_injection_pattern.search(input_string): - raise ValueError("Invalid input. Detected potential SQL injection attempt.") + # Check for HTML injection + if html_injection_pattern.search(input_string): + raise ValueError("Invalid input. Detected potential HTML injection attempt.") - # Check for HTML injection - if html_injection_pattern.search(input_string): - raise ValueError("Invalid input. 
Detected potential HTML injection attempt.") + # HTML escape the input + escaped_string = html.escape(input_string) - # HTML escape the input - escaped_string = html.escape(input_string) + return escaped_string - return escaped_string +class IdentityCreatorV2: @classmethod def process(cls): diff --git a/platforms/hyperledger-indy/images/indy-node/Dockerfile b/platforms/hyperledger-indy/images/indy-node/Dockerfile index 9216a38f2dc..316cb191a53 100644 --- a/platforms/hyperledger-indy/images/indy-node/Dockerfile +++ b/platforms/hyperledger-indy/images/indy-node/Dockerfile @@ -4,44 +4,22 @@ ARG uid=1000 # Install environment RUN apt-get update -y && apt-get install -y \ - git \ - wget \ - python3.5 \ - python3-pip \ - python-setuptools \ - python3-nacl \ apt-transport-https \ ca-certificates \ supervisor -RUN pip3 install -U \ - pip==9.0.3 \ - setuptools - RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys CE7709D068DB5E88 -ARG indy_stream=master -RUN echo "deb https://repo.sovrin.org/deb xenial $indy_stream" >> /etc/apt/sources.list +RUN echo "deb https://repo.sovrin.org/deb xenial stable" >> /etc/apt/sources.list RUN useradd -ms /bin/bash -u $uid indy -ARG indy_plenum_ver=1.12.1~dev993 -ARG indy_node_ver=1.12.1~dev1179 -ARG python3_indy_crypto_ver=0.4.5 -ARG indy_crypto_ver=0.4.5 -ARG python3_pyzmq_ver=18.1.0 -ARG python3_orderedset_ver=2.0 -ARG python3_psutil_ver=5.4.3 -ARG python3_pympler_ver=0.5 +ARG indy_plenum_ver=1.12.6 +ARG indy_node_ver=1.12.6 RUN apt-get update -y && apt-get install -y \ - indy-plenum=${indy_plenum_ver} \ - indy-node=${indy_node_ver} \ - python3-indy-crypto=${python3_indy_crypto_ver} \ - libindy-crypto=${indy_crypto_ver} \ - python3-pyzmq=${python3_pyzmq_ver} \ - python3-orderedset=${python3_orderedset_ver} \ - python3-psutil=${python3_psutil_ver} \ - python3-pympler=${python3_pympler_ver} + indy-node=${indy_node_ver} \ + indy-plenum=${indy_plenum_ver} \ + && rm -rf /var/lib/apt/lists/* COPY start-indy-node.sh /var/lib/indy RUN chmod +x /var/lib/indy/start-indy-node.sh diff --git a/platforms/hyperledger-indy/images/indy-node/README.md b/platforms/hyperledger-indy/images/indy-node/README.md index c2143d3c6d2..ef2aeb9897e 100644 --- a/platforms/hyperledger-indy/images/indy-node/README.md +++ b/platforms/hyperledger-indy/images/indy-node/README.md @@ -10,21 +10,17 @@ Docker image of an Indy node (runs using a Steward identity) Ideally, the build of the image should be run from this directory.
For the build, run the command below: ```bash -docker build -t /bevel-indy-node:1.11.0 . +docker build -t /bevel-indy-node:1.12.6 . ``` *NOTE*: Version 1.12.6 is also the default version of Hyperledger Indy packaged in this Docker image.
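To double-check which indy-node build actually ended up inside the image, a quick package query like the sketch below can help. This is only an illustration: `<your-registry>` is a placeholder, and the `--entrypoint` override is an assumption so the query runs instead of the image's normal startup command.

```bash
# Print the indy-node package version baked into the freshly built image
docker run --rm --entrypoint dpkg <your-registry>/bevel-indy-node:1.12.6 -s indy-node | grep '^Version'
```

The reported version should match the `indy_node_ver` build argument used for the build.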
If you would like to use an older version, override the build arguments.
Example for use version 1.9.2: ```bash -docker build --build-arg indy_plenum_ver=1.9.2~dev871 --build-arg indy_node_ver=1.9.2~dev1061 --build-arg python3_pyzmq_ver=17.0.0 -t /bevel-indy-node:1.9.2 . +docker build --build-arg indy_plenum_ver=1.12.6 --build-arg indy_node_ver=1.12.6 -t /bevel-indy-node:1.12.6 . ``` #### Build arguments with default values - - indy_plenum_ver=1.11.0~dev945 - - indy_node_ver=1.11.0~dev1123 - - python3_indy_crypto_ver=0.4.5 - - indy_crypto_ver=0.4.5 - - python3_pyzmq_ver=18.1.0 - + - indy_plenum_ver=1.12.6 + - indy_node_ver=1.12.6 ### Using The Docker image is created specially for Helm Chart [indy-node](../../charts/indy-node). diff --git a/platforms/hyperledger-indy/images/indy-node/start-indy-node.sh b/platforms/hyperledger-indy/images/indy-node/start-indy-node.sh index 2ce51f5ddce..e7146912156 100644 --- a/platforms/hyperledger-indy/images/indy-node/start-indy-node.sh +++ b/platforms/hyperledger-indy/images/indy-node/start-indy-node.sh @@ -3,7 +3,7 @@ set -u set -e mkdir -p /var/lib/indy/keys/$INDY_NETWORK_NAME/keys/$INDY_NODE_NAME/public_keys /var/lib/indy/keys/$INDY_NETWORK_NAME/keys/$INDY_NODE_NAME/verif_keys -chown indy:indy /var/lib/indy/keys /var/lib/indy/keys/$INDY_NETWORK_NAME /var/lib/indy/keys/$INDY_NETWORK_NAME/keys /var/lib/indy/keys/$INDY_NETWORK_NAME/keys/$INDY_NODE_NAME /var/lib/indy/keys/$INDY_NETWORK_NAME/keys/$INDY_NODE_NAME/public_keys /var/lib/indy/keys/$INDY_NETWORK_NAME/keys/$INDY_NODE_NAME/verif_keys +chown -R indy:indy /var/lib/indy/keys /var/lib/indy/keys/$INDY_NETWORK_NAME /var/lib/indy/keys/$INDY_NETWORK_NAME/keys /var/lib/indy/keys/$INDY_NETWORK_NAME/keys/$INDY_NODE_NAME /var/lib/indy/keys/$INDY_NETWORK_NAME/keys/$INDY_NODE_NAME/public_keys /var/lib/indy/keys/$INDY_NETWORK_NAME/keys/$INDY_NODE_NAME/verif_keys if [[ ! -f "/var/lib/indy/keys/$INDY_NETWORK_NAME/keys/$INDY_NODE_NAME/public_keys/$INDY_NODE_NAME.key" ]]; then cp /var/lib/indy/keys/$INDY_NETWORK_NAME/keys/$INDY_NODE_NAME/public_keys/$INDY_NODE_NAME.key.bootstrap /var/lib/indy/keys/$INDY_NETWORK_NAME/keys/$INDY_NODE_NAME/public_keys/$INDY_NODE_NAME.key diff --git a/platforms/hyperledger-indy/scripts/Setup b/platforms/hyperledger-indy/scripts/Setup deleted file mode 100644 index 5794d8306e9..00000000000 --- a/platforms/hyperledger-indy/scripts/Setup +++ /dev/null @@ -1,5 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) 
-[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - diff --git a/platforms/hyperledger-indy/scripts/genesis/get_keys.sh b/platforms/hyperledger-indy/scripts/genesis/get_keys.sh new file mode 100755 index 00000000000..6a3112acf90 --- /dev/null +++ b/platforms/hyperledger-indy/scripts/genesis/get_keys.sh @@ -0,0 +1,33 @@ +#!/bin/bash +FILES_DIR=../../charts/indy-genesis/files +kubectl --namespace authority-ns get secret authority-trustee-identity-public -o jsonpath='{.data.value}' | base64 -d | jq '.["did"]'> $FILES_DIR/authority-trustee-did.json +kubectl --namespace authority-ns get secret authority-trustee-node-public-verif-keys -o jsonpath='{.data.value}' | base64 -d | jq '.["verification-key"]' > $FILES_DIR/authority-trustee-verkey.json + +kubectl --namespace university-ns get secret university-steward-1-identity-public -o jsonpath='{.data.value}' | base64 -d | jq '.["did"]'> $FILES_DIR/university-steward-1-did.json +kubectl --namespace university-ns get secret university-steward-1-node-public-verif-keys -o jsonpath='{.data.value}' | base64 -d | jq '.["verification-key"]' > $FILES_DIR/university-steward-1-verkey.json +kubectl --namespace university-ns get secret university-steward-1-node-public-bls-keys -o jsonpath='{.data.value}' | base64 -d | jq '.["bls-key-pop"]' > $FILES_DIR/university-steward-1-blspop.json +kubectl --namespace university-ns get secret university-steward-1-node-public-bls-keys -o jsonpath='{.data.value}' | base64 -d | jq '.["bls-public-key"]' > $FILES_DIR/university-steward-1-blspub.json + +kubectl --namespace university-ns get secret university-steward-2-identity-public -o jsonpath='{.data.value}' | base64 -d | jq '.["did"]'> $FILES_DIR/university-steward-2-did.json +kubectl --namespace university-ns get secret university-steward-2-node-public-verif-keys -o jsonpath='{.data.value}' | base64 -d | jq '.["verification-key"]' > $FILES_DIR/university-steward-2-verkey.json +kubectl --namespace university-ns get secret university-steward-2-node-public-bls-keys -o jsonpath='{.data.value}' | base64 -d | jq '.["bls-key-pop"]' > $FILES_DIR/university-steward-2-blspop.json +kubectl --namespace university-ns get secret university-steward-2-node-public-bls-keys -o jsonpath='{.data.value}' | base64 -d | jq '.["bls-public-key"]' > $FILES_DIR/university-steward-2-blspub.json + +kubectl --namespace university-ns get secret university-steward-3-identity-public -o jsonpath='{.data.value}' | base64 -d | jq '.["did"]'> $FILES_DIR/university-steward-3-did.json +kubectl --namespace university-ns get secret university-steward-3-node-public-verif-keys -o jsonpath='{.data.value}' | base64 -d | jq '.["verification-key"]' > $FILES_DIR/university-steward-3-verkey.json +kubectl --namespace university-ns get secret university-steward-3-node-public-bls-keys -o jsonpath='{.data.value}' | base64 -d | jq '.["bls-key-pop"]' > $FILES_DIR/university-steward-3-blspop.json +kubectl --namespace university-ns get secret university-steward-3-node-public-bls-keys -o jsonpath='{.data.value}' | base64 -d | jq '.["bls-public-key"]' > $FILES_DIR/university-steward-3-blspub.json + +kubectl --namespace university-ns get secret university-steward-4-identity-public -o jsonpath='{.data.value}' | base64 -d | jq '.["did"]'> $FILES_DIR/university-steward-4-did.json +kubectl --namespace university-ns get secret university-steward-4-node-public-verif-keys -o jsonpath='{.data.value}' | base64 -d | jq '.["verification-key"]' 
> $FILES_DIR/university-steward-4-verkey.json +kubectl --namespace university-ns get secret university-steward-4-node-public-bls-keys -o jsonpath='{.data.value}' | base64 -d | jq '.["bls-key-pop"]' > $FILES_DIR/university-steward-4-blspop.json +kubectl --namespace university-ns get secret university-steward-4-node-public-bls-keys -o jsonpath='{.data.value}' | base64 -d | jq '.["bls-public-key"]' > $FILES_DIR/university-steward-4-blspub.json + +# Sample below for employer option +# kubectl --namespace employer-ns get secret employer-trustee-identity-public -o jsonpath='{.data.value}' | base64 -d | jq '.["did"]'> $FILES_DIR/employer-trustee-did.json +# kubectl --namespace employer-ns get secret employer-trustee-node-public-verif-keys -o jsonpath='{.data.value}' | base64 -d | jq '.["verification-key"]' > $FILES_DIR/employer-trustee-verkey.json + +# kubectl --namespace employer-ns get secret employer-steward-1-identity-public -o jsonpath='{.data.value}' | base64 -d | jq '.["did"]'> $FILES_DIR/employer-steward-1-did.json +# kubectl --namespace employer-ns get secret employer-steward-1-node-public-verif-keys -o jsonpath='{.data.value}' | base64 -d | jq '.["verification-key"]' > $FILES_DIR/employer-steward-1-verkey.json +# kubectl --namespace employer-ns get secret employer-steward-1-node-public-bls-keys -o jsonpath='{.data.value}' | base64 -d | jq '.["bls-key-pop"]' > $FILES_DIR/employer-steward-1-blspop.json +# kubectl --namespace employer-ns get secret employer-steward-1-node-public-bls-keys -o jsonpath='{.data.value}' | base64 -d | jq '.["bls-public-key"]' > $FILES_DIR/employer-steward-1-blspub.json diff --git a/platforms/network-schema.json b/platforms/network-schema.json index 6a89ec9f1ad..0ccf5575a1f 100755 --- a/platforms/network-schema.json +++ b/platforms/network-schema.json @@ -408,6 +408,10 @@ "proxy_namespace": { "type": "string", "description": "Proxy namespace" + }, + "labels":{ + "$ref": "#/definitions/shared_env_labels", + "description": "Labels for kubernetes components." 
} }, "required": [ @@ -529,15 +533,6 @@ "cluster_id": { "type": "string" }, - "provider": { - "type": "string", - "enum": [ - "aws", - "azure", - "gcp", - "minikube" - ] - }, "region": { "type": "string" }, @@ -771,17 +766,11 @@ "uri": { "type": "string", "pattern": "^(http|https):\/\/(((?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9])|((?:[0-9]{1,3}\\.){3}[0-9]{1,3}))(?::[0-9]{2,5})?$" - }, - "certificate": { - "type": "string", - "pattern": "^\/?([^\/ ]*\/)+[^\/ ]+\\.(crt|pem)$", - "description": "Absolute path to the public certificates" } }, "required": [ "type", - "uri", - "certificate" + "uri" ], "additionalProperties": false }, @@ -844,7 +833,7 @@ "type": { "type": "string", "enum": [ - "doorman-nms-notary", + "network-service", "notary", "node", "cenm" @@ -983,54 +972,16 @@ "type": "object", "properties": { "keystore": { - "type": "object", - "properties": { - "keystore": { - "type": "string" - }, - "idman": { - "type": "string" - }, - "networkmap": { - "type": "string" - }, - "subordinateca": { - "type": "string" - }, - "rootca": { - "type": "string" - }, - "tlscrlsigner": { - "type": "string" - } - }, + "type": "string", "required": [ - "keystore", - "idman", - "networkmap", - "subordinateca", - "rootca", - "tlscrlsigner" + "keystore" ], "additionalProperties": false }, "truststore": { - "type": "object", - "properties": { - "truststore": { - "type": "string" - }, - "rootca": { - "type": "string" - }, - "ssl": { - "type": "string" - } - }, + "type": "string", "required": [ - "truststore", - "rootca", - "ssl" + "truststore" ], "additionalProperties": false }, @@ -1065,8 +1016,7 @@ }, "required": [ "keystore", - "truststore", - "ssl" + "truststore" ], "additionalProperties": false }, @@ -1309,6 +1259,9 @@ }, "apiPort": { "type": "number" + }, + "enabled": { + "type": "boolean" } }, "required": [ @@ -1353,19 +1306,13 @@ "uri": { "type": "string", "pattern": "^(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9](?::[0-9]{2,5})?$" - }, - "certificate": { - "type": "string", - "pattern": "^(\/[^\/ ]*)+[^\/ ]+\\.crt$", - "description": "Absolute path to the public certificates" } }, "required": [ "name", "type", "org_name", - "uri", - "certificate" + "uri" ], "additionalProperties": false }, @@ -1410,20 +1357,6 @@ ], "additionalProperties": false }, - "genesis": { - "type": "object", - "properties": { - "name": { - "type": "string", - "pattern": "^[A-Za-z0-9-]{1,30}$", - "description": "Name of the genesis block" - } - }, - "required": [ - "name" - ], - "additionalProperties": false - }, "orderers": { "type": "array", "items": { @@ -1453,9 +1386,9 @@ "channel", "consortium", "channel_name", - "genesis", "orderers", - "participants" + "participants", + "channel_status" ], "additionalProperties": false }, @@ -1503,6 +1436,14 @@ "pattern": "^[a-z0-9-]{1,30}$", "description": "Name of the peer" }, + "type": { + "type": "string", + "enum": [ + "anchor", + "nonanchor" + ], + "description": "At least one peer should be anchor peer" + }, "peerstatus": { "type": "string", "enum": [ @@ -1605,13 +1546,6 @@ "maxLength": 12, "description": "Name of the organization" }, - "type": { - "type": "string", - "enum": [ - "orderer", - "peer" - ] - }, "country": { "type": "string", "description": "Country of the organization" @@ -1640,31 +1574,15 @@ "delete" ] }, - "if": { - "properties": { - "type": { - "const": "peer" - } - } - }, - "then": { - "properties": { - "orderer_org": { - "type": "string", - "pattern": "^[a-z0-9-]{1,30}$", - "description": "Name of 
the organization providing the ordering service" - } - } + "orderer_org": { + "type": "string", + "pattern": "^[a-z0-9-]{1,30}$", + "description": "Name of the organization providing the ordering service" }, "ca_data": { "type": "object", "description": "Contains the certificate authority url and certificate path; this has not been implemented yet", "properties": { - "url": { - "type": "string", - "pattern": "^(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9](?::[0-9]{2,5})?$", - "description": "Gossip address of the peer" - }, "certificate": { "type": "string", "pattern": "^\/?([^\/ ]*\/)+[^\/ ]+\\.crt$", @@ -1672,35 +1590,15 @@ } }, "required": [ - "url", "certificate" ], "additionalProperties": false - } - }, - "if": { - "properties": { - "type": { - "const": "orderer" - } - } - }, - "then": { - "properties": { - "services": { - "$ref": "#/definitions/fabric_orderer_services" - } - } - }, - "else": { - "properties": { - "services": { - "$ref": "#/definitions/fabric_peer_services" - } + }, + "services": { + "$ref": "#/definitions/fabric_services" } }, "required": [ - "type", "country", "state", "location", @@ -1709,7 +1607,7 @@ "services" ] }, - "fabric_orderer_services": { + "fabric_services": { "type": "object", "properties": { "ca": { @@ -1723,20 +1621,6 @@ "items": { "$ref": "#/definitions/fabric_service_orderer" } - } - }, - "required": [ - "ca", - "consensus", - "orderers" - ], - "additionalProperties": false - }, - "fabric_peer_services": { - "type": "object", - "properties": { - "ca": { - "$ref": "#/definitions/fabric_service_ca" }, "peers": { "type": "array", @@ -1746,8 +1630,7 @@ } }, "required": [ - "ca", - "peers" + "ca" ], "additionalProperties": false }, @@ -1889,7 +1772,7 @@ }, "name": { "type": "string", - "pattern": "^peer[0-9]{1,2}$", + "pattern": "^[a-z0-9-]{1,30}$", "description": "Name of the peer. Must be of the format peer0 for the first peer, peer1 for the second peer and so on." }, "gossippeeraddress": { @@ -2443,8 +2326,7 @@ "indy_services": { "type": "object", "properties": { - "trustees": { - "type": "array", + "trustee": { "items": { "$ref": "#/definitions/indy_service_trustee" } @@ -2455,8 +2337,7 @@ "$ref": "#/definitions/indy_service_steward" } }, - "endorsers": { - "type": "array", + "endorser": { "items": { "$ref": "#/definitions/indy_service_endorser" } @@ -2643,6 +2524,11 @@ }, "bootnode": { "$ref": "#/definitions/quorum_config_bootnode" + }, + "accounts": { + "type": "array", + "minItems": 0, + "description": "Array of accounts which start with default ETH" } }, "required": [ diff --git a/platforms/quorum/charts/README.md b/platforms/quorum/charts/README.md index 1658c836b54..c3b1567cb1c 100644 --- a/platforms/quorum/charts/README.md +++ b/platforms/quorum/charts/README.md @@ -6,17 +6,217 @@ # Charts for Quorum components ## About -This folder contains helm charts which are used by the ansible playbooks for the deployment of the Quorum components. Each chart folder contain a folder for templates, chart file and the corresponding value file. - -## Example Folder Structure ### -``` -/quorum-member-node -|-- templates -| |-- _helpers.tpl -| |-- configmap.yaml -| |-- deployment.yaml -| |-- ingress.yaml -| |-- service.yaml -|-- Chart.yaml -|-- values.yaml +This folder contains the helm charts which are used for the deployment of the Hyperledger Quorum components. Each helm that you can use has the following keys and you need to set them. The `global.cluster.provider` is used as a key for the various cloud features enabled. 
Also, when deploying to the cloud, you only need to specify the one cloud provider you are actually using, **not** all of them. At the time of writing, AWS is fully supported. + +```yaml +global: + serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws + cloudNativeServices: false # future: set to true to use Cloud Native Services + kubernetesUrl: "https://yourkubernetes.com" # Provide the k8s URL, ignore if not using Hashicorp Vault + vault: + type: hashicorp # choose from hashicorp | kubernetes + network: quorum # must be quorum for these charts + # Following are necessary only when hashicorp vault is used. + address: http://vault.url:8200 + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + role: vault-role +``` + +## Usage + +### Pre-requisites + +- Kubernetes Cluster (either Managed cloud option like EKS or local like minikube) +- Accessible and unsealed Hashicorp Vault (if using Vault) +- Configured Ambassador AES (if using Ambassador as proxy) +- Update the dependencies + ``` + helm dependency update quorum-genesis + helm dependency update quorum-node + ``` + + +## `Without Proxy and Vault` + +### 1. Install Genesis Node +```bash +# Install the genesis node +helm install genesis ./quorum-genesis --namespace supplychain-quo --create-namespace --values ./values/noproxy-and-novault/genesis.yaml +``` + +### 2. Install Validator Nodes +```bash +# Install validator nodes +helm install validator-1 ./quorum-node --namespace supplychain-quo --values ./values/noproxy-and-novault/validator.yaml +helm install validator-2 ./quorum-node --namespace supplychain-quo --values ./values/noproxy-and-novault/validator.yaml +helm install validator-3 ./quorum-node --namespace supplychain-quo --values ./values/noproxy-and-novault/validator.yaml +helm install validator-4 ./quorum-node --namespace supplychain-quo --values ./values/noproxy-and-novault/validator.yaml +``` + +### 3. Deploy Member and Tessera Node Pair +```bash +# Deploy Quorum and Tessera node pair +helm install member-1 ./quorum-node --namespace supplychain-quo --values ./values/noproxy-and-novault/txnode.yaml +``` + +### Setting Up Another Member in a Different Namespace + +```bash +# Get the genesis and static nodes from the existing member and place them in the directory 'quorum-genesis/files' +cd ./quorum-genesis/files/ +kubectl --namespace supplychain-quo get configmap quorum-peers -o jsonpath='{.data.static-nodes\.json}' > static-nodes.json +kubectl --namespace supplychain-quo get configmap quorum-genesis -o jsonpath='{.data.genesis\.json}' > genesis.json + +# Install secondary genesis node +helm install genesis ./quorum-genesis --namespace carrier-quo --values ./values/noproxy-and-novault/genesis-sec.yaml + +# Install secondary member node +helm install member-2 ./quorum-node --namespace carrier-quo --values ./values/noproxy-and-novault/txnode-sec.yaml +``` + +--- + +## `With Ambassador Proxy and Vault` + +### 1. Create Namespace and Secret +```bash +# Create a namespace +kubectl create namespace supplychain-quo + +# Create the roottoken secret +kubectl -n supplychain-quo create secret generic roottoken --from-literal=token= +``` + +### 2. Install Genesis Node +```bash +# Install the genesis node +helm install genesis ./quorum-genesis --namespace supplychain-quo --values ./values/proxy-and-vault/genesis.yaml +``` + +### 3.
Install Validator Nodes +```bash +# Install validator nodes +helm install validator-1 ./quorum-node --namespace supplychain-quo --values ./values/proxy-and-vault/validator.yaml --set global.proxy.p2p=15011 +helm install validator-2 ./quorum-node --namespace supplychain-quo --values ./values/proxy-and-vault/validator.yaml --set global.proxy.p2p=15012 +helm install validator-3 ./quorum-node --namespace supplychain-quo --values ./values/proxy-and-vault/validator.yaml --set global.proxy.p2p=15013 +helm install validator-4 ./quorum-node --namespace supplychain-quo --values ./values/proxy-and-vault/validator.yaml --set global.proxy.p2p=15014 +``` + +### 4. Deploy Member and Tessera Node Pair +```bash +# Deploy Quorum and Tessera node pair +helm install supplychain ./quorum-node --namespace supplychain-quo --values ./values/proxy-and-vault/txnode.yaml --set global.proxy.p2p=15015 +``` + +### Setting Up Another Member in a Different Namespace + +```bash +# Get the genesis and static nodes from the existing member and place them in the directory 'quorum-genesis/files' +cd ./quorum-genesis/files/ +kubectl --namespace supplychain-quo get configmap quorum-peers -o jsonpath='{.data.static-nodes\.json}' > static-nodes.json +kubectl --namespace supplychain-quo get configmap quorum-genesis -o jsonpath='{.data.genesis\.json}' > genesis.json + +# Create a new namespace +kubectl create namespace carrier-quo + +# Create the roottoken secret +kubectl -n carrier-quo create secret generic roottoken --from-literal=token= + +# Install secondary genesis node +helm install genesis ./quorum-genesis --namespace carrier-quo --values ./values/proxy-and-vault/genesis-sec.yaml + +# Install secondary member node +helm install carrier ./quorum-node --namespace carrier-quo --values ./values/proxy-and-vault/txnode-sec.yaml --set global.proxy.p2p=15016 +``` + +## `API call` + +Once your services are deployed, they can be accessed using the domain name provided in your `global.proxy.externalUrlSuffix`. + +1. **Retrieve the Source Host for Your Node** + + Run the following command to get the mapping for your node: + + ```bash + kubectl get mapping --namespace supplychain-quo + ``` + + From the output, copy the source host for your node. + +2. **Make HTTP RPC API Calls** + + You can interact with your node using HTTP RPC API calls. Here's an example of how to do it: + + ```bash + curl -X POST -H "Content-Type: application/json" --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' http:// + ``` + + Replace `` with the source host you copied earlier. + +3. **Verify the Node Syncing Status** + + If the node running the JSON-RPC service is syncing correctly, the previous command should return the following: + + ```json + { + "jsonrpc" : "2.0", + "id" : 1, + "result" : "0x64" + } + ``` + + This confirms that your node is syncing as expected. + +## `Managing IBFT Validators Deployment` + +To deploy the proposed validator chart for IBFT, you first need to set up the Quorum DLT network. Below are the steps you can follow: + +1. **Deploy Quorum DLT Network**: + You have two options for deploying the Quorum DLT network: + + - **With Vault and Proxy** + - **Without Vault and Proxy** + + Choose the appropriate method based on your requirements. + +2. **Install Validator Chart**: + Utilize Helm for installing the validator chart.
Ensure to adjust values accordingly: + + ```bash + helm install validator-5 ./quorum-propose-validator --namespace supplychain-quo --values quorum-propose-validator/values.yaml + ``` + + This chart facilitates the addition or removal of validators through majority voting. + +3. **Verify Validator Status**: + Confirm the validator status by executing: + + ```bash + curl -X POST -H "Content-Type: application/json" --data '{"jsonrpc":"2.0","method":"istanbul_getValidators","params":["latest"],"id":1}' http:// + ``` + + This command retrieves the current list of validators for the latest block. + + Replace `` with the appropriate host address. + + +## `Clean-up` + +To clean up, simply uninstall the Helm releases. It's important to uninstall the genesis Helm chart at the end to prevent any cleanup failure. + +```bash +helm uninstall --namespace supplychain-quo validator-1 +helm uninstall --namespace supplychain-quo validator-2 +helm uninstall --namespace supplychain-quo validator-3 +helm uninstall --namespace supplychain-quo validator-4 +helm uninstall --namespace supplychain-quo validator-5 +helm uninstall --namespace supplychain-quo supplychain +helm uninstall --namespace supplychain-quo genesis + +helm uninstall --namespace carrier-quo carrier +helm uninstall --namespace carrier-quo genesis ``` diff --git a/platforms/quorum/charts/quorum-genesis/Chart.yaml b/platforms/quorum/charts/quorum-genesis/Chart.yaml new file mode 100644 index 00000000000..480cfac68f7 --- /dev/null +++ b/platforms/quorum/charts/quorum-genesis/Chart.yaml @@ -0,0 +1,21 @@ +apiVersion: v2 +name: quorum-genesis +description: "Quorum: This Helm chart deploys genesis." +type: application +version: 1.0.0 +appVersion: latest +keywords: + - bevel + - ethereum + - quorum + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/quorum/charts/quorum-genesis/README.md b/platforms/quorum/charts/quorum-genesis/README.md new file mode 100644 index 00000000000..37d07990a14 --- /dev/null +++ b/platforms/quorum/charts/quorum-genesis/README.md @@ -0,0 +1,125 @@ +[//]: # (##############################################################################################) +[//]: # (Copyright Accenture. All Rights Reserved.) +[//]: # (SPDX-License-Identifier: Apache-2.0) +[//]: # (##############################################################################################) + + +# quorum-genesis + +This Helm chart is a component of Hyperledger Bevel, designed to facilitate the creation of the genesis file for a quorum network. If enabled, the cryptographic keys are securely stored on the configured vault and managed as Kubernetes secrets. Refer to the [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for comprehensive details. + +### TL;DR + +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install genesis bevel/quorum-genesis +``` + +### Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +If HashiCorp Vault is utilized, ensure: +- HashiCorp Vault Server 1.13.1+ + +> **Note**: Verify the dependent charts for additional prerequisites. 
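Before installing, it can be worth confirming the prerequisites from your shell. The snippet below is a minimal sketch, assuming `kubectl`, `helm` and (optionally) the `vault` CLI are installed and already pointed at the target cluster and Vault server:

```bash
# Compare the reported versions against the prerequisites listed above
kubectl version        # Kubernetes 1.19+
helm version --short   # Helm 3.2.0+

# Only relevant when HashiCorp Vault is used; requires VAULT_ADDR (and a token) to be set
vault status           # should report the Vault as initialized and unsealed
```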
+ +### Installation + +To install the chart with the release name `genesis`, execute: + +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install genesis bevel/quorum-genesis +``` + +This command deploys the chart onto the Kubernetes cluster using default configurations. Refer to the [Parameters](#parameters) section for customizable options. + +> **Tip**: Utilize `helm list` to list all releases. + +### Uninstallation + +To remove the `genesis` deployment, use: + +```bash +helm uninstall genesis +``` + +This command eliminates all Kubernetes components associated with the chart and deletes the release. + +### Parameters + +#### Global Parameters +These parameters remain consistent across parent or child charts. + +| Name | Description | Default Value | +|--------|---------|-------------| +| `global.serviceAccountName` | Name of the service account for Vault Auth and Kubernetes Secret management | `vault-auth` | +| `global.cluster.provider` | Kubernetes cluster provider (e.g., AWS EKS, minikube). Currently tested with `aws` and `minikube`. | `aws` | +| `global.cluster.cloudNativeServices` | Future implementation for utilizing Cloud Native Services (`true` for SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure). | `false` | +| `global.cluster.kubernetesUrl` | URL of the Kubernetes Cluster | `""` | +| `global.vault.type` | Vault type support for other providers. Currently supports `hashicorp` and `kubernetes`. | `hashicorp` | +| `global.vault.role` | Role used for authentication with Vault | `vault-role` | +| `global.vault.network` | Deployed network type | `besu` | +| `global.vault.address`| URL of the Vault server. | `""` | +| `global.vault.authPath` | Authentication path for Vault | `supplychain` | +| `global.vault.secretEngine` | Vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | Vault secret prefix; must start with `data/` | `data/supplychain` | + +#### Image + +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `image.repository` | Repository of the Quorum/Besu hooks image | `ghcr.io/hyperledger/bevel-k8s-hooks` | +| `image.tag` | Tag of the Quorum/Besu hooks image | `qgt-0.2.12` | +| `image.pullSecret` | Docker secret name in the namespace | `""` | +| `image.pullPolicy` | Pull policy for Docker images | `IfNotPresent` | + +#### TLS + +| Name | Description | Default Value | +|--------|---------|-------------| +|`settings.removeGenesisOnDelete` | Deletes genesis configmaps when uninstalling the release | `true` | +| `settings.secondaryGenesis` | Set to true to skip network initialization from scratch. Useful for deploying additional nodes, possibly in another namespace. Set to false to start the network from scratch. 
| `false` | + +#### Genesis Config + +| Name | Description | Default Value | +| ----------------| ----------- | ------------- | +| `rawGenesisConfig.genesis.config.chainId` | Chain ID of the Quorum network | `1337` | +| `rawGenesisConfig.genesis.config.algorithm.consensus` | Consensus mechanism of the Quorum network: `ibft`, `qbft`, `raft`, `clique` | `qbft` | +| `rawGenesisConfig.genesis.config.algorithm.blockperiodseconds` | Block period in seconds | `10` | +| `rawGenesisConfig.genesis.config.algorithm.emptyBlockPeriod` | Empty block period in seconds | `60` | +| `rawGenesisConfig.genesis.config.algorithm.epochlength` | Epoch length | `30000` | +| `rawGenesisConfig.genesis.config.algorithm.requesttimeoutseconds` | Request timeout in seconds | `20` | +| `rawGenesisConfig.genesis.gasLimit` | Gas limit for each transaction | `'0x47b760'` | +| `rawGenesisConfig.genesis.difficulty` | Difficulty setting | `'0x1'` | +| `rawGenesisConfig.genesis.coinbase` | Coinbase setting | `'0x0000000000000000000000000000000000000000'` | +| `rawGenesisConfig.genesis.includeQuickStartAccounts` | Include default accounts flag | `false` | +| `rawGenesisConfig.blockchain.nodes.generate` | Flag to generate initial nodes as per the specified `count` | `true` | +| `rawGenesisConfig.blockchain.nodes.count` | Number of validators | `4` | +| `rawGenesisConfig.blockchain.accountPassword` | Default password for new accounts | `'password'` | + +## License + +This chart is licensed under the Apache v2.0 license. + +© 2023 Accenture + +### Attribution + +This chart is adapted from the [charts](https://hyperledger.github.io/bevel/), licensed under the Apache v2.0 License: + +``` +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +``` diff --git a/platforms/quorum/charts/quorum-genesis/files/readme.txt b/platforms/quorum/charts/quorum-genesis/files/readme.txt new file mode 100644 index 00000000000..3679d0cf4fa --- /dev/null +++ b/platforms/quorum/charts/quorum-genesis/files/readme.txt @@ -0,0 +1 @@ +This is a dummy file. Place the staticnodes and genesis files in this folder for secondary genesis. \ No newline at end of file diff --git a/platforms/quorum/charts/quorum-genesis/requirements.yaml b/platforms/quorum/charts/quorum-genesis/requirements.yaml new file mode 100644 index 00000000000..b1195396c5f --- /dev/null +++ b/platforms/quorum/charts/quorum-genesis/requirements.yaml @@ -0,0 +1,11 @@ +dependencies: + - name: bevel-vault-mgmt + repository: "file://../../../shared/charts/bevel-vault-mgmt" + tags: + - bevel + version: ~1.0.0 + - name: bevel-scripts + repository: "file://../../../shared/charts/bevel-scripts" + tags: + - bevel + version: ~1.0.0 diff --git a/platforms/quorum/charts/quorum-genesis/templates/_helpers.tpl b/platforms/quorum/charts/quorum-genesis/templates/_helpers.tpl new file mode 100644 index 00000000000..4ecd2acc34a --- /dev/null +++ b/platforms/quorum/charts/quorum-genesis/templates/_helpers.tpl @@ -0,0 +1,28 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart.
+*/}} +{{- define "quorum-genesis.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "quorum-genesis.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "quorum-genesis.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/platforms/quorum/charts/quorum-genesis/templates/configmap.yaml b/platforms/quorum/charts/quorum-genesis/templates/configmap.yaml new file mode 100644 index 00000000000..072e7efbbbb --- /dev/null +++ b/platforms/quorum/charts/quorum-genesis/templates/configmap.yaml @@ -0,0 +1,39 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## +{{- if .Values.settings.secondaryGenesis }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: quorum-genesis + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: quorum-genesis-job + app.kubernetes.io/component: genesis-job + app.kubernetes.io/part-of: {{ include "quorum-genesis.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +data: + genesis.json: |- + {{ .Files.Get "files/genesis.json" | nindent 8 }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: quorum-peers + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: quorum-genesis-job + app.kubernetes.io/component: genesis-job + app.kubernetes.io/part-of: {{ include "quorum-genesis.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +data: + static-nodes.json: |- + {{ .Files.Get "files/static-nodes.json" | nindent 8 }} +{{- end }} diff --git a/platforms/quorum/charts/quorum-genesis/templates/genesis-job-cleanup.yaml b/platforms/quorum/charts/quorum-genesis/templates/genesis-job-cleanup.yaml new file mode 100644 index 00000000000..ccdc3c1143b --- /dev/null +++ b/platforms/quorum/charts/quorum-genesis/templates/genesis-job-cleanup.yaml @@ -0,0 +1,48 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "quorum-genesis.name" . }}-cleanup + labels: + app.kubernetes.io/name: quorum-genesis-job-cleanup + app.kubernetes.io/component: genesis-job-cleanup + app.kubernetes.io/part-of: {{ include "quorum-genesis.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/managed-by: helm + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/hook-weight: "0" + helm.sh/hook: "pre-delete" + helm.sh/hook-delete-policy: "hook-succeeded" +spec: + backoffLimit: 3 + completions: 1 + template: + metadata: + labels: + app.kubernetes.io/name: quorum-genesis-job-cleanup + app.kubernetes.io/component: genesis-job-cleanup + app.kubernetes.io/part-of: {{ include "quorum-genesis.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/managed-by: helm + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + restartPolicy: "Never" + containers: + - name: delete-genesis + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + securityContext: + runAsUser: 0 + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - /bin/bash + - -c + args: + - | + +{{- if .Values.settings.removeGenesisOnDelete }} + if kubectl get configmap --namespace {{ .Release.Namespace }} quorum-genesis &> /dev/null; then + echo "Deleting genesis configmap in k8s ..." + kubectl delete configmap --namespace {{ .Release.Namespace }} quorum-genesis + fi +{{- end}} diff --git a/platforms/quorum/charts/quorum-genesis/templates/genesis-job-init.yaml b/platforms/quorum/charts/quorum-genesis/templates/genesis-job-init.yaml new file mode 100644 index 00000000000..2e06ee22b2d --- /dev/null +++ b/platforms/quorum/charts/quorum-genesis/templates/genesis-job-init.yaml @@ -0,0 +1,238 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "quorum-genesis.name" . }}-init + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: quorum-genesis-job + app.kubernetes.io/component: genesis-job + app.kubernetes.io/part-of: {{ include "quorum-genesis.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/hook-delete-policy: "hook-succeeded" +spec: + backoffLimit: 3 + template: + metadata: + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: quorum-genesis-job + app.kubernetes.io/component: genesis-job + app.kubernetes.io/part-of: {{ include "quorum-genesis.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/managed-by: helm + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + restartPolicy: OnFailure + containers: + - name: generate-genesis + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + securityContext: + runAsUser: 0 + imagePullPolicy: {{ .Values.image.pullPolicy }} +{{- if eq .Values.global.vault.type "hashicorp" }} + volumeMounts: + - name: scripts-volume + mountPath: /scripts/bevel-vault.sh + subPath: bevel-vault.sh + env: + - name: VAULT_ADDR + value: "{{ .Values.global.vault.address }}" + - name: VAULT_SECRET_ENGINE + value: "{{ .Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ .Values.global.vault.secretPrefix }}" + - name: KUBERNETES_AUTH_PATH + value: "{{ .Values.global.vault.authPath }}" + - name: VAULT_APP_ROLE + value: "{{ .Values.global.vault.role }}" + - name: VAULT_TYPE + value: "{{ .Values.global.vault.type }}" +{{- end }} + command: + - /bin/bash + - -c + args: + - | + + # Check if the vault type is HashiCorp +{{- if eq .Values.global.vault.type "hashicorp" }} + # Source the script containing vault-related functions + . 
/scripts/bevel-vault.sh + + echo "Generate a customize token." + vaultBevelFunc "init" + + # Function to safely write keys + safeWriteSecret() { + local key="$1" + local fpath="$2" + + # Read secret from vault + vaultBevelFunc "readJson" "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/${key}" + # Check if secrets are available in the vault + if [ "$SECRETS_AVAILABLE" == "yes" ] + then + # Extract secrets from JSON response + local accountAddress=$(echo ${VAULT_SECRET} | jq -r '.["accountAddress"]') + local accountKeystore=$(echo ${VAULT_SECRET} | jq -r '.["accountKeystore_base64"]' | base64 -d) + local accountPassword=$(echo ${VAULT_SECRET} | jq -r '.["accountPassword"]') + local accountPrivateKey=$(echo ${VAULT_SECRET} | jq -r '.["accountPrivateKey"]') + local address=$(echo ${VAULT_SECRET} | jq -r '.["nodeAddress"]') + local nodeKey=$(echo ${VAULT_SECRET} | jq -r '.["nodeKey"]') + local nodekey_pub=$(echo ${VAULT_SECRET} | jq -r '.["nodeKeyPub"]') + + # Check if Kubernetes secret exists, if not, create one + if ! kubectl get secret quorum-node-validator-${i}-keys --namespace {{ .Release.Namespace }} &> /dev/null; then + kubectl create secret generic quorum-node-validator-${i}-keys --namespace {{ .Release.Namespace }} \ + --from-literal=accountAddress=${accountAddress} \ + --from-literal=accountKeystore=${accountKeystore} \ + --from-literal=accountPassword=${accountPassword} \ + --from-literal=accountPrivateKey=${accountPrivateKey} \ + --from-literal=address=${address} \ + --from-literal=nodekey=${nodeKey} \ + --from-literal=nodekey.pub=${nodekey_pub} + fi + else + # Read data from files if secrets are not available in the vault + local node_address=$(cat "${fpath}/address") + local node_key=$(cat "${fpath}/nodekey") + local node_key_pub=$(cat "${fpath}/nodekey.pub") + local account_private_key=$(cat "${fpath}/accountPrivateKey") + local account_password=$(cat "${fpath}/accountPassword") + local account_keystore_base64=$(cat "${fpath}/accountKeystore" | base64 -w 0) + local account_address=$(cat "${fpath}/accountAddress") + + # Construct JSON payload + echo " + { + \"data\": + { + \"nodeAddress\": \"${node_address}\", + \"nodeKey\": \"${node_key}\", + \"nodeKeyPub\": \"${node_key_pub}\", + \"accountPrivateKey\": \"${account_private_key}\", + \"accountPassword\": \"${account_password}\", + \"accountKeystore_base64\": \"${account_keystore_base64}\", + \"accountAddress\": \"${account_address}\" + } + }" > nodePayload.json + + # Push data to vault + vaultBevelFunc 'write' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/${key}" 'nodePayload.json' + + rm nodePayload.json + fi + } + + # Function to safely write genesis + safeWriteGenesis() { + local key="$1" + local fpath="$2" + + # Read genesis file from vault + vaultBevelFunc "readJson" "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/${key}" + + # Check if secrets are available in the vault + if [ "$SECRETS_AVAILABLE" == "yes" ] + then + echo ${VAULT_SECRET} | jq -r '.["genesis_base64"]' | base64 -d > ./genesis.json + if ! 
kubectl get configmap "quorum-genesis" --namespace {{ .Release.Namespace }} &> /dev/null; then + kubectl create configmap "quorum-genesis" --from-file=genesis.json=./genesis.json --namespace {{ .Release.Namespace }} + fi + else + # Read genesis data from files if secrets are not available in the vault + local genesis_base64=$(cat "${fpath}/genesis.json" | base64 -w 0) + + # Construct JSON payload + echo " + { + \"data\": + { + \"genesis_base64\": \"${genesis_base64}\" + } + }" > genesisPayload.json + + # Push data to vault + vaultBevelFunc 'write' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/${key}" 'genesisPayload.json' + + rm genesisPayload.json + fi + } +{{- else }} + safeWriteSecret() { + # Placeholder: + # - Implement code to fetch the keys if using any cloud-native service or platform different from HashiCorp to store the keys + # - After fetching the keys, create Kubernetes secrets from them + # - For guidance, refer to the code written for HashiCorp Vault for the same purpose + return 0 + } +{{- end }} + # Check if secondary genesis is enabled + {{- if .Values.settings.secondaryGenesis }} + echo "Secondary Genesis, configmaps are created from local files." + {{- else }} + # Use quorum-genesis-tool to generate genesis, keys and other required files + FOLDER_PATH=$(quorum-genesis-tool --consensus {{ .Values.rawGenesisConfig.genesis.config.algorithm.consensus }} \ + {{ if .Values.rawGenesisConfig.blockchain.nodes.generate }} --validators {{ .Values.rawGenesisConfig.blockchain.nodes.count }} {{ else }} --validators 0 {{ end }} \ + --members 0 --bootnodes 0 --chainID {{ .Values.rawGenesisConfig.genesis.config.chainId }} --blockperiod {{ .Values.rawGenesisConfig.genesis.config.algorithm.blockperiodseconds }} \ + --emptyBlockPeriod {{ .Values.rawGenesisConfig.genesis.config.algorithm.emptyBlockPeriod }} --epochLength {{ .Values.rawGenesisConfig.genesis.config.algorithm.epochlength }} \ + --requestTimeout {{ .Values.rawGenesisConfig.genesis.config.algorithm.requesttimeoutseconds }} --difficulty {{ .Values.rawGenesisConfig.genesis.difficulty }} \ + --gasLimit {{ .Values.rawGenesisConfig.genesis.gasLimit }} --coinbase {{ .Values.rawGenesisConfig.genesis.coinbase }} \ + {{ if .Values.rawGenesisConfig.blockchain.accountPassword }} --accountPassword {{ .Values.rawGenesisConfig.blockchain.accountPassword }} {{ end }} \ + --quickstartDevAccounts {{ .Values.rawGenesisConfig.genesis.includeQuickStartAccounts }} \ + --outputPath /generated-config | tail -1 | sed -e "s/^Artifacts in folder: //") + + # Check if quorum-genesis configmap exists, if not, create one + if ! 
kubectl get configmap "quorum-genesis" --namespace {{ .Release.Namespace }} &> /dev/null; then + kubectl create configmap "quorum-genesis" --namespace {{ .Release.Namespace }} --from-file=genesis.json="$FOLDER_PATH/goQuorum/genesis.json" + fi + + # Count the number of validators based on their directories + validator_count=$(ls -d $FOLDER_PATH/validator* | wc -l) + # Iterate through the validators using a for loop + for ((i = 0; i < validator_count; i++)); do + current_validator_dir="$FOLDER_PATH/validator${i}" + + {{- if and (ne .Values.global.cluster.provider "minikube") (.Values.global.cluster.cloudNativeServices) }} + # Safely write keys for cloud-native services + safeWriteSecret quorum-node-validator-${i}-nodekey ${current_validator_dir}/nodekey + safeWriteSecret quorum-node-validator-${i}-nodekeypub ${current_validator_dir}/nodekey.pub + safeWriteSecret quorum-node-validator-${i}-enode ${current_validator_dir}/nodekey.pub + safeWriteSecret quorum-node-validator-${i}-address ${current_validator_dir}/address + kubectl create configmap --namespace {{ .Release.Namespace }} "quorum-node-validator-${i}-address" --from-file=address=${current_validator_dir}/address + safeWriteSecret quorum-node-validator-${i}-accountPrivateKey ${current_validator_dir}/accountPrivateKey + safeWriteSecret quorum-node-validator-${i}-accountPassword ${current_validator_dir}/accountPassword + safeWriteSecret quorum-node-validator-${i}-accountKeystore ${current_validator_dir}/accountKeystore + safeWriteSecret quorum-node-validator-${i}-accountAddress ${current_validator_dir}/accountAddress + {{- else }} + # Safely write keys and genesis to the Hashicorp Vault + safeWriteSecret "quorum-node-validator-${i}-keys" "${current_validator_dir}" + safeWriteGenesis "quorum-genesis" "$FOLDER_PATH/goQuorum" + {{- end }} + + # Check if Kubernetes secret exists, if not, create one + if ! kubectl get secret quorum-node-validator-${i}-keys --namespace {{ .Release.Namespace }} &> /dev/null; then + kubectl create secret generic quorum-node-validator-${i}-keys --namespace {{ .Release.Namespace }} \ + --from-literal=accountAddress=$(cat "${current_validator_dir}/accountAddress") \ + --from-literal=accountKeystore=$(cat "${current_validator_dir}/accountKeystore") \ + --from-literal=accountPassword=$(cat "${current_validator_dir}/accountPassword") \ + --from-literal=accountPrivateKey=$(cat "${current_validator_dir}/accountPrivateKey") \ + --from-literal=address=$(cat "${current_validator_dir}/address") \ + --from-literal=nodekey=$(cat "${current_validator_dir}/nodekey") \ + --from-literal=nodekey.pub=$(cat "${current_validator_dir}/nodekey.pub") + fi + done + {{- end }} + echo "Completed." +{{- if eq .Values.global.vault.type "hashicorp" }} + volumes: + - name: scripts-volume + configMap: + name: bevel-vault-script + defaultMode: 0777 +{{- end }} diff --git a/platforms/quorum/charts/quorum-genesis/values.yaml b/platforms/quorum/charts/quorum-genesis/values.yaml new file mode 100644 index 00000000000..a63179ee576 --- /dev/null +++ b/platforms/quorum/charts/quorum-genesis/values.yaml @@ -0,0 +1,54 @@ +# The following are for overriding global values +global: + #Provide the service account name which will be created. + serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws + cloudNativeServices: false # 'false' is implemented + #Provide the kubernetes host url + #Eg. 
kubernetesUrl: https://10.3.8.5:6443 + kubernetesUrl: + vault: + #Provide the type of vault + type: hashicorp + #Provide the vault role used. + role: vault-role + #Provide the network type + network: quorum + #Provide the vault server address + address: + #Provide the vault authPath configured to be used. + authPath: supplychain + #Provide the secret engine. + secretEngine: secretsv2 + #Provide the vault path where the secrets will be stored + secretPrefix: "data/supplychain" +image: + repository: ghcr.io/hyperledger/bevel-k8s-hooks + tag: qgt-0.2.12 + pullPolicy: IfNotPresent + # Provide the secret to use if private repository + # Eg. pullSecret: regcred + pullSecret: +settings: + # Flag to ensure the genesis configmap is removed on helm uninstall + removeGenesisOnDelete: true +rawGenesisConfig: + genesis: + config: + chainId: 1337 + algorithm: + consensus: qbft # choose from: ibft | qbft | raft | clique + blockperiodseconds: 10 + emptyBlockPeriod: 60 + epochlength: 30000 + requesttimeoutseconds: 20 + gasLimit: '0x47b760' + difficulty: '0x1' + coinbase: '0x0000000000000000000000000000000000000000' + includeQuickStartAccounts: false + blockchain: + nodes: # refers to validators/signers + generate: true + count: 4 + accountPassword: 'password' diff --git a/platforms/quorum/charts/quorum-ibft-crypto-gen/Chart.yaml b/platforms/quorum/charts/quorum-ibft-crypto-gen/Chart.yaml deleted file mode 100644 index 8e07980511e..00000000000 --- a/platforms/quorum/charts/quorum-ibft-crypto-gen/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "Quorum: This Helm Chart generates the crypto materials for ibft consensus only if they are not already available in the vault." -name: quorum-ibft-crypto-gen -version: 1.0.0 diff --git a/platforms/quorum/charts/quorum-ibft-crypto-gen/README.md b/platforms/quorum/charts/quorum-ibft-crypto-gen/README.md deleted file mode 100644 index 9f37111676c..00000000000 --- a/platforms/quorum/charts/quorum-ibft-crypto-gen/README.md +++ /dev/null @@ -1,194 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) -[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# IBFT Crypto GoQuorum Deployment - -- [IBFT Crypto GoQuorum Deployment Helm Chart](#ibft-crypto-goquorum-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Verification](#verification) -- [Updating the Deployment](#updating-the-deployment) -- [Deletion](#deletion) -- [Contributing](#contributing) -- [License](#license) - - -## IBFT Crypto GoQuorum Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-ibft-crypto-gen) responsible for interacting with a Vault server to fetch and validate secrets, as well as generating and saving cryptographic materials in the vault. - - -## Prerequisites ---- -Before deploying the Helm chart, make sure to have the following prerequisites: - -- Kubernetes cluster up and running. 
-- The GoQuorum network is set up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm installed. - - -## Chart Structure ---- -The structure of the Helm chart is as follows: - -``` -quorum-ibft-crypto-gen/ - |- templates/ - |- helpers.tpl - |- job.yaml - |- Chart.yaml - |- README.md - |- values.yaml -``` - -- `templates/`: This directory contains the template files for generating Kubernetes resources. -- `helpers.tpl`: A template file used for defining custom labels in the Helm chart. -- `job.yaml`: Interacting with a Vault server to fetch and validate secrets, as well as generating and saving cryptographic materials in the vault. -- `Chart.yaml`: Provides metadata about the chart, such as its name, version, and description. -- `README.md`: This file provides information and instructions about the Helm chart. -- `values.yaml`: Contains the default configuration values for the chart. It includes settings for the metadata, peer, image, and Vault configurations. - - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-ibft-crypto-gen/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: - -## Parameters ---- - -### Metadata - -| Name | Description | Default Value | -| ----------------| --------------------------------------------------------------------------------------- | --------------------- | -| namespace | Provide the namespace for organization's peer | default | -| name | Provide the name for quorum-ibft-crypto job release | quorum-crypto-ibft | - -### Peer - -| Name | Description | Default Value | -| ----------------| --------------------------------------------------------------------------------------- | ------------- | -| name | Provide the name of the peer | carrier | -| gethPassphrase | Provide the passphrase for building the crypto files.
Eg. gethPassphrase: 12345 | 12345 | - -### Image - -| Name | Description | Default Value | -| -------------------| -------------------------------------------------------------------------------------------- | --------------------------------- | -| initContainerName | Provide the alpine utils image, which is used for all init-containers of deployments/jobs | ghcr.io/hyperledger/bevel-alpine:latest | -| pullPolicy | Pull policy to be used for the Docker image | IfNotPresent | -| node | Pull quorum Docker image | "" | - -### Vault - -| Name | Description | Default Value | -| ------------------- | ------------------------------------------------------------------------| ------------- | -| address | Provide the vault address/URL | "" | -| role | Provide the vault role used | vault-role | -| authpath | Provide the authpath configured to be used | "" | -| serviceAccountName | Provide the already created service account name autheticated to vault | vault-auth | -| certSecretPrefix | Provide the vault path where the certificates are stored | "" | -| retries | Number of retries to check contents from vault | 30 | - -### Sleep - -| Name | Description | Default Value | -| ------------------------- | ------------------------------------------------------- | ------------- | -| sleepTimeAfterError | Sleep time in seconds when error while registration | 120 | -| sleepTime | custom sleep time in seconds | 20 | - -### Healthcheck - -| Name | Description | Default Value | -| ----------------------------| ----------------------------------------------------------------------------------------------------------- | ------------- | -| readinesscheckinterval | Provide the wait interval in seconds in fetching certificates from vault | 5 | -| readinessthreshold | Provide the threshold number of retries in fetching certificates from vault | 2 | - - - -## Deployment ---- - -To deploy the quorum-ibft-crypto-gen Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-ibft-crypto-gen/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install the chart: - ``` - $ helm repo add bevel https://hyperledger.github.io/bevel/ - $ helm install ./quorum-ibft-crypto-gen - ``` -Replace `` with the desired name for the release. - -This will deploy the ibft node to the Kubernetes cluster based on the provided configurations. - - - -## Verification ---- - -To verify the deployment, we can use the following command: -``` -$ kubectl get jobs -n -``` -Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. - - - -## Updating the Deployment ---- - -If we need to update the deployment with new configurations or changes, modify the same [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-ibft-crypto-gen/values.yaml) file with the desired changes and run the following Helm command: -``` -$ helm upgrade ./quorum-ibft-crypto-gen -``` -Replace `` with the name of the release. This command will apply the changes to the deployment, ensuring the ibft node is up to date. - - - -## Deletion ---- - -To delete the deployment and associated resources, run the following Helm command: -``` -$ helm uninstall -``` -Replace `` with the name of the release. This command will remove all the resources created by the Helm chart. 
- - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [IBFT Crypto GoQuorum Deployment Helm Chart](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-ibft-crypto-gen) , please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - - -## License - -This chart is licensed under the Apache v2.0 license. - -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -``` diff --git a/platforms/quorum/charts/quorum-ibft-crypto-gen/templates/_helpers.tpl b/platforms/quorum/charts/quorum-ibft-crypto-gen/templates/_helpers.tpl deleted file mode 100644 index d43c09d8cef..00000000000 --- a/platforms/quorum/charts/quorum-ibft-crypto-gen/templates/_helpers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} diff --git a/platforms/quorum/charts/quorum-ibft-crypto-gen/templates/job.yaml b/platforms/quorum/charts/quorum-ibft-crypto-gen/templates/job.yaml deleted file mode 100644 index f80b26c519d..00000000000 --- a/platforms/quorum/charts/quorum-ibft-crypto-gen/templates/job.yaml +++ /dev/null @@ -1,198 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: batch/v1 -kind: Job -metadata: - name: "{{ $.Values.metadata.name }}" - namespace: "{{ $.Values.metadata.namespace }}" - labels: - app: "{{ $.Values.metadata.name }}" - app.kubernetes.io/name: "{{ $.Values.metadata.name }}" - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - backoffLimit: 6 - template: - metadata: - labels: - app: "{{ $.Values.metadata.name }}" - app.kubernetes.io/name: "{{ $.Values.metadata.name }}" - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - restartPolicy: OnFailure - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - securityContext: - fsGroup: 1000 - initContainers: - - name: crypto-init - image: {{ $.Values.image.initContainerName }} - imagePullPolicy: {{ $.Values.image.pullPolicy }} - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - - name: MOUNT_PATH - value: "/certcheck" - - name: PEER_NAME - value: "{{ $.Values.peer.name }}" - - name: VAULT_TYPE - value: "{{ $.Values.vault.type }}" - volumeMounts: - - name: certcheck - mountPath: /certcheck - - name: scripts-volume - mountPath: /scripts/bevel-vault.sh - subPath: bevel-vault.sh - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env bash - . /scripts/bevel-vault.sh - mkdir -p ${MOUNT_PATH} - - # Calling a function to retrieve the vault token. - vaultBevelFunc "init" - - # Calling a function to retrieve secrets from Vault only if they exist. - vaultBevelFunc "readJson" "${CERTS_SECRET_PREFIX}/crypto/${PEER_NAME}/quorum" - - checknodekey=$(echo ${VAULT_SECRET} | jq -r '.["keystore"]') - # Create an empty file to indicate that the secret is absent or present in vault. - if [ -z "$checknodekey" ] || [ "$checknodekey" == "null" ] - then - echo "Certificates absent in vault. Ignore error warning." - touch ${MOUNT_PATH}/absent.txt - else - echo "Certificates present in vault." - touch ${MOUNT_PATH}/present.txt - fi - - echo "Done checking for certificates in vault." - containers: - - name: generate-cryptomaterials - image: {{ $.Values.image.node }} - imagePullPolicy: {{ $.Values.image.pullPolicy }} - volumeMounts: - - name: certcheck - mountPath: /certcheck - - name: scripts-volume - mountPath: /scripts/bevel-vault.sh - subPath: bevel-vault.sh - - name: package-manager - mountPath: /scripts/package-manager.sh - subPath: package-manager.sh - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: MOUNT_PATH - value: "/certcheck" - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - - name: GETH_PASSPHRASE - value: "{{ $.Values.peer.gethPassphrase }}" - - name: PEER_NAME - value: "{{ $.Values.peer.name }}" - - name: DB_USER - value: "demouser" - - name: DB_PASSWORD - value: "password" - - name: VAULT_TYPE - value: "{{ $.Values.vault.type }}" - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - . 
/scripts/package-manager.sh - - # Define the packages to install - packages_to_install="jq curl openssl" - install_packages "$packages_to_install" - - . /scripts/bevel-vault.sh - - # Skip secret creation if "present.txt" exists in /certcheck/ - if [ -e /certcheck/present.txt ] - then - echo "Certificates already present in the vault. Skipping." - exit 0 - fi - - echo "Fetching nodekey from vault" - # Calling a function to retrieve the vault token. - vaultBevelFunc "init" - # Calling a function to retrieve secrets from Vault only if they exist. - vaultBevelFunc "readJson" "${CERTS_SECRET_PREFIX}/crypto/${PEER_NAME}/quorum" - nodekey=$(echo ${VAULT_SECRET} | jq -r '.["nodekey"]') - - echo "If nodekey does not exist, we generate one and will be saved in vault" - if [ -z "$nodekey" ] || [ "$nodekey" == "null" ] - then - bootnode -genkey nodevalue - nodekey=$(cat nodevalue) - else - echo "Nodekey already exists" - fi - echo "Creating files and certs" - echo $nodekey >> nodekey - echo $GETH_PASSPHRASE >> password - bootnode --nodekey nodekey --writeaddress >> enode - geth account new --datadir ${PEER_NAME} --password password - cp ${PEER_NAME}/keystore/* keystorefile - tr -d "\n\r" < keystorefile > newchange - openssl base64 -in newchange -out base64_keystore - echo " - { - \"data\": { - \"nodekey\": \"$nodekey\", - \"keystore\": \"$(cat base64_keystore)\", - \"db_password\": \"${DB_PASSWORD}\", - \"geth_password\": \"${GETH_PASSPHRASE}\", - \"db_user\": \"${DB_USER}\" - }}" > finalJSON.json - - # Calling a function to write secrets to the Vault. - vaultBevelFunc 'write' "${CERTS_SECRET_PREFIX}/crypto/${PEER_NAME}/quorum" 'finalJSON.json' - - # get nodekey from vault - # Calling a function to retrieve secrets from Vault. - vaultBevelFunc "readJson" "${CERTS_SECRET_PREFIX}/crypto/${PEER_NAME}/quorum" - nodekey=$(echo ${VAULT_SECRET} | jq -r '.["nodekey"]' 2>&1) - # get keystore from vault - keystore=$(echo ${VAULT_SECRET} | jq -r '.["keystore"]' 2>&1) - - if [ "$nodekey" == "null" ] || [ "$cat base64_keystore" == "null" ] || [ "$nodekey" == "parse error"* ] || [ "$cat base64_keystore" == "parse error"* ] - then - echo "certificates write or read fail" - sleep {{ $.Values.sleepTime }} - if [ "$COUNTER" -ge {{ $.Values.vault.retries }} ] - then - echo "Retry attempted $COUNTER times, certificates have not been saved" - exit 1 - fi - COUNTER=`expr "$COUNTER" + 1` - fi - - volumes: - - name: certcheck - emptyDir: - medium: Memory - - name: scripts-volume - configMap: - name: bevel-vault-script - - name: package-manager - configMap: - name: package-manager diff --git a/platforms/quorum/charts/quorum-ibft-crypto-gen/values.yaml b/platforms/quorum/charts/quorum-ibft-crypto-gen/values.yaml deleted file mode 100644 index 92a2e8c1f3e..00000000000 --- a/platforms/quorum/charts/quorum-ibft-crypto-gen/values.yaml +++ /dev/null @@ -1,78 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Default values for Crypto IBFT chart. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -############################################################# -# Basic Configuration # -############################################################# -# Default values for indy-key-mgmt. -# This is a YAML-formatted file. 
-# Declare variables to be passed into your templates. - -metadata: - #Provide the namespace for organization's peer - #Eg. namespace: supplychain-quo - namespace: default - - #Provide the name for quorum-ibft-crypto job release - #Eg. name: quorum-crypto-ibft - name: quorum-crypto-ibft - -peer: - # Provide the name of the peer - # Eg. name: carrier - name: carrier - # Provide the passphrase for building the crypto files - # Eg. 12345 - gethPassphrase: 12345 -# This section contains the Quorum Crypto IBFT metadata. - -# Provide information regarding the Docker images used. -image: - # Provide the alpine utils image, which is used for all init-containers of deployments/jobs. - # NOTE: The alpine image used is the base alpine image with CURL installed. - # Eg. initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - # Pull policy to be used for the Docker image - # Eg. pullPolicy: Always - pullPolicy: IfNotPresent - # Pull quorum Docker image - node: quorumengineering/quorum:21.4.2 - -############################################################# -# HashiCorp Vault Configuration # -############################################################# -# NOTE: Make sure that the vault is already unsealed, intialized and configured to -# use Kubernetes service account token based authentication. -# For more info, see https://www.vaultproject.io/docs/auth/kubernetes - -vault: - # Provide the vault address - # Eg. address: http://vault.example.com:8200 - address: - # Provide the vault role used. - # Eg. role: vault-role - role: vault-role - # Provide the authpath configured to be used. - authpath: - # Provide the service account name autheticated to vault. - # NOTE: Make sure that the service account is already created and autheticated to use the vault. - # Eg. serviceaccountname: vault-auth - serviceAccountName: vault-auth - # Provide the vault path where the certificates are stored - # Eg. certsecretprefix: secret/cenm-org-name - certSecretPrefix: - # Number of retries to check contents from vault - retries: 30 - -############################################################# -# Settings # -############################################################# -# custom sleep time in seconds -sleepTime: 20 diff --git a/platforms/quorum/charts/quorum-member-node/Chart.yaml b/platforms/quorum/charts/quorum-member-node/Chart.yaml deleted file mode 100644 index 78d999da791..00000000000 --- a/platforms/quorum/charts/quorum-member-node/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "Quorum: This Helm chart deploys an Quorum member (non-validator) node with or without the integration of Tessera transaction manager." -name: quorum-member-node -version: 1.0.0 diff --git a/platforms/quorum/charts/quorum-member-node/README.md b/platforms/quorum/charts/quorum-member-node/README.md deleted file mode 100644 index 065ee3b277f..00000000000 --- a/platforms/quorum/charts/quorum-member-node/README.md +++ /dev/null @@ -1,256 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) 
-[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# Quorum Member Node Deployment - -- [Quorum Member Node Deployment Helm Chart](#quorum-member-node-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Verification](#verification) -- [Updating the Deployment](#updating-the-deployment) -- [Deletion](#deletion) -- [Contributing](#contributing) -- [License](#license) - - -## Quorum Member Node Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-member-node) helps to deploy Quorum Member (non-validator) nodes with tessera transaction manager. - - - -## Prerequisites ---- -Before deploying the Helm chart, make sure to have the following prerequisites: - -- Kubernetes cluster up and running. -- The GoQuorum network is set up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Either HAproxy or Ambassador is required as ingress controller. -- Helm installed. - - - -## Chart Structure ---- -The structure of the Helm chart is as follows: - -``` -quorum-member-node/ - |- templates/ - |- _helpers.yaml - |- configmap.yaml - |- deployment.yaml - |- ingress.yaml - |- service.yaml - |- Chart.yaml - |- README.md - |- values.yaml -``` - -- `templates/`: This directory contains the template files for generating Kubernetes resources. -- `helpers.tpl`: A template file used for defining custom labels in the Helm chart. -- `configmap.yaml`: The file defines a ConfigMap that stores the base64-encoded content of the "genesis.json" file under the key "genesis.json.base64" in the specified namespace. -- `deployment.yaml`: This file is a configuration file for deploying a StatefulSet in Kubernetes. It creates a StatefulSet with a specified number of replicas and defines various settings for the deployment. It includes initialization containers for fetching secrets from a Vault server, an init container for initializing the Quorum blockchain network, and a main container for running the Quorum member node. It also specifies volume mounts for storing certificates and data. The StatefulSet ensures stable network identities for each replica. -- `ingress.yaml`: This file is a Kubernetes configuration file for setting up an Ingress resource with HAProxy as the provider. It includes annotations for SSL passthrough and specifies rules for routing traffic based on the host and path. -- `service.yaml`: This file defines a Kubernetes Service with multiple ports for different protocols and targets, and supports Ambassador proxy annotations for specific configurations when using the "ambassador" proxy provider. -- `Chart.yaml`: Provides metadata about the chart, such as its name, version, and description. -- `README.md`: This file provides information and instructions about the Helm chart. -- `values.yaml`: Contains the default configuration values for the chart. It includes configuration for the metadata, image, node, Vault, etc. - - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-member-node/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. 
Here are some important configuration options: - -## Parameters ---- - -### replicaCount - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------ | ------------- | -| replicaCount | Number of replicas | 1 | - -### metadata - -| Name | Description | Default Value | -| ----------------| ---------------------------------------------------------------------------- | ------------- | -| namespace | Provide the namespace for the Quorum node | default | -| labels | Provide any additional labels | "" | - -### image - -| Name | Description | Default Value | -| ---------------| ------------------------------------------------------------------------------------ | ------------------------------------- | -| node | Provide the valid image name and version for quorum node | quorumengineering/quorum:2.1.1 | -| alpineutils | Provide the valid image name and version to read certificates from vault server | ghcr.io/hyperledger/bevel-alpine:latest | -| tessera | Provide the valid image name and version for quorum tessera | quorumengineering/tessera:0.9.2 | -| busybox | Provide the valid image name and version for busybox | busybox | -| mysql | Provide the valid image name and version for MySQL. This is used as the DB for TM | mysql/mysql-server:5.7 | - -### node - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------------------------------ | ------------- | -| name | Provide the name for Quorum node | node-1 | -| status | Provide the status of the node as default,additional | default | -| peer_id | Provide the id which is obtained when the new peer is added for raft consensus | 5 | -| consensus | Provide the consesus for the Quorum network, values can be 'raft' or 'ibft' | ibft | -| mountPath | Provide the mountpath for Quorum pod | /etc/quorum/qdata | -| imagePullSecret | Provide the docker secret name in the namespace | regcred | -| keystore | Provide the keystore file name | keystore_1 | -| servicetype | Provide the k8s service type | ClusterIP | -| ports.rpc | Provide the rpc service ports | 8546 | -| ports.raft | Provide the raft service ports | 50401 | -| ports.tm | Provide the Tessera Transaction Manager service ports | 15013 | -| ports.quorum | Provide the Quorum port | 21000 | -| ports.db | Provide the DataBase port | 3306 | -| dbname | Provide the mysql DB name | demodb | -| mysqluser | Provide the mysql username | demouser | -| mysqlpassword | Provide the mysql user password | password | - -### vault - -| Name | Description | Default Value | -| ---------------- | -------------------------------------------------------------------------| ------------- | -| address | Address/URL of the Vault server. 
| "" | -| secretprefix | Provide the Vault secret path from where secrets will be read | secret/org1/crypto/node_1 | -| serviceaccountname | Provide the service account name verified with Vault | vault-auth | -| keyname | Provide the key name from where Quorum secrets will be read | quorum | -| tm_keyname | Provide the key name from where transaction manager secrets will be read | tm | -| role | Provide the service role verified with Vault | vault-role | -| authpath | Provide the Vault auth path created for the namespace | quorumorg1 | - -### tessera - -| Name | Description | Default Value | -| ------------- | ----------------------------------------------------------------------------------------------------------------- | ------------- | -| dburl | Provide the Database URL | jdbc:mysql://localhost:3306/demodb| -| dbusername | Provide the Database username | demouser | -| dbpassword | Provide the Database password | "" | -| url | Provide the tessera node's own url. This should be local. Use http if tls is OFF | "" | -| othernodes | Provide the list of tessera nodes to connect in `url: ` format. This should be reachable from this node | "" | -| tls | Provide if tessera will use tls | STRICT | -| trust | Provide the server/client trust configuration for transaction manager nodes | TOFU | - - -### genesis - -| Name | Description | Default Value | -| --------| ---------------------------------------------- | ------------- | -| genesis | Provide the genesis.json file in base64 format | "" | - - -### staticnodes - -| Name | Description | Default Value | -| ----------------| --------------------------------------| ------------- | -| staticnodes | Provide the static nodes as an array | "" | - -### proxy - -| Name | Description | Default Value | -| --------------------- | --------------------------------------------------------------------- | ------------- | -| provider | The proxy/ingress provider (ambassador, haproxy) | ambassador | -| external_url | This field contains the external URL of the node | "" | -| portTM | The TM port exposed externally via the proxy | 15013 | -| rpcport | The RPC port exposed externally via the proxy | 15030 | -| quorumport | The Quorum port exposed externally via the proxy | 15031 | -| portRaft | The Raft port exposed externally via the proxy | 15032 | - -### storage - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ------------- | -| storageclassname | The Kubernetes storage class for the node | awsstorageclass | -| storagesize | The memory for the node | 1Gi | -| dbstorage | Provide the memory for database | 1Gi | - - -## Deployment ---- - -To deploy the quorum-member-node Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-member-node/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install the chart: - ``` - $ helm repo add bevel https://hyperledger.github.io/bevel/ - $ helm install ./quorum-member-node - ``` -Replace `` with the desired name for the release. - -This will deploy the quorum member node to the Kubernetes cluster based on the provided configurations. - - - -## Verification ---- - -To verify the deployment, we can use the following command: -``` -$ kubectl get statefulsets -n -``` -Replace `` with the actual namespace where the StatefulSet was created. 
This command will display information about the StatefulSet, including the number of replicas and their current status. - - - -## Updating the Deployment ---- - -If we need to update the deployment with new configurations or changes, modify the same [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-member-node/values.yaml) file with the desired changes and run the following Helm command: -``` -$ helm upgrade ./quorum-member-node -``` -Replace `` with the name of the release. This command will apply the changes to the deployment, ensuring the quorum member node is up to date. - - - -## Deletion ---- - -To delete the deployment and associated resources, run the following Helm command: -``` -$ helm uninstall -``` -Replace `` with the name of the release. This command will remove all the resources created by the Helm chart. - - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Quorum Member Node Deployment Helm Chart](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-member-node), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - - -## License - -This chart is licensed under the Apache v2.0 license. - -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -``` diff --git a/platforms/quorum/charts/quorum-member-node/templates/_helpers.tpl b/platforms/quorum/charts/quorum-member-node/templates/_helpers.tpl deleted file mode 100644 index a4793a721ee..00000000000 --- a/platforms/quorum/charts/quorum-member-node/templates/_helpers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{- define "labels.custom" -}} - {{- range $key, $val := .Values.metadata.labels -}} - {{- $key -}}: {{- $val -}} - {{- end -}} -{{- end -}} diff --git a/platforms/quorum/charts/quorum-member-node/templates/configmap.yaml b/platforms/quorum/charts/quorum-member-node/templates/configmap.yaml deleted file mode 100644 index 1f9acd713e9..00000000000 --- a/platforms/quorum/charts/quorum-member-node/templates/configmap.yaml +++ /dev/null @@ -1,20 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: genesis-{{ .Values.node.name }} - namespace: {{ .Values.metadata.namespace }} - labels: - app.kubernetes.io/name: genesis-{{ .Values.node.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . 
| nindent 2 }} -data: - genesis.json.base64: {{ .Values.genesis | quote }} diff --git a/platforms/quorum/charts/quorum-member-node/templates/deployment.yaml b/platforms/quorum/charts/quorum-member-node/templates/deployment.yaml deleted file mode 100644 index 448984c0a5c..00000000000 --- a/platforms/quorum/charts/quorum-member-node/templates/deployment.yaml +++ /dev/null @@ -1,267 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: apps/v1 -kind: StatefulSet -metadata: - namespace: {{ .Values.metadata.namespace }} - creationTimestamp: null - labels: - app.kubernetes.io/name: {{ .Values.node.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} - name: {{ .Values.node.name }} -spec: - serviceName: {{ .Values.node.name }} - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - app: consortiumchain - service.rpc: {{ .Values.node.name }} - app.kubernetes.io/name: {{ .Values.node.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} - template: - metadata: - creationTimestamp: null - labels: - app: consortiumchain - name: {{ .Values.node.name }} - service.rpc: {{ .Values.node.name }} - app.kubernetes.io/name: {{ .Values.node.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} - spec: - serviceAccountName: {{ .Values.vault.serviceaccountname }} - hostname: {{ .Values.node.name }} - imagePullSecrets: - - name: {{ .Values.node.imagePullSecret }} - volumes: - - name: certificates - emptyDir: - medium: Memory - - name: mysql - emptyDir: - medium: Memory - - name: {{ .Values.node.name }}-genesis-volume - configMap: - name: genesis-{{ .Values.node.name }} - items: - - key: genesis.json.base64 - path: genesis.json.base64 - - name: scripts-volume - configMap: - name: bevel-vault-script - - name: package-manager - configMap: - name: package-manager - initContainers: - - name: certificates-init - image: {{ .Values.images.alpineutils }} - imagePullPolicy: IfNotPresent - env: - - name: VAULT_ADDR - value: {{ .Values.vault.address }} - - name: VAULT_SECRET_PREFIX - value: {{ .Values.vault.secretprefix }} - - name: KUBERNETES_AUTH_PATH - value: {{ .Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ .Values.vault.role }} - - name: MOUNT_PATH - value: "/secret" - - name: VAULT_TYPE - value: "{{ $.Values.vault.type }}" - command: ["sh", "-c"] - args: - - |- - #!/bin/bash - . /scripts/bevel-vault.sh - - # Calling a function to retrieve the vault token. - vaultBevelFunc "init" - - # Calling a function to retrieve secrets from Vault only if they exist. 
- vaultBevelFunc "readJson" "${VAULT_SECRET_PREFIX}/{{ .Values.vault.keyname }}" - nodekey=$(echo ${VAULT_SECRET} | jq -r '.["nodekey"]') - keystore=$(echo ${VAULT_SECRET} | jq -r '.["keystore"]' | base64 -d ) - gethpassword=$(echo ${VAULT_SECRET} | jq -r '.["geth_password"]') - username=$(echo ${VAULT_SECRET} | jq -r '.["db_user"]') - password=$(echo ${VAULT_SECRET} | jq -r '.["db_password"]') - - OUTPUT_PATH="${MOUNT_PATH}/keys" - mkdir -p ${OUTPUT_PATH} - - echo -n "${gethpassword}" > ${OUTPUT_PATH}/passwords.txt - echo -n "${nodekey}" > ${OUTPUT_PATH}/nodekey - echo -n "${keystore}" > ${OUTPUT_PATH}/{{ .Values.node.keystore }} - echo "${username}" > ${OUTPUT_PATH}/username - echo "${password}" > ${OUTPUT_PATH}/password - - if [ {{ $.Values.tm.type }} != "none" ] - then - # Calling a function to retrieve secrets from Vault only if they exist. - vaultBevelFunc "readJson" "${VAULT_SECRET_PREFIX}/{{ .Values.vault.tm_keyname }}" - tmpub=$(echo ${VAULT_SECRET} | jq -r '.["publicKey"]') - tmkey=$(echo ${VAULT_SECRET} | jq -r '.["privateKey"]') - - echo -n "${tmpub}" > ${OUTPUT_PATH}/tm.pub - echo -n "${tmkey}" > ${OUTPUT_PATH}/tm.key - fi - echo "Done checking for certificates in vault." - volumeMounts: - - name: certificates - mountPath: /secret - - name: scripts-volume - mountPath: /scripts/bevel-vault.sh - subPath: bevel-vault.sh - - name: quorum-genesis-init-container - image: "{{ .Values.images.node }}" - imagePullPolicy: IfNotPresent - command: [ "sh" ] - args: - - "-cx" - - "mkdir -p $QUORUM_DATA_DIR; - cat {{ .Values.node.mountPath }}/genesis/genesis.json.base64 | base64 -d > $QUORUM_DATA_DIR/genesis.json; - if [ ! -f $QUORUM_DATA_DIR/genesis_created ]; then - echo \"running geth init\"; - /usr/local/bin/geth --datadir $QUORUM_DATA_DIR init $QUORUM_DATA_DIR/genesis.json; - date > $QUORUM_DATA_DIR/genesis_created; - fi; - " - env: - - name: QUORUM_DATA_DIR - value: {{ .Values.node.mountPath }}/dd - - name: QHOME - value: {{ .Values.node.mountPath }} - volumeMounts: - - name: {{ .Values.node.name }}-pv - mountPath: {{ .Values.node.mountPath }} - - name: {{ .Values.node.name }}-genesis-volume - mountPath: {{ .Values.node.mountPath }}/genesis - containers: - - name: quorum - image: "{{ .Values.images.node }}" - imagePullPolicy: IfNotPresent - command: ["sh"] - args: - - "-cx" - - |- - #!/usr/bin/env sh - . 
/scripts/package-manager.sh - - # Define the packages to install - packages_to_install="curl" - install_packages "$packages_to_install" - - echo -n {{ .Values.staticnodes | toRawJson | quote }} > $QUORUM_DATA_DIR/static-nodes.json - mkdir -p $QUORUM_DATA_DIR/geth - mkdir -p $QUORUM_DATA_DIR/keystore - # touch $qd/passwords.txt - cp $QUORUM_HOME/crypto/keys/{{ .Values.node.keystore }} $QUORUM_DATA_DIR/keystore/ - cp $QUORUM_HOME/crypto/keys/nodekey $QUORUM_DATA_DIR/geth/ - cp $QUORUM_HOME/crypto/keys/passwords.txt $QUORUM_DATA_DIR/password.txt - - rm -f $QUORUM_HOME/crypto/keys/{{ .Values.node.keystore }} - rm -f $QUORUM_HOME/crypto/keys/nodekey - rm -f $QUORUM_HOME/crypto/keys/passwords.txt - - if [ {{ $.Values.tm.type }} != "none" ] - then - until $(curl -k --output /dev/null --silent --head --fail {{ .Values.tessera.url }}/upcheck); do echo 'waiting for transaction manager to start...'; sleep 5; done; - echo transaction manager is up; - fi - - args="" - NODE_STATUS="" - if [ $CONSENSUS == 'raft' ]; then - NODE_STATUS={{ .Values.node.status }} - fi; - if [ $CONSENSUS == 'raft' ] && [ $NODE_STATUS == 'additional' ]; then - args="--raftdnsenable --raft --raftport {{ .Values.node.ports.raft }} --raftblocktime 300 --http.api admin,eth,debug,miner,net,txpool,personal,web3,quorumExtension,raft --raftjoinexisting {{ .Values.node.peer_id }}"; - fi; - if [ $CONSENSUS == 'raft' ] && [ $NODE_STATUS == 'default' ]; then - args="--raftdnsenable --raft --raftport {{ .Values.node.ports.raft }} --raftblocktime 300 --http.api admin,eth,debug,miner,net,txpool,personal,web3,quorumExtension,raft"; - fi; - if [ $CONSENSUS == 'ibft' ]; then - args=" --istanbul.blockperiod 3 --syncmode full --http.api admin,eth,debug,miner,net,txpool,personal,web3,istanbul,quorumExtension" - fi; - - LOCK_STATUS={{ .Values.node.lock }} - if [ $LOCK_STATUS = false ] - then - args=" ${args} --unlock 0 " - fi - - ptm_url="{{ .Values.tessera.clienturl | quote }}" - if [ -n "$ptm_url" ] && [ "{{ $.Values.tm.type }}" != "none" ] - then - ptm_url="--ptm.url $ptm_url" - else - ptm_url="" - fi - - /usr/local/bin/geth \ - --datadir $QUORUM_DATA_DIR \ - $args \ - --identity {{ .Values.node.subject | quote }} \ - --vmdebug \ - --gcmode=archive \ - --nodiscover \ - --nat=none \ - --verbosity 9 \ - --emitcheckpoints \ - --http \ - --http.addr 0.0.0.0 \ - --http.port {{ .Values.node.ports.rpc }} \ - --http.corsdomain "*" \ - --http.vhosts "*" \ - --allow-insecure-unlock \ - --port {{ .Values.node.ports.quorum }} \ - $ptm_url \ - --password $QUORUM_DATA_DIR/password.txt 2>&1 | tee -a $QUORUM_HOME/logs/quorum.log; - - ports: - - containerPort: {{ .Values.node.ports.rpc }} - - containerPort: {{ .Values.node.ports.quorum }} -{{- if eq $.Values.node.consensus "raft" }} - - containerPort: {{ .Values.node.ports.raft }} -{{- end }} - env: - - name: QUORUM_DATA_DIR - value: {{ .Values.node.mountPath }}/dd - - name: QUORUM_HOME - value: {{ .Values.node.mountPath }} - - name: QHOME - value: {{ .Values.node.mountPath }} - - name: TM_HOME - value: {{ .Values.node.mountPath }}/tm/ - - name: CONSENSUS - value: {{ .Values.node.consensus }} - volumeMounts: - - name: certificates - mountPath: {{ .Values.node.mountPath }}/crypto/ - - name: {{ .Values.node.name }}-pv - mountPath: {{ .Values.node.mountPath }} - - name: package-manager - mountPath: /scripts/package-manager.sh - subPath: package-manager.sh - restartPolicy: Always - volumeClaimTemplates: - - metadata: - name: {{ .Values.node.name }}-pv - spec: - storageClassName: {{ .Values.storage.storageclassname 
}} - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: {{ .Values.storage.storagesize }} diff --git a/platforms/quorum/charts/quorum-member-node/templates/ingress.yaml b/platforms/quorum/charts/quorum-member-node/templates/ingress.yaml deleted file mode 100644 index 718a5a10a88..00000000000 --- a/platforms/quorum/charts/quorum-member-node/templates/ingress.yaml +++ /dev/null @@ -1,50 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -{{- if eq .Values.proxy.provider "haproxy" }} -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: {{ .Values.node.name }} - namespace: {{ .Values.metadata.namespace }} - annotations: - kubernetes.io/ingress.class: "haproxy" - ingress.kubernetes.io/ssl-passthrough: "true" -spec: - rules: - - host: {{ .Values.node.name }}rpc.{{ .Values.proxy.external_url }} - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: {{ .Values.node.name }} - port: - number: {{ .Values.node.ports.rpc }} - - host: {{ .Values.node.name }}tcp.{{ .Values.proxy.external_url }} - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: {{ .Values.node.name }} - port: - number: {{ .Values.node.ports.quorum }} - {{- if eq $.Values.node.consensus "raft" }} - - host: {{ .Values.node.name }}raft.{{ .Values.proxy.external_url }} - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: {{ .Values.node.name }} - port: - number: {{ .Values.node.ports.raft }} - {{- end }} -{{- end }} diff --git a/platforms/quorum/charts/quorum-member-node/templates/service.yaml b/platforms/quorum/charts/quorum-member-node/templates/service.yaml deleted file mode 100644 index c49e55b16fe..00000000000 --- a/platforms/quorum/charts/quorum-member-node/templates/service.yaml +++ /dev/null @@ -1,93 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## -apiVersion: v1 -kind: Service -metadata: - namespace: {{ .Values.metadata.namespace }} - annotations: - app: consortiumchain - version: '1' - creationTimestamp: null - labels: - app.kubernetes.io/name: {{ .Values.node.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} - name: {{ .Values.node.name }} -spec: - selector: - app.kubernetes.io/name: {{ .Values.node.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . 
| nindent 2 }} - service.rpc: {{ .Values.node.name }} - type: {{ .Values.node.servicetype }} - ports: - - name: wsrpc-listener - protocol: TCP - port: {{ .Values.node.ports.rpc }} - targetPort: {{ .Values.node.ports.rpc }} -{{- if eq .Values.node.consensus "raft" }} - - name: raft - protocol: TCP - port: {{ .Values.node.ports.raft }} - targetPort: {{ .Values.node.ports.raft }} -{{- end }} - - name: quorum-listener - protocol: TCP - port: {{ .Values.node.ports.quorum }} - targetPort: {{ .Values.node.ports.quorum }} - -{{- if eq .Values.proxy.provider "ambassador" }} -## Listeners for rlpx (p2p) ports ---- -apiVersion: getambassador.io/v3alpha1 -kind: Listener -metadata: - name: {{ .Values.node.name }}-rlpx - namespace: {{ .Values.metadata.namespace }} -spec: - port: {{ .Values.proxy.quorumport }} - protocol: TCP - securityModel: XFP - hostBinding: - namespace: - from: SELF -## Mapping for rpc ---- -apiVersion: getambassador.io/v3alpha1 -kind: Mapping -metadata: - name: {{ .Values.node.name }}-json-rpc - namespace: {{ .Values.metadata.namespace }} -spec: - hostname: '{{ .Values.node.name }}rpc.{{ .Values.proxy.external_url }}' - prefix: / - service: http://{{ .Values.node.name }}.{{ .Values.metadata.namespace }}:{{ .Values.node.ports.rpc }} -## TCPMapping for rlpx ---- -apiVersion: getambassador.io/v3alpha1 -kind: TCPMapping -metadata: - name: {{ .Values.node.name }}-rlpx - namespace: {{ .Values.metadata.namespace }} -spec: - port: {{ .Values.proxy.quorumport }} - service: {{ .Values.node.name }}.{{ .Values.metadata.namespace }}:{{ .Values.node.ports.quorum }} -{{- if eq .Values.node.consensus "raft" }} ---- -apiVersion: getambassador.io/v3alpha1 -kind: TCPMapping -metadata: - name: {{ .Values.node.name }}-raft - namespace: {{ .Values.metadata.namespace }} -spec: - port: {{ .Values.proxy.portRaft }} - service: {{ .Values.node.name }}.{{ .Values.metadata.namespace }}:{{ .Values.node.ports.raft }} -{{- end }} -{{- end }} diff --git a/platforms/quorum/charts/quorum-member-node/values.yaml b/platforms/quorum/charts/quorum-member-node/values.yaml deleted file mode 100644 index 57d04ae3df6..00000000000 --- a/platforms/quorum/charts/quorum-member-node/values.yaml +++ /dev/null @@ -1,133 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -#Provide the number of replicas for pods -#Eg. replicaCount: 1 -replicaCount: 1 - -metadata: - #Provide the namespace for the Quorum node - #Eg. namespace: example-quo - namespace: - #Provide the custom labels - #NOTE: Provide labels other than name, release name , release service, chart version , chart name, run - #These lables will not be applied to VolumeClaimTemplate of StatefulSet as labels are automatically picked up by Kubernetes - #Eg. labels: - # role: minter - labels: - -#These are the various docker images being used by this chart. update them as needed -images: - #Provide the valid image name and version for quorum node - #Eg. node: quorumengineering/quorum:2.1.1 - node: quorumengineering/quorum:2.1.1 - #Provide the valid image name and version to read certificates from vault server - #Eg. 
alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - -node: - #Provide the name for Quorum node - #Eg. name: node-1 - name: node-1 - #Provide the status of the node as default,additional - #Eg. status: default - status: default - #Provide the id which is obtained when the new peer is added for raft consensus - #This field is only for RAFT consensus and only when a new node is added to existing network - #Eg. peer_id: 5 - peer_id: 5 - #Provide the consesus for the Quorum network - # values can be 'raft' or 'ibft' - #Eg. consensus: raft - consensus: ibft - #Provide the mountpath for Quorum pod - #Eg. mountPath: /etc/quorum/qdata - mountPath: /etc/quorum/qdata - #Provide the docker secret name in the namespace - #Eg. imagePullSecret: regcred - imagePullSecret: regcred - #Provide the keystore file name - #Eg. keystore: keystore_1 - keystore: keystore_1 - #Provide the k8s service type - servicetype: ClusterIP - lock: false - #Provide the container and service ports - ports: - rpc: 8546 - raft: 50401 - quorum: 21000 -tm: - type: none -vault: - type: hashicorp - #Provide the Vault Address from where secrets will be read - #Eg. address: http://vault.internal.demo.aws.blockchaincloudpoc.com:9000 - address: - #Provide the Vault secret path from where secrets will be read - #Eg. secretprefix: secret/org1/crypto/node_1 - secretprefix: secret/org1/crypto/node_1 - #Provide the serviceaccount which is verified with Vault - #Eg. serviceaccountname: vault-auth - serviceaccountname: vault-auth - #Provide the key name from where quorum secrets will be read - #Eg. keyname: quorum - keyname: quorum - #Provide the key name from where transaction-manager secrets will be read - #Eg. tm_keyname: tm - tm_keyname: tm - #Provide the service role which is verified with Vault - #Eg. role: vault-role - role: vault-role - #Provide the Vault auth-path which is created for the namespace - #Eg. authpath: quorumorg1 - authpath: quorumorg1 - -tessera: - #Provide the tessera node's own url. This should be local. Use http if tls is OFF - #Eg. url: "https://node1.quo.demo.aws.blockchaincloudpoc.com" - url: - clienturl: - -#Provide the genesis.json file in base64 format -#Eg. genesis: ewogICAgImFsbG9jIjogewogICAgICAgICIwOTg2Nzk2ZjM0ZDhmMWNkMmI0N2M3MzQ2YTUwYmY2 -# OWFhOWM1NzcyIjogewogICAgICAgICAgICAiYmFsYW5jZSI6ICIxMDAwMDAwMDAwMDAwMDAwMDAw -# MDAwMDAwMDAwIgogICAgICAgIH0sCiAgICAgICAgImY2MjkyNTQ1YWVjNTkyMDU4MzQ -genesis: - -#Provide the staticnodes as an array -#Eg. staticnodes: -# - enode://293ce022bf114b14520ad97349a1990180973885cc2afb6f4196b490397e164fabc87900736e4b685c5f4cf31479021ba0d589e58bd0ea6792ebbfd5eb0348af@node1.quo.demo.aws.blockchaincloudpoc.com:15011?discport=0&raftport=15012 -# - enode://4e7a1a15ef6a9bbf30f8b2a6b927f4941c9e80aeeeed14cfeeea619f93256b41ef9994b9a8af371f394c2a6de9bc6930e142c0350399a22081c518ab2d27f92a@node2.quo.demo.aws.blockchaincloudpoc.com:15021?discport=0&raftport=15022 -# - enode://947fa59385da72f4a68b7348ef5dab7e759148b48b30892c29b7b03a872233a6475a13fd5df62ea75abff9981d459606c1f878cd6ab929307eac6b56b19424bd@node3.quo.demo.aws.blockchaincloudpoc.com:15031?discport=0&raftport=15032 -# - enode://b28ac5bd1c554d05d68db65b0c3351838249e5b935e04d4b361745b74e6c7b3379927eefc11a5fef605fa64d14d000645e182662c51f5bf4a9dd228377f0e1ba@node4.quo.demo.aws.blockchaincloudpoc.com:15041?discport=0&raftport=15042 -staticnodes: - -proxy: - #This will be the proxy/ingress provider. 
Can have values "ambassador" or "haproxy" - # TODO "haproxy" has not been tested - #Eg. provider: "ambassador" - provider: "ambassador" - #This field contains the external URL of the node - #Eg. external_url: node1.quo.demo.aws.blockchaincloudpoc.com - external_url: - # Following are the ports that are exposed externally via the proxy - quorumport: 15031 - portRaft: 15032 - -storage: - #Provide the kubernetes storageclass for node - #Eg. storageclassname: awsstorageclass - storageclassname: awsstorageclass - #Provide the memory for node - #Eg. storagesize: 1Gi - storagesize: 1Gi - #Provide the memory for database - #Eg. dbstorage: 1Gi - dbstorage: 1Gi diff --git a/platforms/quorum/charts/quorum-node/Chart.yaml b/platforms/quorum/charts/quorum-node/Chart.yaml new file mode 100644 index 00000000000..45a72532a87 --- /dev/null +++ b/platforms/quorum/charts/quorum-node/Chart.yaml @@ -0,0 +1,25 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## +apiVersion: v1 +name: quorum-node +description: "Quorum: Deploys a Quorum node for a POA network" +version: 1.0.1 +appVersion: latest +keywords: + - bevel + - ethereum + - quorum + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/quorum/charts/quorum-node/README.md b/platforms/quorum/charts/quorum-node/README.md new file mode 100644 index 00000000000..c8e93382c88 --- /dev/null +++ b/platforms/quorum/charts/quorum-node/README.md @@ -0,0 +1,180 @@ +[//]: # (##############################################################################################) +[//]: # (Copyright Accenture. All Rights Reserved.) +[//]: # (SPDX-License-Identifier: Apache-2.0) +[//]: # (##############################################################################################) + +# quorum-node + +This chart is a component of Hyperledger Bevel. The quorum-node chart deploys a Hyperledger quorum node with different settings like validator or member. If enabled, the keys are then stored on the configured vault and stored as Kubernetes secrets. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details. + +## TL;DR + +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install validator-1 bevel/quorum-node +``` + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +If Hashicorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ + +> **Important**: Ensure the `quorum-genesis` chart has been installed before installing this. Also check the dependent charts. + +## Installing the Chart + +To install the chart with the release name `validator-1`: + +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install validator-1 bevel/quorum-node +``` + +The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. 
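+
+For example, common values can be overridden at install time. The sketch below is only illustrative: the namespace `supplychain-quo` and the Vault address are placeholders, so replace them with values from your own environment (all of the keys shown are defined in this chart's `values.yaml`):
+
+```bash
+# Hypothetical overrides for a node release; adjust the placeholders to your setup
+helm install validator-1 bevel/quorum-node \
+  --namespace supplychain-quo --create-namespace \
+  --set global.vault.address=http://vault.example.com:8200 \
+  --set global.vault.authPath=supplychain \
+  --set global.proxy.provider=ambassador \
+  --set global.proxy.externalUrlSuffix=test.blockchaincloudpoc.com \
+  --set storage.size=2Gi
+```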
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `validator-1` deployment:
+
+```bash
+helm uninstall validator-1
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Parameters
+
+### Global parameters
+These parameters are referred to identically in each parent and child chart.
+| Name | Description | Default Value |
+|--------|---------|-------------|
+| `global.serviceAccountName` | The service account name that will be created for Vault auth management | `vault-auth` |
+| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. Currently only `aws` and `minikube` are tested | `aws` |
+| `global.cluster.cloudNativeServices` | Only `false` is implemented; `true` (use Cloud Native Services: SecretsManager and IAM for AWS, KeyVault & Managed Identities for Azure) is reserved for future use | `false` |
+| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` is supported. | `hashicorp` |
+| `global.vault.role` | Role used for authentication with Vault | `vault-role` |
+| `global.vault.address` | URL of the Vault server | `""` |
+| `global.vault.authPath` | Authentication path for Vault | `supplychain` |
+| `global.vault.secretEngine` | The Vault secret engine name | `secretsv2` |
+| `global.vault.secretPrefix` | The Vault secret prefix, which must start with `data/` | `data/supplychain` |
+| `global.proxy.provider` | The proxy or Ingress provider. Can be `none` or `ambassador` | `ambassador` |
+| `global.proxy.externalUrlSuffix` | The external URL suffix at which the Quorum P2P and RPC services will be available | `test.blockchaincloudpoc.com` |
+| `global.proxy.p2p` | The external port at which the Quorum P2P service will be available. This port must be unique for a single cluster and enabled on Ambassador. | `15010` |
+
+### Storage
+
+| Name | Description | Default Value |
+|--------|---------|-------------|
+| `storage.size` | Size of the PVC needed for the Quorum node | `2Gi` |
+| `storage.reclaimPolicy` | Reclaim policy for the PVC. Choose from: `Delete` or `Retain` | `Delete` |
+| `storage.volumeBindingMode` | Volume binding mode for the PVC. Choose from: `Immediate` or `WaitForFirstConsumer` | `Immediate` |
+| `storage.allowedTopologies.enabled` | Check [bevel-storageclass](../../../shared/charts/bevel-storageclass/README.md) for details | `false` |
+
+### Tessera
+This is where you can override the values for the [quorum-tessera-node subchart](../quorum-tessera-node/README.md).
+
+| Name | Description | Default Value |
+|--------|---------|-------------|
+| `tessera.enabled` | Enable Privacy/Tessera for the quorum node | `false` |
+
+### TLS
+This is where you can override the values for the [quorum-tlscert-gen subchart](../quorum-tlscert-gen/README.md).
+ +| Name | Description | Default Value | +|--------|---------|-------------| +| `tls.enabled` | Enable secure access to Tessera node via HTTPS | `false` | + +### Image +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `image.quorum.repository` | Besu image repository | `quorumengineering/quorum`| +| `image.quorum.tag` | Besu image tag as per version of Besu | `22.7.1`| +| `image.hooks.repository` | Quorum/Besu hooks image repository | `ghcr.io/hyperledger/bevel-k8s-hooks` | +| `image.hooks.tag` | Quorum/Besu hooks image tag | `qgt-0.2.12` | +| `image.pullPolicy` | Pull policy to be used for the Docker images | `IfNotPresent` | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | + + +### quorum node + +This contains all the parameters for the quorum node. Please read [Hyperledger quorum documentation](https://besu.hyperledger.org/public-networks/reference/cli/options) for detailed explanation of each parameter. + +| Name | Description | Default Value | +|-------------------------------------------|------------------------------------------------------|---------------| +| `node.quorum.resources.cpuLimit` | CPU Limit for quorum statefulset | `0.7` | +| `node.quorum.resources.cpuRequest` | Initial CPU request for quorum statefulset | `0.5` | +| `node.quorum.resources.memLimit` | Memory Limit for quorum statefulset | `2G` | +| `node.quorum.resources.memRequest` | Initial Memory request for quorum statefulset | `1G` | +| `node.quorum.dataPath` | Mount path for quorum PVC | `data` | +| `node.quorum.logging` | Logging setting for the quorum nodes | `INFO` | +| `node.quorum.account.password` | Password for quorum Account key generation | `password` | +| `node.quorum.account.passwordPath` | Path where the password file will be stored | `data/keystore/accountPassword` | +| `node.quorum.log.verbosity` | Log verbosity setting for quorum nodes | `5` | +| `node.quorum.miner.threads` | Number of mining threads for quorum nodes | `1` | +| `node.quorum.miner.blockPeriod` | Block period for quorum nodes | `5` | +| `node.quorum.p2p.enabled` | Enable P2P communication for quorum nodes | `true` | +| `node.quorum.p2p.addr` | P2P address for quorum nodes | `0.0.0.0` | +| `node.quorum.p2p.port` | P2P port for quorum nodes | `30303` | +| `node.quorum.rpc.enabled` | Enable RPC for quorum nodes | `true` | +| `node.quorum.rpc.addr` | RPC address for quorum nodes | `0.0.0.0` | +| `node.quorum.rpc.port` | RPC port for quorum nodes | `8545` | +| `node.quorum.rpc.corsDomain` | CORS domain for quorum RPC | `*` | +| `node.quorum.rpc.vHosts` | Virtual hosts for quorum RPC | `*` | +| `node.quorum.rpc.api` | Enabled APIs for quorum RPC | `admin,db,eth,debug,miner,net,shh,txpool,personal,web3,quorum,istanbul` | +| `node.quorum.rpc.authenticationEnabled`| Enable authentication for quorum RPC | `false` | +| `node.quorum.ws.enabled` | Enable WebSocket communication for quorum nodes | `true` | +| `node.quorum.ws.addr` | WebSocket address for quorum nodes | `0.0.0.0` | +| `node.quorum.ws.port` | WebSocket port for quorum nodes | `8546` | +| `node.quorum.ws.api` | Enabled APIs for quorum WebSocket | `admin,db,eth,debug,miner,net,shh,txpool,personal,web3,quorum,istanbul` | +| `node.quorum.ws.origins` | Allowed origins for quorum WebSocket | `*` | +| `node.quorum.ws.authenticationEnabled` | Enable authentication for quorum WebSocket | `false` | +| `node.quorum.graphql.enabled` | Enable GraphQL API for quorum nodes | `true` | +| 
`node.quorum.graphql.addr` | GraphQL address for quorum nodes | `0.0.0.0` | +| `node.quorum.graphql.port` | GraphQL port for quorum nodes | `8547` | +| `node.quorum.graphql.corsDomain` | CORS domain for quorum GraphQL | `*` | +| `node.quorum.graphql.vHosts` | Virtual hosts for quorum GraphQL | `*` | +| `node.quorum.metrics.enabled` | Enable metrics collection for quorum nodes | `true` | +| `node.quorum.metrics.pprofaddr` | Address for accessing pprof endpoints for metrics | `0.0.0.0` | +| `node.quorum.metrics.pprofport` | Port for accessing pprof endpoints for metrics | `9545` | +| `node.quorum.metrics.serviceMonitorEnabled` | Enable ServiceMonitor for Prometheus integration | `false` | +| `node.quorum.privacy.url` | URL for Tessera privacy manager | `http://localhost:9101` | +| `node.quorum.privacy.pubkeysPath` | Path for Tessera public keys | `/tessera` | +| `node.quorum.privacy.pubkeyFile` | File containing Tessera public key | `/tessera/tm.pub` | + +### Common parameters + +| Name | Description | Default Value | +|--------|---------|-------------| +| `volumePermissionsFix` | fixes permissions of volumes because besu runs as user `besu` and volumes prefer `root` | `- minikube`
`- aws` | +| `labels.service` | Custom labels in yaml k-v format | `[]` | +| `labels.pvc` | Custom labels in yaml k-v format | `[]` | +| `labels.deployment` | Custom labels in yaml k-v format | `[]` | + +## License + +This chart is licensed under the Apache v2.0 license. + +Copyright © 2023 Accenture + +### Attribution + +This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: + +``` +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +``` diff --git a/platforms/quorum/charts/quorum-node/requirements.yaml b/platforms/quorum/charts/quorum-node/requirements.yaml new file mode 100644 index 00000000000..e127c6e4b9e --- /dev/null +++ b/platforms/quorum/charts/quorum-node/requirements.yaml @@ -0,0 +1,21 @@ +dependencies: + - name: bevel-storageclass + alias: storage + repository: "file://../../../shared/charts/bevel-storageclass" + tags: + - storage + version: ~1.0.0 + - name: quorum-tessera-node + alias: tessera + repository: "file://../quorum-tessera-node" + tags: + - tessera + version: ~1.0.0 + condition: tessera.enabled + - name: quorum-tls-gen + alias: tls + repository: "file://../quorum-tlscert-gen" + tags: + - bevel + version: ~1.0.0 + condition: tls.enabled diff --git a/platforms/quorum/charts/quorum-node/templates/_helpers.tpl b/platforms/quorum/charts/quorum-node/templates/_helpers.tpl new file mode 100644 index 00000000000..38ded90397a --- /dev/null +++ b/platforms/quorum/charts/quorum-node/templates/_helpers.tpl @@ -0,0 +1,43 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "quorum-node.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "quorum-node.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "quorum-node.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create static nodes url depending on proxy +*/}} +{{- define "quorum-node.enodeURL" -}} +{{- $fullname := include "quorum-node.fullname" . 
-}} +{{- $port := .Values.node.quorum.p2p.port | int -}} +{{- $extport := .Values.global.proxy.p2p | int -}} +{{- if eq .Values.global.proxy.provider "ambassador" -}} + {{- printf "%s.%s:%d" .Release.Name .Values.global.proxy.externalUrlSuffix $extport | quote }} +{{- else -}} + {{- printf "%s-0.%s.%s.svc.cluster.local:%d" $fullname $fullname .Release.Namespace $port | quote }} +{{- end -}} +{{- end -}} diff --git a/platforms/quorum/charts/quorum-node/templates/node-hook-pre-delete.yaml b/platforms/quorum/charts/quorum-node/templates/node-hook-pre-delete.yaml new file mode 100644 index 00000000000..b981de3dcee --- /dev/null +++ b/platforms/quorum/charts/quorum-node/templates/node-hook-pre-delete.yaml @@ -0,0 +1,92 @@ + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "quorum-node.fullname" . }}-pre-delete-hook + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/hook: pre-delete + helm.sh/hook-weight: "0" + helm.sh/hook-delete-policy: "hook-succeeded" + labels: + app.kubernetes.io/name: pre-delete-hook + app.kubernetes.io/component: cleanup + app.kubernetes.io/part-of: {{ include "quorum-node.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +spec: + backoffLimit: 3 + completions: 1 + template: + metadata: + labels: + app.kubernetes.io/name: pre-delete-hook + app.kubernetes.io/instance: {{ .Release.Name }} + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + imagePullSecrets: + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} + restartPolicy: "OnFailure" + containers: + - name: {{ template "quorum-node.fullname" . }}-cleanup + image: "{{ .Values.image.hooks.repository }}:{{ .Values.image.hooks.tag }}" + imagePullPolicy: {{ .Values.image.hooks.pullPolicy }} + command: + - /bin/bash + - -c + args: + - | + + echo "{{ template "quorum-node.fullname" . }} Pre Delete hook ..." + + # Check if the ConfigMap exists + if kubectl get configmap quorum-peers --namespace {{ .Release.Namespace }} &>/dev/null; then + # Get the content of the ConfigMap and save it to a file + if kubectl get configmap quorum-peers --namespace {{ .Release.Namespace }} -o json | jq -r '.data["static-nodes.json"]' > ./static-nodes.json; then + existingStaticNodes=$(cat ./static-nodes.json) + else + echo "Error: Failed to get content of ConfigMap." + exit 1 + fi + fi + + # Check if the secret exists + if kubectl get secret "{{ template "quorum-node.fullname" . }}-keys" --namespace {{ .Release.Namespace }} >/dev/null 2>&1; then + # Retrieve nodekey_pub from the secret + nodekey_pub=$(kubectl get secret "{{ template "quorum-node.fullname" . }}-keys" --namespace {{ .Release.Namespace }} -o json | jq -r '.data["nodekey.pub"]' | base64 -d) + # Check if nodekey_pub is empty + if [ -z "$nodekey_pub" ]; then + echo "Error: Retrieved nodekey.pub is empty." + exit 1 + fi + deleteNode="enode://$nodekey_pub@{{ include "quorum-node.enodeURL" . }}?discport=0" + fi + + # Check if the newStaticNode is already present in existingStaticNodes + if echo "$existingStaticNodes" | jq 'contains(["'"$deleteNode"'"])' | grep -q true; then + # Enode URL exists, remove it + existingStaticNodes=$(echo "$existingStaticNodes" | jq 'map(select(. 
!= "'"${deleteNode}"'"))') + fi + + # Update the static-nodes.json file + echo "$existingStaticNodes" > ./static-nodes.json + + # Check if the ConfigMap exists + if kubectl get configmap quorum-peers --namespace {{ .Release.Namespace }} &>/dev/null; then + # Applying the updated static-nodes.json to the ConfigMap. + if kubectl create configmap quorum-peers --from-file=./static-nodes.json --namespace {{ .Release.Namespace }} --dry-run=client -o yaml | kubectl replace -f -; then + echo "ConfigMap updated successfully." + else + echo "Error: Failed to update ConfigMap." + exit 1 + fi + fi + + # Checking if the secret exist and delete it. + if kubectl get secret "{{ template "quorum-node.fullname" . }}-keys" --namespace {{ .Release.Namespace }} >/dev/null 2>&1; then + kubectl delete secret {{ template "quorum-node.fullname" . }}-keys --namespace {{ .Release.Namespace }} + fi diff --git a/platforms/quorum/charts/quorum-node/templates/node-hooks-pre-install.yaml b/platforms/quorum/charts/quorum-node/templates/node-hooks-pre-install.yaml new file mode 100644 index 00000000000..52b48025ecf --- /dev/null +++ b/platforms/quorum/charts/quorum-node/templates/node-hooks-pre-install.yaml @@ -0,0 +1,221 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "quorum-node.fullname" . }}-pre-install-hook + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install + "helm.sh/hook-weight": "0" + "helm.sh/hook-delete-policy": "before-hook-creation" + labels: + app.kubernetes.io/name: pre-install-hook + app.kubernetes.io/component: keygen + app.kubernetes.io/part-of: {{ include "quorum-node.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +spec: + backoffLimit: 1 + completions: 1 + template: + metadata: + labels: + app.kubernetes.io/name: pre-install-hook + app.kubernetes.io/instance: {{ .Release.Name }} + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + restartPolicy: "OnFailure" + containers: + - name: {{ template "quorum-node.fullname" . }}-keygen + image: {{ .Values.image.hooks.repository }}:{{ .Values.image.hooks.tag }} + securityContext: + runAsUser: 0 +{{- if eq .Values.global.vault.type "hashicorp" }} + volumeMounts: + - name: scripts-volume + mountPath: /scripts/bevel-vault.sh + subPath: bevel-vault.sh + env: + - name: VAULT_ADDR + value: "{{ .Values.global.vault.address }}" + - name: VAULT_SECRET_ENGINE + value: "{{ .Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ .Values.global.vault.secretPrefix }}" + - name: KUBERNETES_AUTH_PATH + value: "{{ .Values.global.vault.authPath }}" + - name: VAULT_APP_ROLE + value: "{{ .Values.global.vault.role }}" + - name: VAULT_TYPE + value: "{{ .Values.global.vault.type }}" +{{- end }} + command: + - /bin/bash + - -c + args: + - | + + # Check if the vault type is HashiCorp +{{- if eq .Values.global.vault.type "hashicorp" }} + # Source the script containing vault-related functions + . /scripts/bevel-vault.sh + + echo "Generate a customize token." 
+ vaultBevelFunc "init" + + # Function to safely write keys + safeWriteSecret() { + local key="$1" + local fpath="$2" + + # Read secret from vault + vaultBevelFunc "readJson" "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/${key}" + + # Check if secrets are available in the vault + if [ "$SECRETS_AVAILABLE" == "yes" ] + then + # Extract secrets from JSON response + local accountAddress=$(echo ${VAULT_SECRET} | jq -r '.["accountAddress"]') + local accountKeystore=$(echo ${VAULT_SECRET} | jq -r '.["accountKeystore_base64"]' | base64 -d) + local accountPassword=$(echo ${VAULT_SECRET} | jq -r '.["accountPassword"]') + local accountPrivateKey=$(echo ${VAULT_SECRET} | jq -r '.["accountPrivateKey"]') + local address=$(echo ${VAULT_SECRET} | jq -r '.["nodeAddress"]') + local nodeKey=$(echo ${VAULT_SECRET} | jq -r '.["nodeKey"]') + nodekey_pub=$(echo ${VAULT_SECRET} | jq -r '.["nodeKeyPub"]') + + # Check if Kubernetes secret exists, if not, create one + if ! kubectl get secret {{ template "quorum-node.fullname" . }}-keys --namespace {{ .Release.Namespace }} &> /dev/null; then + # Create Kubernetes secret from vault secrets + kubectl create secret generic {{ template "quorum-node.fullname" . }}-keys --namespace {{ .Release.Namespace }} \ + --from-literal=accountAddress=${accountAddress} \ + --from-literal=accountKeystore=${accountKeystore} \ + --from-literal=accountPassword=${accountPassword} \ + --from-literal=accountPrivateKey=${accountPrivateKey} \ + --from-literal=address=${address} \ + --from-literal=nodekey=${nodeKey} \ + --from-literal=nodekey.pub=${nodekey_pub} + fi + else + # Read data from files if secrets are not available in the vault + local accountAddress=$(cat "${fpath}/accountAddress") + local accountKeystore_base64=$(cat "${fpath}/accountKeystore" | base64 -w 0) + local accountPassword=$(cat "${fpath}/accountPassword") + local accountPrivateKey=$(cat "${fpath}/accountPrivateKey") + local address=$(cat "${fpath}/address") + local nodekey=$(cat "${fpath}/nodekey") + local nodekey_pub=$(cat "${fpath}/nodekey.pub") + + # Construct JSON payload + echo " + { + \"data\": + { + \"accountAddress\": \"${accountAddress}\", + \"accountKeystore_base64\": \"${accountKeystore_base64}\", + \"accountPassword\": \"${accountPassword}\", + \"accountPrivateKey\": \"${accountPrivateKey}\", + \"nodeAddress\": \"${address}\", + \"nodeKey\": \"${nodekey}\", + \"nodeKeyPub\": \"${nodekey_pub}\" + } + }" > nodePayload.json + + # Push data to vault + vaultBevelFunc 'write' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/${key}" 'nodePayload.json' + + rm nodePayload.json + fi + } +{{- else }} + safeWriteSecret() { + # Placeholder: + # - Implement code to fetch the keys if using any cloud-native service or platform different from HashiCorp to store the keys + # - After fetching the keys, create Kubernetes secrets from them + # - For guidance, refer to the code written for HashiCorp Vault for the same purpose + return 0 + } +{{- end }} + + # Check if the secret exists in Kubernetes + if ! kubectl get secret {{ template "quorum-node.fullname" . }}-keys --namespace {{ .Release.Namespace }} &> /dev/null; then + echo "Secret does not exist. Creating secret." 
+ + # Use quorum-genesis-tool to generate genesis, keys and other required files + FOLDER_PATH=$(quorum-genesis-tool --validators 0 --members 1 --bootnodes 0 \ + {{ if .Values.node.quorum.account.password }} --accountPassword {{ .Values.node.quorum.account.password }} {{ end }} \ + --outputPath /generated-config | tail -1 | sed -e "s/^Artifacts in folder: //") + dir="$FOLDER_PATH/member0" + +{{- if and (ne .Values.global.cluster.provider "minikube") (.Values.global.cluster.cloudNativeServices) }} + # Safely write keys for cloud-native services + echo "Creating keys in vault for {{ template "quorum-node.fullname" . }} ..." + safeWriteSecret {{ template "quorum-node.fullname" . }}-nodekey ${dir}/nodekey + safeWriteSecret {{ template "quorum-node.fullname" . }}-nodekeypub ${dir}/nodekey.pub + safeWriteSecret {{ template "quorum-node.fullname" . }}-enode ${dir}/nodekey.pub + safeWriteSecret {{ template "quorum-node.fullname" . }}-address ${dir}/address + safeWriteSecret {{ template "quorum-node.fullname" . }}-accountPrivateKey ${dir}/accountPrivateKey + safeWriteSecret {{ template "quorum-node.fullname" . }}-accountPassword ${dir}/accountPassword + safeWriteSecret {{ template "quorum-node.fullname" . }}-accountKeystore ${dir}/accountKeystore + safeWriteSecret {{ template "quorum-node.fullname" . }}-accountAddress ${dir}/accountAddress +{{- else }} + # Safely write keys to the Hashicorp Vault + safeWriteSecret "{{ template "quorum-node.fullname" . }}-keys" "${dir}" +{{- end }} + + # Check if Kubernetes secret exists, if not, create one + if ! kubectl get secret {{ template "quorum-node.fullname" . }}-keys --namespace {{ .Release.Namespace }} &> /dev/null; then + kubectl create secret generic {{ template "quorum-node.fullname" . }}-keys --namespace {{ .Release.Namespace }} \ + --from-literal=accountAddress=$(cat "${dir}/accountAddress") \ + --from-literal=accountKeystore=$(cat "${dir}/accountKeystore") \ + --from-literal=accountPassword=$(cat "${dir}/accountPassword") \ + --from-literal=accountPrivateKey=$(cat "${dir}/accountPrivateKey") \ + --from-literal=address=$(cat "${dir}/address") \ + --from-literal=nodekey=$(cat "${dir}/nodekey") \ + --from-literal=nodekey.pub=$(cat "${dir}/nodekey.pub") + + nodekey_pub=$(cat "${dir}/nodekey.pub") + fi + else + echo "Secret exists. Extract modekey.pub key" + nodekey_pub=$(kubectl get secret {{ template "quorum-node.fullname" . }}-keys --namespace {{ .Release.Namespace }} -o json | jq -r '.data["nodekey.pub"]' | base64 -d) + fi + + quorum_peers_configmap="quorum-peers" + # Check if the ConfigMap exists + if kubectl get configmap "$quorum_peers_configmap" --namespace {{ .Release.Namespace }} &> /dev/null; then + # Get the content of the ConfigMap and save it to a file + kubectl get configmap "$quorum_peers_configmap" --namespace {{ .Release.Namespace }} -o json | jq -r '.data["static-nodes.json"]' > ./static-nodes.json + echo "Content of the ConfigMap has been saved to static-nodes.json" + existingStaticNodes=$(cat ./static-nodes.json) + + newStaticNode="enode://$nodekey_pub@{{ include "quorum-node.enodeURL" . }}?discport=0" + + # Check if newStaticNode already exists in existingStaticNodes + if ! echo "$existingStaticNodes" | jq 'contains(["'"$newStaticNode"'"])' | grep -q true; then + existingStaticNodes=$(jq ". 
+ [\"$newStaticNode\"]" <<< "$existingStaticNodes") + echo "$existingStaticNodes" > ./static-nodes.json + + # Update the ConfigMap + kubectl create configmap "$quorum_peers_configmap" --from-file=static-nodes.json=./static-nodes.json --namespace {{ .Release.Namespace }} --dry-run=client -o yaml | kubectl replace -f - + fi + else + echo "ConfigMap $quorum_peers_configmap does not exist in namespace {{ .Release.Namespace }}. Creating one." + + # Creating static-nodes.json file + echo "[" > "./static-nodes.json" + echo "\"enode://$nodekey_pub@{{ include "quorum-node.enodeURL" . }}?discport=0\"" >> "./static-nodes.json" + echo "]" >> "./static-nodes.json" + + # Create ConfigMap + kubectl create configmap "$quorum_peers_configmap" --from-file=static-nodes.json=./static-nodes.json --namespace {{ .Release.Namespace }} + fi + + echo "COMPLETED PRE-HOOK" +{{- if eq .Values.global.vault.type "hashicorp" }} + volumes: + - name: scripts-volume + configMap: + name: bevel-vault-script + defaultMode: 0777 +{{- end }} diff --git a/platforms/quorum/charts/quorum-node/templates/node-service.yaml b/platforms/quorum/charts/quorum-node/templates/node-service.yaml new file mode 100644 index 00000000000..a29d0a264b8 --- /dev/null +++ b/platforms/quorum/charts/quorum-node/templates/node-service.yaml @@ -0,0 +1,126 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "quorum-node.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "quorum-node.fullname" . }} + app.kubernetes.io/component: service + app.kubernetes.io/part-of: {{ include "quorum-node.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + namespace: {{ .Release.Namespace }} +spec: + type: ClusterIP + selector: + app.kubernetes.io/part-of: {{ include "quorum-node.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + ports: + - name: json-rpc + port: {{ .Values.node.quorum.rpc.port }} + targetPort: json-rpc + protocol: TCP + - name: ws + port: {{ .Values.node.quorum.ws.port }} + targetPort: ws + protocol: TCP + - name: graphql + port: {{ .Values.node.quorum.graphql.port }} + targetPort: graphql + protocol: TCP + - name: rlpx + port: {{ .Values.node.quorum.p2p.port }} + targetPort: rlpx + protocol: TCP + - name: discovery + port: {{ .Values.node.quorum.p2p.port }} + targetPort: discovery + protocol: UDP + - name: metrics + port: {{ .Values.node.quorum.metrics.pprofport }} + targetPort: metrics + protocol: TCP + +{{- if and .Values.node.quorum.metrics.enabled .Values.node.quorum.metrics.serviceMonitorEnabled }} +{{- if $.Capabilities.APIVersions.Has "monitoring.coreos.com/v1/ServiceMonitor" }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "quorum-node.fullname" . }}-servicemonitor + labels: + release: monitoring + app: {{ include "quorum-node.fullname" . }} + chart: {{ template "quorum-node.chart" . }} + heritage: {{ .Release.Service }} + namespace: {{ .Release.Namespace }} + app.kubernetes.io/name: {{ include "quorum-node.fullname" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/component: {{ .Release.Name }} + {{- if $.Values.labels }} + {{- range $key, $value := $.Values.labels.service }} + {{- range $k, $v := $value }} + {{ $k }}: {{ $v | quote }} + {{- end }} + {{- end }} + {{- end }} + namespace: {{ .Release.Namespace }} +spec: + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "quorum-node.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: {{ .Release.Name }} + endpoints: + - port: metrics + interval: 15s + path: /metrics + scheme: http + honorLabels: true +{{- end }} +{{- end }} + +{{- if eq .Values.global.proxy.provider "ambassador" }} +## Listeners rlpx (p2p) ports +--- +apiVersion: getambassador.io/v3alpha1 +kind: Listener +metadata: + name: {{ include "quorum-node.fullname" . }}-rlpx + namespace: {{ .Release.Namespace }} +spec: + port: {{ .Values.global.proxy.p2p }} + protocol: TCP + securityModel: XFP + hostBinding: + namespace: + from: SELF +## Mapping for rpc +--- +apiVersion: getambassador.io/v3alpha1 +kind: Mapping +metadata: + name: {{ include "quorum-node.fullname" . }}-json-rpc + namespace: {{ .Release.Namespace }} +spec: + hostname: '{{ .Release.Name }}rpc.{{ .Values.global.proxy.externalUrlSuffix }}' + prefix: / + service: http://{{ include "quorum-node.fullname" . }}.{{ .Release.Namespace }}:{{ .Values.node.quorum.rpc.port }} +## TCPMapping for rlpx +--- +apiVersion: getambassador.io/v3alpha1 +kind: TCPMapping +metadata: + name: {{ include "quorum-node.fullname" . }}-rlpx + namespace: {{ .Release.Namespace }} +spec: + port: {{ .Values.global.proxy.p2p }} + service: {{ include "quorum-node.fullname" . }}.{{ .Release.Namespace }}:{{ .Values.node.quorum.p2p.port }} +{{- end }} diff --git a/platforms/quorum/charts/quorum-node/templates/node-statefulset.yaml b/platforms/quorum/charts/quorum-node/templates/node-statefulset.yaml new file mode 100644 index 00000000000..5277e92b22d --- /dev/null +++ b/platforms/quorum/charts/quorum-node/templates/node-statefulset.yaml @@ -0,0 +1,199 @@ +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "quorum-node.fullname" . }} + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: quorum-statefulset + app.kubernetes.io/component: quorum + app.kubernetes.io/part-of: {{ include "quorum-node.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + {{- if $.Values.labels }} + {{- range $key, $value := $.Values.labels.deployment }} + {{- range $k, $v := $value }} + {{ $k }}: {{ $v | quote }} + {{- end }} + {{- end }} + {{- end }} + namespace: {{ .Release.Namespace }} +spec: + replicas: {{ .Values.node.quorum.replicaCount }} + podManagementPolicy: OrderedReady + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ .Release.Name }} + app.kubernetes.io/name: quorum-statefulset + app.kubernetes.io/component: quorum + app.kubernetes.io/part-of: {{ include "quorum-node.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + serviceName: {{ include "quorum-node.fullname" . 
}} + volumeClaimTemplates: + - metadata: + name: data + labels: + {{- if $.Values.labels }} + {{- range $key, $value := $.Values.labels.pvc }} + {{- range $k, $v := $value }} + {{ $k }}: {{ $v | quote }} + {{- end }} + {{- end }} + {{- end }} + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: storage-{{ .Release.Name }} + resources: + requests: + storage: "{{ .Values.storage.size }}" + template: + metadata: + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: quorum-statefulset + app.kubernetes.io/component: quorum + app.kubernetes.io/part-of: {{ include "quorum-node.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + {{- if $.Values.labels }} + {{- range $key, $value := $.Values.labels.deployment }} + {{- range $k, $v := $value }} + {{ $k }}: {{ $v | quote }} + {{- end }} + {{- end }} + {{- end }} + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: {{ .Values.node.quorum.metrics.pprofport | quote }} + prometheus.io/path: "/metrics" + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + imagePullSecrets: + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} + initContainers: +{{- if has .Values.global.cluster.provider .Values.volumePermissionsFix }} + # fix for minikube and PVC's only writable as root https://github.com/kubernetes/minikube/issues/1990 + - name: volume-permission-quorum + image: busybox + command: ["sh", "-c", "chown -R 1000:1000 /data"] + volumeMounts: + - name: data + mountPath: /data + securityContext: + runAsUser: 0 +{{- end}} + containers: + - name: "{{ include "quorum-node.fullname" . }}-quorum" + image: {{ .Values.image.quorum.repository }}:{{ .Values.image.quorum.tag }} + imagePullPolicy: {{ .Values.image.quorum.imagePullPolicy }} + resources: + requests: + cpu: "{{ .Values.node.quorum.resources.cpuRequest }}" + memory: "{{ .Values.node.quorum.resources.memRequest }}" + limits: + cpu: "{{ .Values.node.quorum.resources.cpuLimit }}" + memory: "{{ .Values.node.quorum.resources.memLimit }}" + volumeMounts: + - name: static-nodes + mountPath: /staticNode/ + - name: genesis + mountPath: /genesis/ + - name: node-keys + mountPath: /nodeSecrets/ + readOnly: true + securityContext: + runAsUser: 0 + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + ports: + - name: json-rpc + containerPort: {{ .Values.node.quorum.rpc.port }} + protocol: TCP + - name: ws + containerPort: {{ .Values.node.quorum.ws.port }} + protocol: TCP + - name: graphql + containerPort: {{ .Values.node.quorum.graphql.port }} + protocol: TCP + - name: rlpx + containerPort: {{ .Values.node.quorum.p2p.port }} + protocol: TCP + - name: discovery + containerPort: {{ .Values.node.quorum.p2p.port }} + protocol: UDP + - name: metrics + containerPort: {{ .Values.node.quorum.metrics.pprofport }} + protocol: TCP + command: + - /bin/sh + - -c + args: + - | + + # Create the necessary directory structure + mkdir -p {{ .Release.Name }}/data/keystore + + # Move files to their respective locations + cp staticNode/static-nodes.json {{ .Release.Name }}/data/ + cp genesis/genesis.json {{ .Release.Name }}/data/ + cp nodeSecrets/nodekey* {{ .Release.Name }}/nodeSecrets/address {{ .Release.Name }}/data/ + cp nodeSecrets/account* {{ .Release.Name }}/data/keystore/ + cd {{ .Release.Name }}/ + + # Initialize the node + geth --datadir data init 
data/genesis.json + + # Extract the address from the account keystore file + export ADDRESS=$(grep -o '"address": *"[^"]*"' ./data/keystore/accountKeystore | grep -o '"[^"]*"$' | sed 's/"//g') + + # Start the node + geth \ + --datadir {{ .Values.node.quorum.dataPath }} \ + --nodiscover --ipcdisable \ + --nat extip:$POD_IP \ + --verbosity {{ .Values.node.quorum.log.verbosity }} \ + --istanbul.blockperiod {{ .Values.node.quorum.miner.blockPeriod }} --mine --miner.threads {{ .Values.node.quorum.miner.threads }} --miner.gasprice 0 --emitcheckpoints \ + --syncmode full --nousb \ + --metrics --pprof --pprof.addr {{ .Values.node.quorum.metrics.pprofaddr | quote }} --pprof.port {{ .Values.node.quorum.metrics.pprofport }} \ + --networkid {{ .Values.node.quorum.networkId }} \ + --port {{ .Values.node.quorum.p2p.port }} \ + {{- if .Values.node.quorum.rpc.enabled }} + --http --http.addr {{ .Values.node.quorum.rpc.addr }} --http.port {{ .Values.node.quorum.rpc.port }} --http.corsdomain {{ .Values.node.quorum.rpc.corsDomain | quote }} --http.vhosts {{ .Values.node.quorum.rpc.vHosts | quote }} --http.api {{ .Values.node.quorum.rpc.api | quote }} \ + {{- end }} + {{- if .Values.node.quorum.ws.enabled }} + --ws --ws.addr {{ .Values.node.quorum.ws.addr }} --ws.port {{ .Values.node.quorum.ws.port }} --ws.origins {{ .Values.node.quorum.ws.origins | quote }} --ws.api {{ .Values.node.quorum.ws.api | quote }} \ + {{- end }} + {{- if hasKey .Values.node.quorum.account "unlock" }} + --unlock ${ADDRESS} --allow-insecure-unlock --password {{ .Values.node.quorum.account.passwordPath }} \ + {{- end }} + livenessProbe: + httpGet: + path: / + port: 8545 + initialDelaySeconds: 180 + periodSeconds: 60 + volumes: + - name: static-nodes + configMap: + name: quorum-peers + - name: genesis + configMap: + name: quorum-genesis + - name: node-keys + secret: + secretName: {{ template "quorum-node.fullname" . }}-keys diff --git a/platforms/quorum/charts/quorum-node/values.yaml b/platforms/quorum/charts/quorum-node/values.yaml new file mode 100644 index 00000000000..cd23e788205 --- /dev/null +++ b/platforms/quorum/charts/quorum-node/values.yaml @@ -0,0 +1,124 @@ +# The following are for overriding global values +global: + #Provide the service account name which will be created. + serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws + cloudNativeServices: false # set to true to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) + # Provide the kubernetes host url + # Eg. kubernetesUrl: https://10.3.8.5:6443 + kubernetesUrl: + vault: + #Provide the type of vault + type: hashicorp # hashicorp | kubernetes + #Provide the vault role used. + role: vault-role + #Provide the network type + network: quorum + #Provide the vault server address + address: + #Provide the vault authPath configured to be used. + authPath: supplychain + #Provide the secret engine. + secretEngine: secretsv2 + #Provide the vault path where the secrets will be stored + secretPrefix: "data/supplychain" + + # This section contains the proxy ports. + proxy: + # Mention the proxy provider. Currently ambassador or none is supported. + provider: # ambassador | none + # Provide the external URL of the proxy. + externalUrlSuffix: # svc.cluster.local | test.blockchaincloudpoc.com + # Mention the p2p port configured on proxy. + # NOTE: Make sure that the port is enabled and not already used. + # Eg. 
p2p: 15010 + p2p: 15010 + +tessera: + enabled: false +tls: + enabled: false + +node: + quorum: + resources: + cpuLimit: 1 + cpuRequest: 0.1 + memLimit: "2G" + memRequest: "1G" + dataPath: "data" + customLabels: {} + networkId: 10 + replicaCount: 1 + account: + unlock: 0 + password: 'password' + passwordPath: "data/keystore/accountPassword" + log: + verbosity: 5 + miner: + threads: 1 + blockPeriod: 5 + p2p: + enabled: true + addr: "0.0.0.0" + port: 30303 + rpc: + enabled: true + addr: "0.0.0.0" + port: 8545 + corsDomain: "*" + vHosts: "*" + api: "admin,db,eth,debug,miner,net,shh,txpool,personal,web3,quorum,istanbul" + authenticationEnabled: false + ws: + enabled: true + addr: "0.0.0.0" + port: 8546 + api: "admin,db,eth,debug,miner,net,shh,txpool,personal,web3,quorum,istanbul" + origins: "*" + authenticationEnabled: false + graphql: + enabled: true + addr: "0.0.0.0" + port: 8547 + corsDomain: "*" + vHosts: "*" + metrics: + enabled: true + pprofaddr: "0.0.0.0" + pprofport: 9545 + serviceMonitorEnabled: false + privacy: # TAI + url: "http://localhost:9101" + pubkeysPath: "/tessera" + pubkeyFile: "/tessera/tm.pub" + +image: + quorum: + repository: quorumengineering/quorum + tag: 22.7.1 + hooks: + repository: ghcr.io/hyperledger/bevel-k8s-hooks + tag: qgt-0.2.12 + pullPolicy: IfNotPresent + pullSecret: + +# Fixes permissions of volumes because besu runs as user `besu` and volumes prefer `root` +volumePermissionsFix: + - minikube + - aws +labels: + service: [] + pvc: [] + deployment: [] + +# Override necessary Subchart values +storage: + size: "2Gi" + # NOTE: when you set this to Retain, the volume WILL persist after the chart is delete and you need to manually delete it + reclaimPolicy: "Delete" # choose from: Delete | Retain + volumeBindingMode: Immediate # choose from: Immediate | WaitForFirstConsumer + allowedTopologies: + enabled: false diff --git a/platforms/quorum/charts/quorum-propose-validator/Chart.yaml b/platforms/quorum/charts/quorum-propose-validator/Chart.yaml new file mode 100644 index 00000000000..16ab5ea775c --- /dev/null +++ b/platforms/quorum/charts/quorum-propose-validator/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +name: quorum-propose-validator +description: "Quorum: Proposes to add or remove a validator with the specified address." +version: 1.0.1 +appVersion: latest +keywords: + - bevel + - ethereum + - quorum + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/quorum/charts/quorum-propose-validator/README.md b/platforms/quorum/charts/quorum-propose-validator/README.md new file mode 100644 index 00000000000..732fb256a26 --- /dev/null +++ b/platforms/quorum/charts/quorum-propose-validator/README.md @@ -0,0 +1,92 @@ +[//]: # (##############################################################################################) +[//]: # (Copyright Accenture. All Rights Reserved.) +[//]: # (SPDX-License-Identifier: Apache-2.0) +[//]: # (##############################################################################################) + +# goquorum-propose-validator + +This chart is a component of Hyperledger Bevel. The goquorum-propose-validator chart injects a new authorization candidate that the validator attempts to push through. If a majority of the validators vote the candidate in/out, the candidate is added/removed in the validator set. 
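+
+Under the hood, the chart runs a Kubernetes Job that loops over the candidate addresses and sends a JSON-RPC proposal to every existing validator endpoint it is given. A minimal sketch of one such call is shown below; it assumes an Istanbul (IBFT) network where the proposal method is `istanbul_propose`, and the validator URL and candidate address are placeholders:
+
+```bash
+# Hypothetical example: vote a candidate in on one existing validator.
+# The Job issues an equivalent request for every candidate/validator combination.
+curl -s -X POST -H "Content-Type: application/json" \
+  --data '{"jsonrpc":"2.0","method":"istanbul_propose","params":["0x<candidate-node-address>", true],"id":1}' \
+  http://validator-1.supplychain-quo:8545
+```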
+
+## TL;DR
+
+```bash
+helm repo add bevel https://hyperledger.github.io/bevel
+helm install propose-validator bevel/quorum-propose-validator
+```
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+
+If Hashicorp Vault is used, then
+- HashiCorp Vault Server 1.13.1+
+
+> **Important**: Also check the dependent charts.
+
+## Installing the Chart
+
+To install the chart with the release name `propose-validator`:
+
+```bash
+helm repo add bevel https://hyperledger.github.io/bevel
+helm install propose-validator bevel/quorum-propose-validator
+```
+
+The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `propose-validator` deployment:
+
+```bash
+helm uninstall propose-validator
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Parameters
+
+### Image
+
+| Name | Description | Default Value |
+| -------------| ---------- | --------- |
+| `image.repository` | Quorum hooks image repository | `ghcr.io/hyperledger/bevel-k8s-hooks` |
+| `image.tag` | Quorum hooks image tag | `qgt-0.2.12` |
+| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` |
+| `image.pullPolicy` | Pull policy to be used for the Docker images | `IfNotPresent` |
+
+### validators
+
+| Name | Description | Default Value |
+| ----------------| ----------- | ------------- |
+| `validators.auth` | Set to 'true' to vote the candidate in and 'false' to vote them out | `true` |
+| `validators.existingValidators` | List of URLs of the existing (already authorized) validators | `""` |
+| `validators.proposeValidatorsAddr` | List of node addresses of the validators that need to be proposed | `""` |
+
+
+## License
+
+This chart is licensed under the Apache v2.0 license.
+
+Copyright © 2023 Accenture
+
+### Attribution
+
+This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here:
+
+```
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+```
diff --git a/platforms/quorum/charts/quorum-propose-validator/templates/_helpers.tpl b/platforms/quorum/charts/quorum-propose-validator/templates/_helpers.tpl
new file mode 100644
index 00000000000..5987378709c
--- /dev/null
+++ b/platforms/quorum/charts/quorum-propose-validator/templates/_helpers.tpl
@@ -0,0 +1,31 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "quorum-propose-validator.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}} +{{- define "quorum-propose-validator.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "quorum-propose-validator.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} diff --git a/platforms/quorum/charts/quorum-propose-validator/templates/propose-validator-job.yaml b/platforms/quorum/charts/quorum-propose-validator/templates/propose-validator-job.yaml new file mode 100644 index 00000000000..fa70738e899 --- /dev/null +++ b/platforms/quorum/charts/quorum-propose-validator/templates/propose-validator-job.yaml @@ -0,0 +1,56 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "quorum-propose-validator.name" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: quorum-propose-validator-job + app.kubernetes.io/component: propose-validator-job + app.kubernetes.io/part-of: {{ include "quorum-propose-validator.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +spec: + backoffLimit: 3 + template: + metadata: + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: quorum-propose-validator-job + app.kubernetes.io/component: propose-validator-job + app.kubernetes.io/part-of: {{ include "quorum-propose-validator.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/managed-by: helm + spec: + restartPolicy: "OnFailure" + containers: + - name: propose-validator + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + securityContext: + runAsUser: 0 + env: + - name: EXISTING_VALIDATOR_URLS + value: "{{- .Values.validators.existingValidators | join " " -}}" + - name: PROPOSE_VALIDATOR_ADDRS + value: "{{- .Values.validators.proposeValidatorsAddr | join " " -}}" + command: ["/bin/sh", "-c"] + args: + - | + + for propose_val_addr in $PROPOSE_VALIDATOR_ADDRS; do + for existing_val_url in $EXISTING_VALIDATOR_URLS; do + # Send proposal to the existing validator + proposal_response=$(curl -s -X POST -H "Content-Type: application/json" --data '{"jsonrpc":"2.0","method":"{{ .Values.validators.consensusMethod}}","params":["'"$propose_val_addr"'",{{ .Values.validators.auth }}],"id":1}' "$existing_val_url") + + # Check if proposal was successful or not + result_count=$(echo "$proposal_response" | grep -c "result") + if [ "$result_count" = 1 ]; then + echo "Node proposed successfully." 
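+              # No "result" field in the JSON-RPC reply means the proposal was rejected; the "error" object is printed below.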
+ else + echo "$proposal_response" | jq -r '.error' + fi + done + done + echo "COMPLETED" diff --git a/platforms/quorum/charts/quorum-propose-validator/values.yaml b/platforms/quorum/charts/quorum-propose-validator/values.yaml new file mode 100644 index 00000000000..8ba08cd6d1a --- /dev/null +++ b/platforms/quorum/charts/quorum-propose-validator/values.yaml @@ -0,0 +1,20 @@ +image: + repository: ghcr.io/hyperledger/bevel-k8s-hooks + tag: qgt-0.2.12 + pullPolicy: IfNotPresent + pullSecret: "" + +validators: + consensusMethod: "istanbul_propose" + auth: true # Set to 'true' to vote the candidate in and 'false' to vote them out + # List of URLs of the existing validators + existingValidators: + # - "http://" + # - "http://" + # - "http://" + # - "http://" + # List of node addresses of the validators that need to be proposed + proposeValidatorsAddr: + # - "<0xnodeAddress-1>" + # - "<0xnodeAddress-2>" + # - "<0xnodeAddress-3>" diff --git a/platforms/quorum/charts/quorum-raft-crypto-gen/Chart.yaml b/platforms/quorum/charts/quorum-raft-crypto-gen/Chart.yaml deleted file mode 100644 index b6d209f0ea6..00000000000 --- a/platforms/quorum/charts/quorum-raft-crypto-gen/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "Quorum: This Helm Chart generates the crypto material for raft consensus only if they are not already available in the vault." -name: quorum-raft-crypto-gen -version: 1.0.0 diff --git a/platforms/quorum/charts/quorum-raft-crypto-gen/README.md b/platforms/quorum/charts/quorum-raft-crypto-gen/README.md deleted file mode 100644 index bfdc9300117..00000000000 --- a/platforms/quorum/charts/quorum-raft-crypto-gen/README.md +++ /dev/null @@ -1,196 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) -[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# RAFT Crypto GoQuorum Deployment - -- [RAFT Crypto GoQuorum Deployment Helm Chart](#raft-crypto-goquorum-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Verification](#verification) -- [Updating the Deployment](#updating-the-deployment) -- [Deletion](#deletion) -- [Contributing](#contributing) -- [License](#license) - - - -## RAFT Crypto GoQuorum Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-raft-crypto-gen) generate the crypto material for raft consensus. - - - -## Prerequisites ---- -Before deploying the Helm chart, make sure to have the following prerequisites: - -- Kubernetes cluster up and running. -- The GoQuorum network is set up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm installed. 
- - -## Chart Structure ---- -The structure of the Helm chart is as follows: - -``` -quorum-raft-crypto-gen/ - |- templates/ - |- helpers.tpl - |- job.yaml - |- Chart.yaml - |- README.md - |- values.yaml -``` - -- `templates/`: This directory contains the template files for generating Kubernetes resources. -- `helpers.tpl`: A template file used for defining custom labels in the Helm chart. -- `job.yaml`: Interacting with a Vault server to fetch and validate secrets, as well as generating and saving cryptographic materials in the vault. -- `Chart.yaml`: Provides metadata about the chart, such as its name, version, and description. -- `README.md`: This file provides information and instructions about the Helm chart. -- `values.yaml`: Contains the default configuration values for the chart. It includes settings for the metadata, peer, image, and Vault configurations. - - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-raft-crypto-gen/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: - -## Parameters ---- - -### Metadata - -| Name | Description | Default Value | -| ----------------| --------------------------------------------------------------------------------------- | --------------------- | -| namespace | Provide the namespace for organization's peer | default | -| name | Provide the name for quorum-raft-crypto job release | quorum-crypto-raft | - -### Peer - -| Name | Description | Default Value | -| ----------------| --------------------------------------------------------------------------------------- | ------------- | -| name | Provide the name of the peer | carrier | -| gethPassphrase | Provide the passphrase for building the crypto files | 12345 | - -### Image - -| Name | Description | Default Value | -| -------------------| -------------------------------------------------------------------------------------------- | --------------------------------- | -| initContainerName | Provide the alpine utils image, which is used for all init-containers of deployments/jobs | ghcr.io/hyperledger/bevel-alpine:latest | -| pullPolicy | Pull policy to be used for the Docker image | IfNotPresent | -| node | Pull quorum Docker image | "" | - -### Vault - -| Name | Description | Default Value | -| ------------------- | ------------------------------------------------------------------------| ------------- | -| address | Provide the vault address/URL | "" | -| role | Provide the vault role used | vault-role | -| authpath | Provide the authpath configured to be used | "" | -| serviceAccountName | Provide the already created service account name autheticated to vault | vault-auth | -| certSecretPrefix | Provide the vault path where the certificates are stored | "" | -| retries | Number of retries to check contents from vault | 30 | - -### Sleep - -| Name | Description | Default Value | -| ------------------------- | ------------------------------------------------------- | ------------- | -| sleepTimeAfterError | Sleep time in seconds when error while registration | 120 | -| sleepTime | custom sleep time in seconds | 20 | - -### Healthcheck - -| Name | Description | Default Value | -| ----------------------------| ----------------------------------------------------------------------------------------------------------- | ------------- | -| readinesscheckinterval | Provide the wait interval in 
seconds in fetching certificates from vault | 5 | -| readinessthreshold | Provide the threshold number of retries in fetching certificates from vault | 2 | - - - -## Deployment ---- - -To deploy the quorum-raft-crypto-gen Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-raft-crypto-gen/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install the chart: - ``` - $ helm repo add bevel https://hyperledger.github.io/bevel/ - $ helm install ./quorum-raft-crypto-gen - ``` -Replace `` with the desired name for the release. - -This will deploy the raft node to the Kubernetes cluster based on the provided configurations. - - - -## Verification ---- - -To verify the deployment, we can use the following command: -``` -$ kubectl get jobs -n -``` -Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. - - - -## Updating the Deployment ---- - -If we need to update the deployment with new configurations or changes, modify the same [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-raft-crypto-gen/values.yaml) file with the desired changes and run the following Helm command: -``` -$ helm upgrade ./quorum-raft-crypto-gen -``` -Replace `` with the name of the release. This command will apply the changes to the deployment, ensuring the raft node is up to date. - - - -## Deletion ---- - -To delete the deployment and associated resources, run the following Helm command: -``` -$ helm uninstall -``` -Replace `` with the name of the release. This command will remove all the resources created by the Helm chart. - - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [RAFT Crypto GoQuorum Deployment Helm chart](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-raft-crypto-gen), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - - -## License - -This chart is licensed under the Apache v2.0 license. - -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-``` diff --git a/platforms/quorum/charts/quorum-raft-crypto-gen/templates/_helpers.tpl b/platforms/quorum/charts/quorum-raft-crypto-gen/templates/_helpers.tpl deleted file mode 100644 index d43c09d8cef..00000000000 --- a/platforms/quorum/charts/quorum-raft-crypto-gen/templates/_helpers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} diff --git a/platforms/quorum/charts/quorum-raft-crypto-gen/templates/job.yaml b/platforms/quorum/charts/quorum-raft-crypto-gen/templates/job.yaml deleted file mode 100644 index 9c29a538a56..00000000000 --- a/platforms/quorum/charts/quorum-raft-crypto-gen/templates/job.yaml +++ /dev/null @@ -1,196 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: batch/v1 -kind: Job -metadata: - name: "{{ $.Values.metadata.name }}" - namespace: "{{ $.Values.metadata.namespace }}" - labels: - app: "{{ $.Values.metadata.name }}" - app.kubernetes.io/name: "{{ $.Values.metadata.name }}" - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - backoffLimit: 6 - template: - metadata: - labels: - app: "{{ $.Values.metadata.name }}" - app.kubernetes.io/name: "{{ $.Values.metadata.name }}" - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - restartPolicy: OnFailure - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - securityContext: - fsGroup: 1000 - initContainers: - - name: crypto-init - image: {{ $.Values.image.initContainerName }} - imagePullPolicy: {{ $.Values.image.pullPolicy }} - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - - name: MOUNT_PATH - value: "/certcheck" - - name: PEER_NAME - value: "{{ $.Values.peer.name }}" - - name: VAULT_TYPE - value: "{{ $.Values.vault.type }}" - volumeMounts: - - name: certcheck - mountPath: /certcheck - - name: scripts-volume - mountPath: /scripts/bevel-vault.sh - subPath: bevel-vault.sh - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env bash - . /scripts/bevel-vault.sh - mkdir -p ${MOUNT_PATH} - - # Calling a function to retrieve the vault token. - vaultBevelFunc "init" - - # Calling a function to retrieve secrets from Vault only if they exist. - vaultBevelFunc "readJson" "${CERTS_SECRET_PREFIX}/crypto/${PEER_NAME}/quorum" - - checknodekey=$(echo ${VAULT_SECRET} | jq -r '.["keystore"]') - # Create an empty file to indicate that the secret is absent or present in vault. - if [ -z "$checknodekey" ] || [ "$checknodekey" == "null" ] - then - echo "Certificates absent in vault. Ignore error warning." - touch ${MOUNT_PATH}/absent.txt - else - echo "Certificates present in vault." - touch ${MOUNT_PATH}/present.txt - fi - - echo "Done checking for certificates in vault." 
- containers: - - name: generate-cryptomaterials - image: {{ $.Values.image.node }} - imagePullPolicy: {{ $.Values.image.pullPolicy }} - volumeMounts: - - name: certcheck - mountPath: /certcheck - - name: scripts-volume - mountPath: /scripts/bevel-vault.sh - subPath: bevel-vault.sh - - name: package-manager - mountPath: /scripts/package-manager.sh - subPath: package-manager.sh - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: MOUNT_PATH - value: "/certcheck" - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - - name: GETH_PASSPHRASE - value: "{{ $.Values.peer.gethPassphrase }}" - - name: PEER_NAME - value: "{{ $.Values.peer.name }}" - - name: DB_USER - value: "demouser" - - name: DB_PASSWORD - value: "password" - - name: VAULT_TYPE - value: "{{ $.Values.vault.type }}" - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - . /scripts/bevel-vault.sh - . /scripts/package-manager.sh - - # Define the packages to install - packages_to_install="jq curl openssl" - install_packages "$packages_to_install" - - # Skip secret creation if "present.txt" exists in /certcheck/ - if [ -e /certcheck/present.txt ] - then - echo "Certificates already present in the vault. Skipping." - exit 0 - fi - - echo "Fetching nodekey from vault" - # Calling a function to retrieve the vault token. - vaultBevelFunc "init" - # Calling a function to retrieve secrets from Vault only if they exist. - vaultBevelFunc "readJson" "${CERTS_SECRET_PREFIX}/crypto/${PEER_NAME}/quorum" - nodekey=$(echo ${VAULT_SECRET} | jq -r '.["nodekey"]') - - echo "If nodekey does not exist, we generate one and will be saved in vault" - if [ -z "$nodekey" ] || [ "$nodekey" == "null" ] - then - bootnode -genkey nodevalue - nodekey=$(cat nodevalue) - else - echo "Nodekey already exists" - fi - echo "Creating files and certs" - echo $nodekey >> nodekey - echo $GETH_PASSPHRASE >> password - bootnode --nodekey nodekey --writeaddress >> enode - geth account new --datadir ${PEER_NAME} --password password - cp ${PEER_NAME}/keystore/* keystorefile - tr -d "\n\r" < keystorefile > newchange - openssl base64 -in newchange -out base64_keystore - echo " - { - \"data\": { - \"nodekey\": \"$nodekey\", - \"keystore\": \"$(cat base64_keystore)\", - \"db_password\": \"${DB_PASSWORD}\", - \"geth_password\": \"${GETH_PASSPHRASE}\", - \"db_user\": \"${DB_USER}\" - }}" > finalJSON.json - - # Calling a function to write secrets to the Vault. - vaultBevelFunc 'write' "${CERTS_SECRET_PREFIX}/crypto/${PEER_NAME}/quorum" 'finalJSON.json' - - # get nodekey from vault - # Calling a function to retrieve secrets from Vault. 
- vaultBevelFunc "readJson" "${CERTS_SECRET_PREFIX}/crypto/${PEER_NAME}/quorum" - nodekey=$(echo ${VAULT_SECRET} | jq -r '.["nodekey"]' 2>&1) - # get keystore from vault - keystore=$(echo ${VAULT_SECRET} | jq -r '.["keystore"]' 2>&1) - - if [ "$nodekey" == "null" ] || [ "$cat base64_keystore" == "null" ] || [ "$nodekey" == "parse error"* ] || [ "$cat base64_keystore" == "parse error"* ] - then - echo "certificates write or read fail" - sleep {{ $.Values.sleepTime }} - if [ "$COUNTER" -ge {{ $.Values.vault.retries }} ] - then - echo "Retry attempted $COUNTER times, certificates have not been saved" - exit 1 - fi - COUNTER=`expr "$COUNTER" + 1` - fi - volumes: - - name: certcheck - emptyDir: - medium: Memory - - name: scripts-volume - configMap: - name: bevel-vault-script - - name: package-manager - configMap: - name: package-manager diff --git a/platforms/quorum/charts/quorum-raft-crypto-gen/values.yaml b/platforms/quorum/charts/quorum-raft-crypto-gen/values.yaml deleted file mode 100644 index b3e940bcab6..00000000000 --- a/platforms/quorum/charts/quorum-raft-crypto-gen/values.yaml +++ /dev/null @@ -1,78 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Default values for Crypto IBFT chart. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -############################################################# -# Basic Configuration # -############################################################# -# Default values for indy-key-mgmt. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -metadata: - #Provide the namespace for organization's peer - #Eg. namespace: supplychain-quo - namespace: default - - #Provide the name for quorum-raft-crypto job release - #Eg. name: quorum-crypto-raft - name: quorum-crypto-raft - -peer: - # Provide the name of the peer - # Eg. name: carrier - name: carrier - # Provide the passphrase for building the crypto files - # Eg. 12345 - gethPassphrase: 12345 -# This section contains the Quorum Crypto IBFT metadata. - -# Provide information regarding the Docker images used. -image: - # Provide the alpine utils image, which is used for all init-containers of deployments/jobs. - # NOTE: The alpine image used is the base alpine image with CURL installed. - # Eg. initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - # Pull policy to be used for the Docker image - # Eg. pullPolicy: Always - pullPolicy: IfNotPresent - # Pull quorum Docker image - node: quorumengineering/quorum:21.4.2 - -############################################################# -# HashiCorp Vault Configuration # -############################################################# -# NOTE: Make sure that the vault is already unsealed, intialized and configured to -# use Kubernetes service account token based authentication. -# For more info, see https://www.vaultproject.io/docs/auth/kubernetes - -vault: - # Provide the vault address - # Eg. address: http://vault.example.com:8200 - address: - # Provide the vault role used. - # Eg. role: vault-role - role: vault-role - # Provide the authpath configured to be used. - authpath: - # Provide the service account name autheticated to vault. 
- # NOTE: Make sure that the service account is already created and autheticated to use the vault. - # Eg. serviceaccountname: vault-auth - serviceAccountName: vault-auth - # Provide the vault path where the certificates are stored - # Eg. certsecretprefix: secret/cenm-org-name - certSecretPrefix: - # Number of retries to check contents from vault - retries: 30 - -############################################################# -# Settings # -############################################################# -# custom sleep time in seconds -sleepTime: 20 diff --git a/platforms/quorum/charts/quorum-tessera-key-mgmt/Chart.yaml b/platforms/quorum/charts/quorum-tessera-key-mgmt/Chart.yaml deleted file mode 100644 index 671cd7bbd94..00000000000 --- a/platforms/quorum/charts/quorum-tessera-key-mgmt/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "Quorum: This Helm chart generates certificates and keys required by Tessera transaction manager." -name: quorum-tessera-key-mgmt -version: 1.0.0 diff --git a/platforms/quorum/charts/quorum-tessera-key-mgmt/README.md b/platforms/quorum/charts/quorum-tessera-key-mgmt/README.md deleted file mode 100644 index eee8ad61192..00000000000 --- a/platforms/quorum/charts/quorum-tessera-key-mgmt/README.md +++ /dev/null @@ -1,181 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) -[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# RAFT Crypto GoQuorum Deployment - -- [Tessera Key Management Deployment Helm Chart](#tessera-key-management-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Verification](#verification) -- [Updating the Deployment](#updating-the-deployment) -- [Deletion](#deletion) -- [Contributing](#contributing) -- [License](#license) - - - -## Tessera Key Management Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-tessera-key-mgmt) helps in generating Tessera crypto. - - - -## Prerequisites ---- -Before deploying the Helm chart, make sure to have the following prerequisites: - -- Kubernetes cluster up and running. -- The GoQuorum network is set up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm installed. - - - -## Chart Structure ---- -The structure of the Helm chart is as follows: - -``` -quorum-tessera-key-mgmt/ - |- templates/ - |- helpers.tpl - |- job.yaml - |- Chart.yaml - |- README.md - |- values.yaml -``` - -- `templates/`: This directory contains the template files for generating Kubernetes resources. -- `helpers.tpl`: A template file used for defining custom labels in the Helm chart. -- `job.yaml`: The job.yaml file defines a Kubernetes Job that runs the "tessera-crypto" container. This container interacts with a Vault server to retrieve secrets and generate tessera keys. 
-- `Chart.yaml`: Provides metadata about the chart, such as its name, version, and description. -- `README.md`: This file provides information and instructions about the Helm chart. -- `values.yaml`: Contains the default configuration values for the chart. It includes settings for the peer, metadata, image, and Vault configurations. - - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-ibft-crypto-gen/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: - -## Parameters ---- - -### Peer - -| Name | Description | Default Value | -| ----------------| --------------------------------------------------------------------------------------- | ------------- | -| name | Provide the name of the peer | node_1 | - -### Metadata - -| Name | Description | Default Value | -| ------------- | --------------------------------------------------------------------------------------- | --------------------- | -| namespace | Provide the namespace for organization's peer | default | -| name | Provide the name for indy-key-mgmt release | indy-key-mgmt | - -### Image - -| Name | Description | Default Value | -| ---------------- | ------------------------------------------------------------- | ------------------------------------------ | -| pullSecret | Pull policy to be used for the Docker image | regcred | -| repository | Provide the image repository for the indy-key-mgmt container | quorumengineering/tessera:hashicorp-21.7.3 | - -### Vault - -| Name | Description | Default Value | -| ------------------- | ------------------------------------------------------------------------| ----------------------------------- | -| address | Provide the vault address/URL | "" | -| authpath | Provide the authpath configured to be used | "" | -| role | Provide the vault role used | vault-role | -| serviceAccountName | Provide the already created service account name autheticated to vault | vault-auth | -| tmprefix | Provide the vault path where the tm secrets are stored | secret/node_1-quo/crypto/node_1/tm | -| keyprefix | Provide the vault path where the keys are stored | secret/node_1-quo/crypto/node_1/key | - - - -## Deployment ---- - -To deploy the quorum-ibft-crypto-gen Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-tessera-key-mgmt/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install the chart: - ``` - $ helm repo add bevel https://hyperledger.github.io/bevel/ - $ helm install ./quorum-tessera-key-mgmt - ``` -Replace `` with the desired name for the release. - -This will deploy the tessera-key-management node to the Kubernetes cluster based on the provided configurations. - - - -## Verification ---- - -To verify the deployment, we can use the following command: -``` -$ kubectl get jobs -n -``` -Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. 
- - - -## Updating the Deployment ---- - -If we need to update the deployment with new configurations or changes, modify the same [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-tessera-key-mgmt/values.yaml) file with the desired changes and run the following Helm command: -``` -$ helm upgrade ./quorum-tessera-key-mgmt -``` -Replace `` with the name of the release. This command will apply the changes to the deployment, ensuring the tessera-key-management node is up to date. - - - -## Deletion ---- - -To delete the deployment and associated resources, run the following Helm command: -``` -$ helm uninstall -``` -Replace `` with the name of the release. This command will remove all the resources created by the Helm chart. - - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Tessera Key Management Deployment Helm Chart](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-tessera-key-mgmt), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - - -## License - -This chart is licensed under the Apache v2.0 license. - -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -``` diff --git a/platforms/quorum/charts/quorum-tessera-key-mgmt/templates/_helpers.tpl b/platforms/quorum/charts/quorum-tessera-key-mgmt/templates/_helpers.tpl deleted file mode 100644 index d43c09d8cef..00000000000 --- a/platforms/quorum/charts/quorum-tessera-key-mgmt/templates/_helpers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} diff --git a/platforms/quorum/charts/quorum-tessera-key-mgmt/templates/job.yaml b/platforms/quorum/charts/quorum-tessera-key-mgmt/templates/job.yaml deleted file mode 100644 index 72daf534166..00000000000 --- a/platforms/quorum/charts/quorum-tessera-key-mgmt/templates/job.yaml +++ /dev/null @@ -1,84 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: batch/v1 -kind: Job -metadata: - name: "{{ $.Values.metadata.name }}" - namespace: "{{ $.Values.metadata.namespace }}" - labels: - app: "{{ $.Values.metadata.name }}" - app.kubernetes.io/name: "{{ $.Values.metadata.name }}" - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - backoffLimit: 6 - template: - metadata: - labels: - app: "{{ $.Values.metadata.name }}" - app.kubernetes.io/name: "{{ $.Values.metadata.name }}" - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - restartPolicy: OnFailure - imagePullSecrets: - - name: "{{ $.Values.image.pullSecret }}" - serviceAccountName: "{{ $.Values.vault.serviceaccountname }}" - volumes: - - name: scripts-volume - configMap: - name: bevel-vault-script - - name: package-manager - configMap: - name: package-manager - containers: - - name: "tessera-crypto" - image: "{{ $.Values.image.repository }}" - imagePullPolicy: IfNotPresent - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: VAULT_SECRET_ENGINE - value: {{ $.Values.vault.secretengine }} - - name: VAULT_KEY_PREFIX - value: "{{ $.Values.vault.keyprefix }}" - - name: PEER_NAME - value: {{ $.Values.peer.name }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: VAULT_SECRET_PREFIX - value: "{{ $.Values.vault.tmprefix }}" - - name: VAULT_TYPE - value: "{{ $.Values.vault.type }}" - command: ["/bin/sh", "-c"] - args: - - |- - #!/usr/bin/env bash - - . /scripts/bevel-vault.sh - . /scripts/package-manager.sh - - # Define the packages to install - packages_to_install="jq curl" - install_packages "$packages_to_install" - - # Calling a function to retrieve the vault token. - vaultBevelFunc "init" - # Generate tessera keys - export HASHICORP_TOKEN="${VAULT_TOKEN}" - /tessera/bin/tessera -keygen -keygenvaulttype HASHICORP -keygenvaulturl ${VAULT_ADDR} -keygenvaultsecretengine ${VAULT_SECRET_ENGINE} -filename ${VAULT_KEY_PREFIX}/${PEER_NAME}/tm - volumeMounts: - - name: scripts-volume - mountPath: /scripts/bevel-vault.sh - subPath: bevel-vault.sh - - name: package-manager - mountPath: /scripts/package-manager.sh - subPath: package-manager.sh diff --git a/platforms/quorum/charts/quorum-tessera-key-mgmt/values.yaml b/platforms/quorum/charts/quorum-tessera-key-mgmt/values.yaml deleted file mode 100644 index 8624c6c0537..00000000000 --- a/platforms/quorum/charts/quorum-tessera-key-mgmt/values.yaml +++ /dev/null @@ -1,58 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Default values for tessera-key-mgmt. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. -peer: - #Provide the name for organization's peer - #Eg. namespace: supplychain-quo1 - name: node_1 - -metadata: - #Provide the namespace for organization's peer - #Eg. 
namespace: supplychain-quo - namespace: default - - #Provide the name for tessera-key-mgmt release - #Eg. name: tessera-key-mgmt - name: tessera-key-mgmt - -image: - #Provide the image repository for the tessera-key-mgmt container - #Eg. repository: quorumengineering/tessera:hashicorp-21.7.3 - repository: quorumengineering/tessera:hashicorp-21.7.3 - - #Provide the image pull secret of image - #Eg. pullSecret: regcred - pullSecret: regcred - - -vault: - #Provide the vault server address - #Eg. address: http://54.226.163.39:8200 - address: - - #Provide the key path for vault - #Eg. authpath: provider.stewards - authpath: - - #Provide the identity for vault - #Eg. role: my-identity - role: vault-role - - # Provide the service account name autheticated to vault. - # NOTE: Make sure that the service account is already created and autheticated to use the vault. - # Eg. serviceAccountName: vault-auth - serviceaccountname: vault-auth - - # Provide the vault path where the tm secrets are stored - # Eg. tmprefix: secret/warehouse-quo/crypto/warehouse/tm - tmprefix: secret/node_1-quo/crypto/node_1/tm - - # Provide the vault path where the keys are stored - # Eg. tmprefix: secret/warehouse-quo/crypto/warehouse/key - keyprefix: secret/node_1-quo/crypto/node_1/key diff --git a/platforms/quorum/charts/quorum-tessera-node/Chart.yaml b/platforms/quorum/charts/quorum-tessera-node/Chart.yaml index 36f7e7f6b47..e5bf20fe3b4 100644 --- a/platforms/quorum/charts/quorum-tessera-node/Chart.yaml +++ b/platforms/quorum/charts/quorum-tessera-node/Chart.yaml @@ -3,9 +3,24 @@ # # SPDX-License-Identifier: Apache-2.0 ############################################################################################## - apiVersion: v1 -appVersion: "2.0" -description: "Quorum: This Helm chart deploys a secure MySQL database and Tessera transaction manager node." name: quorum-tessera-node -version: 1.0.0 +description: "Quorum: Deploys Tessera transaction manager nodes" +version: 1.0.1 +# Tessera version +appVersion: '21.7.3' +keywords: + - bevel + - tessera + - quorum + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/quorum/charts/quorum-tessera-node/README.md b/platforms/quorum/charts/quorum-tessera-node/README.md index 34e0a6ec649..a04cbcc610f 100644 --- a/platforms/quorum/charts/quorum-tessera-node/README.md +++ b/platforms/quorum/charts/quorum-tessera-node/README.md @@ -3,224 +3,139 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) - -# Quorum Tessera Node Deployment +# quorum-tessera-node -- [Quorum Tessera Node Deployment Helm Chart](#quorum-tessera-node-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Verification](#verification) -- [Updating the Deployment](#updating-the-deployment) -- [Deletion](#deletion) -- [Contributing](#contributing) -- [License](#license) +This chart is a component of Hyperledger Bevel. The quorum-tessera-node chart deploys a tessera node with separate Mysql database. If enabled, the keys are then stored on the configured vault and stored as Kubernetes secrets. 
See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details. +## TL;DR - -## Quorum Tessera Node Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-tessera-node) helps to deploy tessera nodes. - +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install my-tessera bevel/quorum-tessera-node +``` - ## Prerequisites ---- -Before deploying the Helm chart, make sure to have the following prerequisites: - -- Kubernetes cluster up and running. -- The GoQuorum network is set up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Either HAproxy or Ambassador is required as ingress controller. -- Helm installed. - - -## Chart Structure ---- -The structure of the Helm chart is as follows: +- Kubernetes 1.19+ +- Helm 3.2.0+ -``` -quorum-tessera-node/ - |- templates/ - |- _helpers.yaml - |- configmap.yaml - |- deployment.yaml - |- ingress.yaml - |- service.yaml - |- Chart.yaml - |- README.md - |- values.yaml -``` +If Hashicorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ -- `templates/`: This directory contains the template files for generating Kubernetes resources. -- `helpers.tpl`: A template file used for defining custom labels in the Helm chart. -- `configmap.yaml`: The file defines a ConfigMap that stores the base64-encoded content of the "genesis.json" file under the key "genesis.json.base64" in the specified namespace. -- `deployment.yaml`: This file is a configuration file for deploying a StatefulSet in Kubernetes. It creates a StatefulSet with a specified number of replicas and defines various settings for the deployment. It includes initialization containers for fetching secrets from a Vault server, an init container for initializing the Quorum blockchain network, and a main container for running the Quorum tessera node. It also specifies volume mounts for storing certificates and data. The StatefulSet ensures stable network identities for each replica. -- `ingress.yaml`: This file is a Kubernetes configuration file for setting up an Ingress resource with HAProxy as the provider. It includes annotations for SSL passthrough and specifies rules for routing traffic based on the host and path. -- `service.yaml`: This file defines a Kubernetes Service with multiple ports for different protocols and targets, and supports Ambassador proxy annotations for specific configurations when using the "ambassador" proxy provider. -- `Chart.yaml`: Provides metadata about the chart, such as its name, version, and description. -- `README.md`: This file provides information and instructions about the Helm chart. -- `values.yaml`: Contains the default configuration values for the chart. It includes configuration for the metadata, image, node, Vault, etc. +> **Important**: Also check the dependent charts. +## Installing the Chart - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-tessera-node/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. 
Here are some important configuration options: - -## Parameters ---- +To install the chart with the release name `my-tessera`: -## Parameters ---- - -### replicaCount - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------ | ------------- | -| replicaCount | Number of replicas | 1 | - -### metadata - -| Name | Description | Default Value | -| ----------------| ---------------------------------------------------------------------------- | ------------- | -| namespace | Provide the namespace for the Quorum node | default | -| labels | Provide any additional labels | "" | - -### image - -| Name | Description | Default Value | -| ---------------| ------------------------------------------------------------------------------------ | ------------------------------------- | -| node | Provide the valid image name and version for quorum node | quorumengineering/quorum:2.1.1 | -| alpineutils | Provide the valid image name and version to read certificates from vault server | ghcr.io/hyperledger/bevel-alpine:latest | -| tessera | Provide the valid image name and version for quorum tessera | quorumengineering/tessera:0.9.2 | -| busybox | Provide the valid image name and version for busybox | busybox | -| mysql | Provide the valid image name and version for MySQL. This is used as the DB for TM | mysql/mysql-server:5.7 | - -### node - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------------------------------ | ------------- | -| name | Provide the name for Quorum node | node-1 | -| status | Provide the status of the node as default,additional | default | -| peer_id | Provide the id which is obtained when the new peer is added for raft consensus | 5 | -| consensus | Provide the consesus for the Quorum network, values can be 'raft' or 'ibft' | ibft | -| mountPath | Provide the mountpath for Quorum pod | /etc/quorum/qdata | -| imagePullSecret | Provide the docker secret name in the namespace | regcred | -| keystore | Provide the keystore file name | keystore_1 | -| servicetype | Provide the k8s service type | ClusterIP | -| ports.rpc | Provide the rpc service ports | 8546 | -| ports.raft | Provide the raft service ports | 50401 | -| ports.tm | Provide the Tessera Transaction Manager service ports | 15013 | -| ports.quorum | Provide the Quorum port | 21000 | -| ports.db | Provide the DataBase port | 3306 | -| dbname | Provide the mysql DB name | demodb | -| mysqluser | Provide the mysql username | demouser | -| mysqlpassword | Provide the mysql user password | password | - -### vault - -| Name | Description | Default Value | -| ---------------- | -------------------------------------------------------------------------| ------------- | -| address | Address/URL of the Vault server. 
| "" | -| secretprefix | Provide the Vault secret path from where secrets will be read | secret/org1/crypto/node_1 | -| serviceaccountname | Provide the service account name verified with Vault | vault-auth | -| keyname | Provide the key name from where Quorum secrets will be read | quorum | -| role | Provide the service role verified with Vault | vault-role | -| authpath | Provide the Vault auth path created for the namespace | quorumorg1 | - -### tessera - -| Name | Description | Default Value | -| ------------- | ----------------------------------------------------------------------------------------------------------------- | --------------------------------- | -| dburl | Provide the Database URL | jdbc:mysql://localhost:3306/demodb| -| dbusername | Provide the Database username | demouser | -| dbpassword | Provide the Database password | password | -| url | Provide the tessera node's own url. This should be local. Use http if tls is OFF | "" | -| othernodes | Provide the list of tessera nodes to connect in `url: ` format. This should be reachable from this node | "" | -| tls | Provide if tessera will use tls | STRICT | -| trust | Provide the server/client trust configuration for transaction manager nodes | TOFU | - -### proxy - -| Name | Description | Default Value | -| --------------------- | --------------------------------------------------------------------- | ------------- | -| provider | The proxy/ingress provider (ambassador, haproxy) | ambassador | -| external_url | This field contains the external URL of the node | "" | -| portTM | The TM port exposed externally via the proxy | 15013 | -| rpcport | The RPC port exposed externally via the proxy | 15030 | -| quorumport | The Quorum port exposed externally via the proxy | 15031 | -| portRaft | The Raft port exposed externally via the proxy | 15032 | - -### storage - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ----------------- | -| storageclassname | The Kubernetes storage class for the node | awsstorageclass | -| storagesize | The memory for the node | 1Gi | -| dbstorage | Provide the memory for database | 1Gi | - - - -## Deployment ---- - -To deploy the quorum-tessera-node Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-tessera-node/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install the chart: - ``` - $ helm repo add bevel https://hyperledger.github.io/bevel/ - $ helm install ./quorum-tessera-node - ``` -Replace `` with the desired name for the release. - -This will deploy the quorum tessera node to the Kubernetes cluster based on the provided configurations. - - - -## Verification ---- - -To verify the deployment, we can use the following command: -``` -$ kubectl get statefulsets -n +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install my-tessera bevel/quorum-tessera-node ``` -Replace `` with the actual namespace where the StatefulSet was created. This command will display information about the StatefulSet, including the number of replicas and their current status. +The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. 
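+
+For reference, a minimal overrides file is sketched below; every value shown (Vault address, auth path, URL suffix, peer URL) is a placeholder, and the full set of options is described under [Parameters](#parameters):
+
+```bash
+# Hypothetical overrides -- adjust the Vault and DNS details to your own environment.
+cat > tessera-overrides.yaml <<EOF
+global:
+  vault:
+    address: "http://vault.example.com:8200"      # placeholder Vault server
+    authPath: "supplychain"
+    secretPrefix: "data/supplychain"              # must start with data/
+  proxy:
+    provider: "ambassador"
+    externalUrlSuffix: "test.blockchaincloudpoc.com"
+tessera:
+  tlsMode: "STRICT"
+  peerNodes:
+    - url: "https://node1.test.blockchaincloudpoc.com"
+EOF
+
+helm install my-tessera bevel/quorum-tessera-node -f tessera-overrides.yaml
+```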
- -## Updating the Deployment ---- +> **Tip**: List all releases using `helm list` -If we need to update the deployment with new configurations or changes, modify the same [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-tessera-node/values.yaml) file with the desired changes and run the following Helm command: -``` -$ helm upgrade ./quorum-tessera-node -``` -Replace `` with the name of the release. This command will apply the changes to the deployment, ensuring the quorum tessera node is up to date. +## Uninstalling the Chart +To uninstall/delete the `my-tessera` deployment: - -## Deletion ---- - -To delete the deployment and associated resources, run the following Helm command: -``` -$ helm uninstall +```bash +helm uninstall my-tessera ``` -Replace `` with the name of the release. This command will remove all the resources created by the Helm chart. +The command removes all the Kubernetes components associated with the chart and deletes the release. - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Quorum Tessera Node Deployment Helm Chart](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-tessera-node), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). +## Parameters +### Global parameters +These parameters are refered to as same in each parent or child chart +| Name | Description | Default Value | +|--------|---------|-------------| +|`global.serviceAccountName` | The serviceaccount name that will be created for Vault Auth management| `vault-auth` | +| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. Currently ony `aws` and `minikube` is tested | `aws` | +| `global.cluster.cloudNativeServices` | only `false` is implemented, `true` to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) is for future | `false` | +| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` is supported. | `hashicorp` | +| `global.vault.role` | Role used for authentication with Vault | `vault-role` | +| `global.vault.address`| URL of the Vault server. | `""` | +| `global.vault.authPath` | Authentication path for Vault | `supplychain` | +| `global.vault.secretEngine` | The value for vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | The value for vault secret prefix which must start with `data/` | `data/supplychain` | +| `global.proxy.provider` | The proxy or Ingress provider. Can be `none` or `ambassador` | `ambassador` | +| `global.proxy.externalUrlSuffix` | The External URL suffix at which the tessera service will be available | `test.blockchaincloudpoc.com` | +| `global.proxy.tmport` | The external port at which the tessera service will be available. 
This port must match `tessera.port` | `443` | + +### Storage + +| Name | Description | Default Value | +|--------|---------|-------------| +|`storage.enabled` | To enable new storage class for Tessera node | `true` | +| `storage.size` | Size of the PVC needed for Tessera | `1Gi` | +| `storage.dbSize` | Size of the PVC needed for the MySql DB | `1Gi` | +| `storage.allowedTopologies.enabled` | Check [bevel-storageclass](../../../shared/charts/bevel-storageclass/README.md) for details | `false` | + +### Image + +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `image.alpineutils.repository` | Alpine utils image repository | `ghcr.io/hyperledger/bevel-alpine-ext` | +| `image.alpineutils.tag` | Alpine utils image tag | `latest` | +| `image.tessera.repository` | Tessera image repository | `quorumengineering/tessera`| +| `image.tessera.tag` | Tessera image tag as per version of Tessera | `22.1.7`| +| `image.busybox`| Repo and default tag for busybox image | `busybox` | +| `image.mysql.repository` | MySQL image repository. This is used as the DB for TM | `mysql/mysql-server` | +| `image.mysql.tag` | MySQL image tag | `5.7` | +| `image.hooks.repository` | Quorum/Besu hooks image repository | `ghcr.io/hyperledger/bevel-k8s-hooks` | +| `image.hooks.tag` | Quorum/Besu hooks image tag | `qgt-0.2.12` | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | +| `image.pullPolicy` | Pull policy to be used for the Docker images | `IfNotPresent` | + + +### Tessera + +| Name | Description | Default Value | +| ----------------| ----------- | ------------- | +| `tessera.removeKeysOnDelete` | Setting to delete the secrets when uninstalling the release | `true` | +| `tessera.dbName` | Name of the MySQL database | `demodb` | +| `tessera.dbUsername` | MySQL Database username | `demouser` | +| `tessera.peerNodes` | List of other tessera peer nodes like `- url: "https://node1.test.blockchaincloudpoc.com"` | `""` | +| `tessera.resources.cpuLimit` | CPU Limit for tessera statefulset | `1` | +| `tessera.resources.cpuRequest` | Initial CPU request for tessera statefulset | `0.25` | +| `tessera.resources.memLimit` | Memory Limit for tessera statefulset | `2G` | +| `tessera.resources.memRequest` | Initial Memory request for tessera statefulset | `1G` | +| `tessera.password` | Password for tessera key generation | `password` | +| `tessera.passwordPath` | Path where the password file will be stored | `/keys/tm.password` | +| `tessera.dataPath` | Mount path for tessera PVC | `/data/tessera` | +| `tessera.keysPath` | Mount path for Tessera keys | `/keys` | +| `tessera.port` | Port at which Tessera service will run | `9000` | +| `tessera.tpport` | Third party port | `9080` | +| `tessera.q2tport` | Client port where quorum nodes will connect | `9101` | +| `tessera.dbport` | Port where MySQL service is running | `3306` | +| `tessera.metrics.enabled` | Enable metrics and monitoring for Tessera node | `true` | +| `tessera.metrics.host` | Host where metrics will be available | `"0.0.0.0"` | +| `tessera.metrics.port` | Port where metrics will be available | `9545` | +| `tessera.metrics.serviceMonitorEnabled` | Enable service monitor | `false` | +| `tessera.tlsMode` | TLS mode for tessera. Options are `"STRICT"` or `"OFF"` | `"STRICT"` | +| `tessera.trust` | Server/Client trust configuration. Only for `STRICT` tlsMode. 
options are: `"WHITELIST"`, `"CA_OR_TOFU"`, `"CA"`, `"TOFU"`| `"CA_OR_TOFU"` | + + +### TLS + +| Name | Description | Default Value | +|--------|---------|-------------| +|`tls.enabled` | To enable TLS cert generation for Tessera node. | `true` | +| `tls.settings.tmTls` | Set the TLS setting for certificate generation. Must be enabled for `tlsMode: STRICT` | `True` | +| `tls.settings.certSubject` | X.509 Subject for the Root CA | `"CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB"` | + +### Common parameters + +| Name | Description | Default Value | +|--------|---------|-------------| +| `labels.service` | Custom labels in yaml k-v format | `[]` | +| `labels.pvc` | Custom labels in yaml k-v format | `[]` | +| `labels.deployment` | Custom labels in yaml k-v format | `[]` | - ## License This chart is licensed under the Apache v2.0 license. diff --git a/platforms/quorum/charts/quorum-tessera-node/requirements.yaml b/platforms/quorum/charts/quorum-tessera-node/requirements.yaml new file mode 100644 index 00000000000..5f3ec035eee --- /dev/null +++ b/platforms/quorum/charts/quorum-tessera-node/requirements.yaml @@ -0,0 +1,8 @@ +dependencies: + - name: bevel-storageclass + alias: storage + repository: "file://../../../shared/charts/bevel-storageclass" + tags: + - storage + version: ~1.0.0 + condition: storage.enabled diff --git a/platforms/quorum/charts/quorum-tessera-node/templates/_helpers.tpl b/platforms/quorum/charts/quorum-tessera-node/templates/_helpers.tpl index 7f9b0dc6131..063a9f83833 100644 --- a/platforms/quorum/charts/quorum-tessera-node/templates/_helpers.tpl +++ b/platforms/quorum/charts/quorum-tessera-node/templates/_helpers.tpl @@ -1,5 +1,60 @@ -{{- define "labels.custom" -}} - {{- range $key, $val := .Values.metadata.labels -}} - {{- $key -}}: {{- $val -}} - {{- end -}} +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "quorum-tessera-node.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "quorum-tessera-node.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "quorum-tessera-node.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create tessera url depending on tls mode +*/}} +{{- define "quorum-tessera-node.tesseraURL" -}} +{{- $fullname := include "quorum-tessera-node.fullname" . 
-}} +{{- $port := .Values.tessera.port | int -}} +{{- $extport := .Values.global.proxy.tmport | int -}} +{{- if eq .Values.tessera.tlsMode "STRICT" -}} +{{- if eq .Values.global.proxy.provider "ambassador" -}} + {{- printf "https://%s.%s:%d" .Release.Name .Values.global.proxy.externalUrlSuffix $extport | quote }} +{{- else -}} + {{- printf "https://%s.%s:%d" $fullname .Release.Namespace $port | quote }} +{{- end -}} +{{- else -}} +{{- if eq .Values.global.proxy.provider "ambassador" -}} + {{- printf "http://%s.%s:%d" .Release.Name .Values.global.proxy.externalUrlSuffix $extport | quote }} +{{- else -}} + {{- printf "http://%s.%s:%d" $fullname .Release.Namespace $port | quote }} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Client URL is defaulted to http; tls certificates need to be checked for using https +*/}} +{{- define "quorum-tessera-node.clientURL" -}} +{{- $fullname := include "quorum-tessera-node.fullname" . -}} +{{- $port := .Values.tessera.q2tport | int -}} +{{- printf "http://%s.%s:%d" $fullname .Release.Namespace $port | quote }} {{- end -}} diff --git a/platforms/quorum/charts/quorum-tessera-node/templates/configmap.yaml b/platforms/quorum/charts/quorum-tessera-node/templates/configmap.yaml index 99e6d226abc..fd8544c1111 100644 --- a/platforms/quorum/charts/quorum-tessera-node/templates/configmap.yaml +++ b/platforms/quorum/charts/quorum-tessera-node/templates/configmap.yaml @@ -8,92 +8,96 @@ apiVersion: v1 kind: ConfigMap metadata: - name: tessera-config-{{ .Values.node.name }} - namespace: {{ .Values.metadata.namespace }} + name: {{ include "quorum-tessera-node.fullname" . }}-tessera-config + namespace: {{ .Release.Namespace }} labels: app.kubernetes.io/name: tessera-config - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + app.kubernetes.io/component: quorum + app.kubernetes.io/part-of: {{ include "quorum-tessera-node.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} -data: +data: tessera-config.json.tmpl: |- { + "mode": "orion", "useWhiteList": "false", "jdbc": { - "username": {{ .Values.tessera.dbusername | quote }}, + "username": {{ .Values.tessera.dbUsername | quote }}, "password": "", - "url": {{ .Values.tessera.dburl | quote }} + "url": "jdbc:mysql://{{ include "quorum-tessera-node.fullname" . }}:{{ .Values.tessera.dbport }}/demodb" }, "serverConfigs": [ { "app": "ThirdParty", "enabled": true, - "serverAddress": "http://{{ .Values.node.name }}-tessera:9080", + "serverAddress": "http://{{ include "quorum-tessera-node.fullname" . }}:{{ .Values.tessera.tpport }}", "communicationType": "REST" }, { "app": "Q2T", "enabled": true, - "serverAddress": {{ .Values.tessera.clienturl | quote }}, + "serverAddress": {{ include "quorum-tessera-node.clientURL" . 
}}, "sslConfig": { "tls": "OFF", "generateKeyStoreIfNotExisted": true, - "serverKeyStore": "{{ .Values.node.mountPath }}/tm/server-keystore", - "serverKeyStorePassword": "quorum", - "serverTrustStore": "{{ .Values.node.mountPath }}/tm/server-truststore", - "serverTrustStorePassword": "quorum", + "sslConfigType": "SERVER_ONLY", "serverTrustMode": {{ .Values.tessera.trust | quote }}, - "knownClientsFile": "{{ .Values.node.mountPath }}/tm/knownClients", - "clientKeyStore": "{{ .Values.node.mountPath }}/tm/client-keystore", - "clientKeyStorePassword": "quorum", - "clientTrustStore": "{{ .Values.node.mountPath }}/tm/client-truststore", - "clientTrustStorePassword": "quorum", + "serverTlsKeyPath": "{{ .Values.tessera.dataPath }}/crypto/tessera_cer.key", + "serverTlsCertificatePath": "{{ .Values.tessera.dataPath }}/crypto/tessera_cer.pem", + "serverTrustCertificates": ["{{ .Values.tessera.dataPath }}/crypto/tessera_ca.pem"], "clientTrustMode": {{ .Values.tessera.trust | quote }}, - "knownServersFile": "{{ .Values.node.mountPath }}/tm/knownServers" + "clientTlsKeyPath": "{{ .Values.tessera.dataPath }}/crypto/tessera_cer.key", + "clientTlsCertificatePath": "{{ .Values.tessera.dataPath }}/crypto/tessera_cer.pem", + "clientTrustCertificates": ["{{ .Values.tessera.dataPath }}/crypto/tessera_ca.pem"], + "knownClientsFile": "{{ .Values.tessera.dataPath }}/crypto/known_client", + "knownServersFile": "{{ .Values.tessera.dataPath }}/crypto/known_server", + "clientAuth": false }, "communicationType": "REST" }, { "app": "P2P", "enabled": true, - "serverAddress": {{ .Values.tessera.url | quote }}, + "serverAddress": {{ include "quorum-tessera-node.tesseraURL" . }}, "sslConfig": { - "tls": {{ .Values.tessera.tls | quote }}, + "tls": {{ .Values.tessera.tlsMode | quote }}, "generateKeyStoreIfNotExisted": true, - "serverKeyStore": "{{ .Values.node.mountPath }}/tm/server-keystore", - "serverKeyStorePassword": "quorum", - "serverTrustStore": "{{ .Values.node.mountPath }}/tm/server-truststore", - "serverTrustStorePassword": "quorum", + "sslConfigType": "SERVER_AND_CLIENT", "serverTrustMode": {{ .Values.tessera.trust | quote }}, - "knownClientsFile": "{{ .Values.node.mountPath }}/tm/knownClients", - "clientKeyStore": "{{ .Values.node.mountPath }}/tm/client-keystore", - "clientKeyStorePassword": "quorum", - "clientTrustStore": "{{ .Values.node.mountPath }}/tm/client-truststore", - "clientTrustStorePassword": "quorum", + "serverTlsKeyPath": "{{ .Values.tessera.dataPath }}/crypto/tessera_cer.key", + "serverTlsCertificatePath": "{{ .Values.tessera.dataPath }}/crypto/tessera_cer.pem", + "serverTrustCertificates": ["{{ .Values.tessera.dataPath }}/crypto/tessera_ca.pem"], "clientTrustMode": {{ .Values.tessera.trust | quote }}, - "knownServersFile": "{{ .Values.node.mountPath }}/tm/knownServers" + "clientTlsKeyPath": "{{ .Values.tessera.dataPath }}/crypto/tessera_cer.key", + "clientTlsCertificatePath": "{{ .Values.tessera.dataPath }}/crypto/tessera_cer.pem", + "clientTrustCertificates": ["{{ .Values.tessera.dataPath }}/crypto/tessera_ca.pem"], + "knownClientsFile": "{{ .Values.tessera.dataPath }}/crypto/known_client2", + "knownServersFile": "{{ .Values.tessera.dataPath }}/crypto/known_server2" }, "communicationType": "REST" } ], - "peer": {{ .Values.tessera.othernodes | toPrettyJson | indent 6 }}, + "peer": [ + { + "url": {{ include "quorum-tessera-node.tesseraURL" . 
}} + } + {{- range .Values.tessera.peerNodes }} + ,{ + "url": {{ .url | quote }} + } + {{- end }} + ], "keys": { - "keyVaultConfigs": [ - { - "keyVaultType": "HASHICORP", - "properties": { - "url": "{{ $.Values.vault.address }}" - } - } - ], - "keyData": [ - { - "hashicorpVaultSecretEngineName": "{{ $.Values.vault.secretengine }}", - "hashicorpVaultSecretName": "{{ $.Values.vault.tmsecretpath }}", - "hashicorpVaultPrivateKeyId": "privateKey", - "hashicorpVaultPublicKeyId": "publicKey" - } - ] + {{ if .Values.tessera.password }} + "passwordFile": "{{ .Values.tessera.passwordPath }}", + {{ end }} + "keyData": [ + { + "privateKeyPath": "/keys/tm.key", + "publicKeyPath": "/keys/tm.pub" + } + ] }, "alwaysSendTo": [] } diff --git a/platforms/quorum/charts/quorum-tessera-node/templates/deployment.yaml b/platforms/quorum/charts/quorum-tessera-node/templates/deployment.yaml deleted file mode 100644 index 0507f6a5bce..00000000000 --- a/platforms/quorum/charts/quorum-tessera-node/templates/deployment.yaml +++ /dev/null @@ -1,246 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: apps/v1 -kind: StatefulSet -metadata: - namespace: {{ .Values.metadata.namespace }} - creationTimestamp: null - labels: - app.kubernetes.io/name: {{ .Values.node.name }}-tessera - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} - name: {{ .Values.node.name }}-tessera -spec: - serviceName: {{ .Values.node.name }}-tessera - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - app: consortiumchain - service.rpc: {{ .Values.node.name }}-tessera - app.kubernetes.io/name: {{ .Values.node.name }}-tessera - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} - template: - metadata: - creationTimestamp: null - labels: - app: consortiumchain - name: {{ .Values.node.name }}-tessera - service.rpc: {{ .Values.node.name }}-tessera - app.kubernetes.io/name: {{ .Values.node.name }}-tessera - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . 
| nindent 2 }} - spec: - serviceAccountName: {{ .Values.vault.serviceaccountname }} - hostname: {{ .Values.node.name }}-tessera - imagePullSecrets: - - name: {{ .Values.node.imagePullSecret }} - volumes: - - name: certificates - emptyDir: - medium: Memory - - name: mysql - emptyDir: - medium: Memory - - name: tessera-config - configMap: - name: tessera-config-{{ .Values.node.name }} - items: - - key: tessera-config.json.tmpl - path: tessera-config.json.tmpl - - name: scripts-volume - configMap: - name: bevel-vault-script - - name: package-manager - configMap: - name: package-manager - initContainers: - - name: certificates-init - image: {{ .Values.images.alpineutils }} - imagePullPolicy: IfNotPresent - env: - - name: VAULT_ADDR - value: {{ .Values.vault.address }} - - name: VAULT_SECRET_PREFIX - value: {{ .Values.vault.secretprefix }} - - name: KUBERNETES_AUTH_PATH - value: {{ .Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ .Values.vault.role }} - - name: MOUNT_PATH - value: "/secret" - - name: VAULT_TYPE - value: "{{ $.Values.vault.type }}" - command: ["sh", "-c"] - args: - - |- - #!/bin/bash - . /scripts/bevel-vault.sh - - # Calling a function to retrieve the vault token. - vaultBevelFunc "init" - # Calling a function to retrieve secrets from Vault only if they exist. - vaultBevelFunc "readJson" "${VAULT_SECRET_PREFIX}/{{ .Values.vault.keyname }}" - - username=$(echo ${VAULT_SECRET} | jq -r '.["db_user"]') - password=$(echo ${VAULT_SECRET} | jq -r '.["db_password"]') - - OUTPUT_PATH="${MOUNT_PATH}/keys" - mkdir -p ${OUTPUT_PATH} - echo "${username}" > ${OUTPUT_PATH}/username - echo "${password}" > ${OUTPUT_PATH}/password - - # Calling a function to retrieve secrets for keystore - vaultBevelFunc "readJson" "${VAULT_SECRET_PREFIX}/certs" - keystore=$(echo ${VAULT_SECRET} | jq -r '.["keystore"]') - echo "${keystore}" > ${OUTPUT_PATH}/keystore - base64 -d ${OUTPUT_PATH}/keystore > ${OUTPUT_PATH}/server-keystore - - echo "Done checking for certificates in vault" - volumeMounts: - - name: certificates - mountPath: /secret - - name: scripts-volume - mountPath: /scripts/bevel-vault.sh - subPath: bevel-vault.sh - - name: mysql-init - image: {{ .Values.images.busybox }} - imagePullPolicy: IfNotPresent - command: ["/bin/sh"] - args: - - "-cx" - - |- - #!/bin/bash - - echo "getting username and password from vault" - var=$(cat /secret/keys/password) - cat << EOF > /docker-entrypoint-initdb.d/createTables.sql - UPDATE mysql.user SET authentication_string = PASSWORD('$var') WHERE User = 'demouser'; - UPDATE mysql.user SET authentication_string = PASSWORD('$var') WHERE User = 'root'; - CREATE TABLE IF NOT EXISTS ENCRYPTED_TRANSACTION (ENCODED_PAYLOAD BLOB NOT NULL, PAYLOAD_CODEC VARCHAR(50), HASH VARBINARY(100) NOT NULL, TIMESTAMP BIGINT, PRIMARY KEY (HASH)); - CREATE TABLE IF NOT EXISTS ENCRYPTED_RAW_TRANSACTION (ENCRYPTED_KEY BLOB NOT NULL, ENCRYPTED_PAYLOAD BLOB NOT NULL, NONCE BLOB NOT NULL, SENDER BLOB NOT NULL, TIMESTAMP BIGINT, HASH VARBINARY(100) NOT NULL, PRIMARY KEY (HASH)); - CREATE TABLE PRIVACY_GROUP(ID VARBINARY(100) NOT NULL, LOOKUP_ID BLOB NOT NULL, DATA BLOB NOT NULL, TIMESTAMP BIGINT, PRIMARY KEY (ID)); - CREATE TABLE ST_TRANSACTION(ID BIGINT(19) NOT NULL, PAYLOAD_CODEC VARCHAR(50), HASH VARCHAR(100) NOT NULL, PAYLOAD BLOB, PRIVACY_MODE BIGINT(10), TIMESTAMP BIGINT(19), VALIDATION_STAGE BIGINT(19), PRIMARY KEY (ID)); - CREATE TABLE ST_AFFECTED_TRANSACTION(ID BIGINT(19) NOT NULL, AFFECTED_HASH VARCHAR(100) NOT NULL, TXN_ID BIGINT(19) NOT NULL, CONSTRAINT 
FK_ST_AFFECTED_TRANSACTION_TXN_ID FOREIGN KEY (TXN_ID) REFERENCES ST_TRANSACTION(ID), PRIMARY KEY (ID)); - ALTER TABLE ST_TRANSACTION ADD INDEX ST_TRANSACTION_VALSTG (VALIDATION_STAGE); - EOF - volumeMounts: - - name: mysql - mountPath: /docker-entrypoint-initdb.d - - name: certificates - mountPath: /secret - containers: - - name: mysql-db - image: {{ .Values.images.mysql }} - imagePullPolicy: IfNotPresent - env: - - name: "MYSQL_ROOT_PASSWORD" - value: "" - - name: MYSQL_DATABASE - value: "{{ .Values.node.dbname }}" - - name: MYSQL_USER - value: "{{ .Values.node.mysqluser }}" - - name: MYSQL_PASSWORD - value: /secret/keys/password - ports: - - containerPort: {{ .Values.node.ports.db }} - volumeMounts: - - name: mysql - mountPath: /docker-entrypoint-initdb.d - - name: certificates - mountPath: /secret - - name: {{ .Values.node.name }}-mysql - mountPath: "/var/lib/mysql" - subPath: mysql - - name: tessera - image: {{ .Values.images.tessera }} - imagePullPolicy: IfNotPresent - command: ["/bin/sh", "-c"] - args: - - |- - #!/usr/bin/env bash - - . /scripts/bevel-vault.sh - . /scripts/package-manager.sh - - # Define the packages to install - packages_to_install="jq curl" - install_packages "$packages_to_install" - - mkdir -p $TESSERA_HOME/logs; - mkdir -p $TESSERA_HOME/tm; - DDIR=$TESSERA_HOME/tm; - PASSWORD=$(cat $TESSERA_HOME/crypto/keys/password); - mv $TESSERA_HOME/crypto/keys/server-keystore $TESSERA_HOME/tm/ - - # Calling a function to retrieve the vault token. - vaultBevelFunc "init" - export HASHICORP_TOKEN="$VAULT_TOKEN" - - printenv; - # mysql connector required for mysql tessera DB - wget -q http://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.25.tar.gz; - tar -xf mysql-connector-java-8.0.25.tar.gz; - cp /mysql-connector-java-8.0.25/mysql-connector-java-8.0.25.jar /tessera/lib/; - CONFIG_TMPL=$(cat ${TESSERA_HOME}/tessera-config.json.tmpl); - echo $CONFIG_TMPL > ${DDIR}/tessera-config-with-hosts.json; - cat ${DDIR}/tessera-config-with-hosts.json; - - /tessera/bin/tessera --configfile ${DDIR}/tessera-config-with-hosts.json -o jdbc.password=$PASSWORD; - ports: - - containerPort: {{ .Values.node.ports.tm }} - env: - - name: TESSERA_HOME - value: {{ .Values.node.mountPath }} - - name: QHOME - value: {{ .Values.node.mountPath }} - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: VAULT_TYPE - value: "{{ $.Values.vault.type }}" - volumeMounts: - - name: certificates - mountPath: {{ .Values.node.mountPath }}/crypto/ - - name: {{ .Values.node.name }}-tessera-pv - mountPath: {{ .Values.node.mountPath }} - - name: tessera-config - mountPath: {{ .Values.node.mountPath }}/tessera-config.json.tmpl - subPath: tessera-config.json.tmpl - - name: scripts-volume - mountPath: /scripts/bevel-vault.sh - subPath: bevel-vault.sh - - name: package-manager - mountPath: /scripts/package-manager.sh - subPath: package-manager.sh - restartPolicy: Always - volumeClaimTemplates: - - metadata: - name: {{ .Values.node.name }}-tessera-pv - spec: - storageClassName: {{ .Values.storage.storageclassname }} - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: {{ .Values.storage.storagesize }} - - metadata: - name: {{ .Values.node.name }}-mysql - spec: - storageClassName: {{ .Values.storage.storageclassname }} - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: {{ .Values.storage.dbstorage }} diff --git 
a/platforms/quorum/charts/quorum-tessera-node/templates/hooks-pre-delete.yaml b/platforms/quorum/charts/quorum-tessera-node/templates/hooks-pre-delete.yaml new file mode 100644 index 00000000000..8c465e89834 --- /dev/null +++ b/platforms/quorum/charts/quorum-tessera-node/templates/hooks-pre-delete.yaml @@ -0,0 +1,79 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "quorum-tessera-node.fullname" . }}-pre-delete-hook + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/hook: pre-delete + helm.sh/hook-weight: "0" + helm.sh/hook-delete-policy: "hook-succeeded" + labels: + app.kubernetes.io/name: pre-delete-hook + app.kubernetes.io/component: cleanup + app.kubernetes.io/part-of: {{ include "quorum-tessera-node.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +spec: + backoffLimit: 3 + completions: 1 + template: + metadata: + labels: + app.kubernetes.io/name: pre-delete-hook + app.kubernetes.io/instance: {{ .Release.Name }} + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + restartPolicy: "OnFailure" + containers: + - name: {{ template "quorum-tessera-node.fullname" . }}-cleanup + image: "{{ .Values.image.hooks.repository }}:{{ .Values.image.hooks.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - /bin/bash + - -c + args: + - | + + echo "{{ template "quorum-tessera-node.fullname" . }} pre-delete-hook ..." + +{{- if and (ne .Values.global.cluster.provider "minikube") (.Values.global.cluster.cloudNativeServices) }} + # placeholder for cloudNative deleteSecret function +{{- else }} + + function deleteSecret { + key=$1 + kubectl delete secret ${key} --namespace {{ .Release.Namespace }} + } + +{{- end }} + + function delete_node_from_tessera_peers_configmap { + kubectl -n {{ .Release.Namespace }} get configmap tessera-peers -o json + # if there is no configmap, do nothing + if [ $? -ne 0 ]; then + echo "No tessera-peers found, nothing to do..." + # delete the one + else + echo "tessera-peers found, deleting {{ template "quorum-tessera-node.fullname" . }}..." + echo $(kubectl -n {{ .Release.Namespace }} get configmap tessera-peers -o jsonpath='{.data.tesseraPeers}' ) > /tmp/tessera-peers.raw + cat /tmp/tessera-peers.raw | jq --arg NEEDLE "{{ .Release.Name }}" 'del(.[] | select( .url | contains($NEEDLE) ))' > /tmp/tessera-peers + kubectl -n {{ .Release.Namespace }} create configmap tessera-peers --from-file=tesseraPeers=/tmp/tessera-peers -o yaml --dry-run=client | kubectl replace -f - + fi + } + + delete_node_from_tessera_peers_configmap + +{{- if .Values.tessera.removeKeysOnDelete }} + +{{- if and (ne .Values.global.cluster.provider "minikube") (.Values.global.cluster.cloudNativeServices) }} + deleteSecret {{ template "quorum-tessera-node.fullname" . }}-tmkey + deleteSecret {{ template "quorum-tessera-node.fullname" . }}-tmkeypub + deleteSecret {{ template "quorum-tessera-node.fullname" . }}-tmpassword +{{- else }} + deleteSecret {{ template "quorum-tessera-node.fullname" . 
}}-keys +{{- end }} + +{{- end }} + echo "Completed" diff --git a/platforms/quorum/charts/quorum-tessera-node/templates/hooks-pre-install.yaml b/platforms/quorum/charts/quorum-tessera-node/templates/hooks-pre-install.yaml new file mode 100644 index 00000000000..dcf997bae5c --- /dev/null +++ b/platforms/quorum/charts/quorum-tessera-node/templates/hooks-pre-install.yaml @@ -0,0 +1,158 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "quorum-tessera-node.fullname" . }}-pre-install-hook + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install + "helm.sh/hook-weight": "0" + "helm.sh/hook-delete-policy": "before-hook-creation" + labels: + app.kubernetes.io/name: pre-install-hook + app.kubernetes.io/component: keygen + app.kubernetes.io/part-of: {{ include "quorum-tessera-node.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +spec: + backoffLimit: 3 + completions: 1 + template: + metadata: + labels: + app.kubernetes.io/name: pre-install-hook + app.kubernetes.io/instance: {{ .Release.Name }} + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + restartPolicy: "OnFailure" + containers: + - name: {{ template "quorum-tessera-node.fullname" . }}-keygen + image: {{ .Values.image.hooks.repository }}:{{ .Values.image.hooks.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + securityContext: + runAsUser: 0 + {{- if (eq .Values.global.vault.type "hashicorp") }} + volumeMounts: + - name: scripts-volume + mountPath: /scripts/bevel-vault.sh + subPath: bevel-vault.sh + env: + - name: VAULT_ADDR + value: "{{ .Values.global.vault.address }}" + - name: VAULT_SECRET_ENGINE + value: "{{ .Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ .Values.global.vault.secretPrefix }}" + - name: KUBERNETES_AUTH_PATH + value: "{{ .Values.global.vault.authPath }}" + - name: VAULT_APP_ROLE + value: "{{ .Values.global.vault.role }}" + - name: VAULT_TYPE + value: "{{ .Values.global.vault.type }}" + {{- end }} + command: + - /bin/bash + - -c + args: + - | + + function update_tessera_peers_configmap { + kubectl -n {{ .Release.Namespace }} get configmap tessera-peers -o json + # first time a tx node is deployed and there is no configmap + if [ $? -ne 0 ]; then + echo "No tessera-peers found, creating a new one..." + echo "[{ \"url\": \"{{ include "quorum-tessera-node.tesseraURL" . }}\" }]" > /tmp/tessera-peers + kubectl --namespace {{ .Release.Namespace }} create configmap tessera-peers --from-file=tesseraPeers=/tmp/tessera-peers + + # update the entries + else + echo "Tessera-peers found, updating existing..." + echo $(kubectl -n {{ .Release.Namespace }} get configmap tessera-peers -o jsonpath='{.data.tesseraPeers}' ) > /tmp/tessera-peers.raw + NEEDLE="{{ include "quorum-tessera-node.tesseraURL" . }}" + cat /tmp/tessera-peers.raw | jq --arg NEEDLE "$NEEDLE" '. += [{"url": $NEEDLE}] | unique ' > /tmp/tessera-peers + kubectl -n {{ .Release.Namespace }} create configmap tessera-peers --from-file=tesseraPeers=/tmp/tessera-peers -o yaml --dry-run=client | kubectl replace -f - + fi + } + +{{- if (eq .Values.global.vault.type "hashicorp") }} + . /scripts/bevel-vault.sh + echo "Getting vault Token..." 
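+          # Note (added comment, assumption): vaultBevelFunc is provided by the mounted bevel-vault.sh script; 'init' retrieves the Vault token, presumably via Kubernetes auth using the KUBERNETES_AUTH_PATH and VAULT_APP_ROLE env vars set above, and the 'readJson'/'write' calls inside safeWriteSecret below reuse that token.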
+ vaultBevelFunc "init" + function safeWriteSecret { + key=$1 + fpath=$2 + #Read if secret exists in Vault + vaultBevelFunc 'readJson' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/${key}-keys" + if [ "$SECRETS_AVAILABLE" == "yes" ] + then + # Get secret from Vault and create the k8s secret if it does not exist + kubectl get secret ${key}-keys --namespace {{ .Release.Namespace }} -o json > /dev/null 2>&1 + if [ $? -ne 0 ]; then + privateKey=$(echo ${VAULT_SECRET} | jq -r '.["privateKey_base64"]' | base64 -d) + publicKey=$(echo ${VAULT_SECRET} | jq -r '.["publicKey"]') + password=$(echo ${VAULT_SECRET} | jq -r '.["password"]') + echo $privateKey > /tmp/privateKey + kubectl create secret generic {{ template "quorum-tessera-node.fullname" . }}-keys --namespace {{ .Release.Namespace }} \ + --from-file=tm.key=/tmp/privateKey --from-literal=tm.pub=${publicKey} \ + --from-literal=tm.password=${password} + fi + else + # Save Tessera secrets to Vault + privateKey=$(cat ${fpath}/tessera.key | base64 -w 0) + publicKey=$(cat ${fpath}/tessera.pub) + password=$(cat ${fpath}/passwordFile.txt) + # create a JSON file for the data related to node crypto + echo " + { + \"data\": + { + \"privateKey_base64\": \"${privateKey}\", + \"publicKey\": \"${publicKey}\", + \"password\": \"${password}\" + } + }" > tessera.json + vaultBevelFunc 'write' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/${key}-keys" 'tessera.json' + rm tessera.json + fi + } +{{- else }} + function safeWriteSecret { + # Placeholder: + # - Implement code to fetch the keys if using any cloud-native service or platform different from HashiCorp to store the keys + # - After fetching the keys, create Kubernetes secrets from them + # - For guidance, refer to the code written for HashiCorp Vault for the same purpose + return 0 + } +{{- end }} + + echo "{{ template "quorum-tessera-node.fullname" . }} pre-install-hook ..." + echo "Tessera keys generation ..." + FOLDER_PATH=$(quorum-genesis-tool --validators 0 --members 1 --bootnodes 0 --tesseraEnabled true \ + --tesseraPassword {{ .Values.tessera.password }} --outputPath /tmp/tessera | tail -1 | sed -e "s/^Artifacts in folder: //") + if [ ! -f "$FOLDER_PATH/member0/passwordFile.txt" ]; then + echo "" > $FOLDER_PATH/member0/passwordFile.txt + fi + echo "Creating {{ template "quorum-tessera-node.fullname" . }}-keys secrets in k8s ..." +{{- if and (ne .Values.global.cluster.provider "minikube") (.Values.global.cluster.cloudNativeServices) }} + safeWriteSecret {{ template "quorum-tessera-node.fullname" . }}-tmkey $FOLDER_PATH/member0/tessera.key + safeWriteSecret {{ template "quorum-tessera-node.fullname" . }}-tmkeypub $FOLDER_PATH/member0/tessera.pub + safeWriteSecret {{ template "quorum-tessera-node.fullname" . }}-tmpassword $FOLDER_PATH/member0/passwordFile.txt +{{- else }} + safeWriteSecret {{ template "quorum-tessera-node.fullname" . }} $FOLDER_PATH/member0 +{{- end }} + + if ! kubectl get secret {{ template "quorum-tessera-node.fullname" . }}-keys --namespace {{ .Release.Namespace }} &> /dev/null; then + # Secret + kubectl create secret generic {{ template "quorum-tessera-node.fullname" . 
}}-keys --namespace {{ .Release.Namespace }} \ + --from-file=tm.key=$FOLDER_PATH/member0/tessera.key \ + --from-file=tm.pub=$FOLDER_PATH/member0/tessera.pub \ + --from-file=tm.password=$FOLDER_PATH/member0/passwordFile.txt + fi + update_tessera_peers_configmap + echo "Completed" + volumes: + - name: scripts-volume + configMap: + name: bevel-vault-script + defaultMode: 0777 diff --git a/platforms/quorum/charts/quorum-tessera-node/templates/ingress.yaml b/platforms/quorum/charts/quorum-tessera-node/templates/ingress.yaml deleted file mode 100644 index aa79c9c6c3a..00000000000 --- a/platforms/quorum/charts/quorum-tessera-node/templates/ingress.yaml +++ /dev/null @@ -1,28 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -{{- if eq .Values.proxy.provider "haproxy" }} -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: {{ .Values.node.name }} - namespace: {{ .Values.metadata.namespace }} - annotations: - kubernetes.io/ingress.class: "haproxy" - ingress.kubernetes.io/ssl-passthrough: "true" -spec: - rules: - - host: {{ .Values.node.name }}tm.{{ .Values.proxy.external_url }} - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: {{ .Values.node.name }}-tessera - port: - number: {{ .Values.node.ports.tm }} -{{- end }} diff --git a/platforms/quorum/charts/quorum-tessera-node/templates/service.yaml b/platforms/quorum/charts/quorum-tessera-node/templates/service.yaml index 852346f6f3f..2c8d5095ae7 100644 --- a/platforms/quorum/charts/quorum-tessera-node/templates/service.yaml +++ b/platforms/quorum/charts/quorum-tessera-node/templates/service.yaml @@ -7,83 +7,84 @@ apiVersion: v1 kind: Service metadata: - namespace: {{ .Values.metadata.namespace }} - annotations: - app: consortiumchain - version: '1' - creationTimestamp: null + name: {{ include "quorum-tessera-node.fullname" . }} + namespace: {{ .Release.Namespace }} labels: - app.kubernetes.io/name: {{ .Values.node.name }}-tessera - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + app.kubernetes.io/name: {{ include "quorum-tessera-node.fullname" . }} + app.kubernetes.io/component: service + app.kubernetes.io/part-of: {{ include "quorum-tessera-node.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} - name: {{ .Values.node.name }}-tessera + app.kubernetes.io/release: {{ .Release.Name }} + {{- if $.Values.labels }} + {{- range $key, $value := $.Values.labels.service }} + {{- range $k, $v := $value }} + {{ $k }}: {{ $v | quote }} + {{- end }} + {{- end }} + {{- end }} spec: + type: ClusterIP selector: - app.kubernetes.io/name: {{ .Values.node.name }}-tessera - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} - service.rpc: {{ .Values.node.name }}-tessera - type: {{ .Values.node.servicetype }} + app.kubernetes.io/part-of: {{ include "quorum-tessera-node.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} ports: - name: tm-manager protocol: TCP - port: {{ .Values.node.ports.tm }} - targetPort: {{ .Values.node.ports.tm }} + port: {{ .Values.tessera.port }} + targetPort: {{ .Values.tessera.port }} + - name: clientport + protocol: TCP + port: {{ .Values.tessera.q2tport }} + targetPort: {{ .Values.tessera.q2tport }} - name: tm-tessera-third-part protocol: TCP port: 9080 targetPort: 9080 - - name: tm-client - protocol: TCP - port: {{ .Values.proxy.clientport }} - targetPort: {{ .Values.proxy.clientport }} - name: mysql-db protocol: TCP - port: {{ .Values.node.ports.db }} - targetPort: {{ .Values.node.ports.db }} -{{- if eq $.Values.proxy.provider "ambassador" }} + port: {{ .Values.tessera.dbport }} + targetPort: {{ .Values.tessera.dbport }} +{{- if eq $.Values.global.proxy.provider "ambassador" }} ## Host for tm connection --- apiVersion: getambassador.io/v3alpha1 kind: Host metadata: - name: {{ .Values.node.name }}-host + name: {{ .Release.Name }}-host spec: - hostname: {{ .Values.proxy.external_url }} + hostname: {{ .Release.Name }}.{{ .Values.global.proxy.externalUrlSuffix }} acmeProvider: authority: none requestPolicy: insecure: action: Reject tlsSecret: - name: {{ .Values.node.name }}-ambassador-certs - tls: - min_tls_version: v1.2 + name: tls-tls-certs + namespace: {{ .Release.Namespace }} ## Mapping for tm port --- apiVersion: getambassador.io/v3alpha1 kind: Mapping metadata: - name: {{ .Values.node.name }}-tm - namespace: {{ .Values.metadata.namespace }} + name: {{ .Release.Name }}-mapping + namespace: {{ .Release.Namespace }} spec: - hostname: {{ .Values.proxy.external_url }} + host: {{ .Release.Name }}.{{ .Values.global.proxy.externalUrlSuffix }} prefix: / - service: {{ .Values.node.name }}-tessera.{{ .Values.metadata.namespace }}:{{ .Values.node.ports.tm }} - tls: {{ .Values.node.name }}-tlscontext + service: {{ include "quorum-tessera-node.fullname" . }}.{{ .Release.Namespace }}:{{ .Values.tessera.port }} + tls: {{ .Release.Name }}-tlscontext --- apiVersion: getambassador.io/v3alpha1 kind: TLSContext metadata: - name: {{ .Values.node.name }}-tlscontext - namespace: {{ .Values.metadata.namespace }} + name: {{ .Release.Name }}-tlscontext + namespace: {{ .Release.Namespace }} spec: hosts: - - {{ .Values.proxy.external_url }} - secret: {{ .Values.node.name }}-ambassador-certs + - {{ .Release.Name }}.{{ .Values.global.proxy.externalUrlSuffix }} + secret: tls-tls-certs.{{ .Release.Namespace }} + secret_namespacing: true min_tls_version: v1.2 {{- end }} diff --git a/platforms/quorum/charts/quorum-tessera-node/templates/servicemonitor.yaml b/platforms/quorum/charts/quorum-tessera-node/templates/servicemonitor.yaml new file mode 100644 index 00000000000..8f72c220473 --- /dev/null +++ b/platforms/quorum/charts/quorum-tessera-node/templates/servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if and .Values.tessera.metrics.enabled .Values.tessera.metrics.serviceMonitorEnabled }} +{{- if $.Capabilities.APIVersions.Has "monitoring.coreos.com/v1/ServiceMonitor" }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "quorum-tessera-node.fullname" . }}-servicemonitor + labels: + release: monitoring + app: {{ include "quorum-tessera-node.fullname" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: {{ .Release.Name }} + {{- if $.Values.labels }} + {{- range $key, $value := $.Values.labels.service }} + {{- range $k, $v := $value }} + {{ $k }}: {{ $v | quote }} + {{- end }} + {{- end }} + {{- end }} + namespace: {{ .Release.Namespace }} +spec: + jobLabel: {{ .Release.Name }} + endpoints: + - port: tm-tessera-third-part + honorLabels: true + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "quorum-tessera-node.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: {{ .Release.Name }} + {{- if $.Values.labels }} + {{- range $key, $value := $.Values.labels.service }} + {{- range $k, $v := $value }} + {{ $k }}: {{ $v | quote }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/platforms/quorum/charts/quorum-tessera-node/templates/statefulset.yaml b/platforms/quorum/charts/quorum-tessera-node/templates/statefulset.yaml new file mode 100644 index 00000000000..ae5f6317ea2 --- /dev/null +++ b/platforms/quorum/charts/quorum-tessera-node/templates/statefulset.yaml @@ -0,0 +1,327 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "quorum-tessera-node.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ include "quorum-tessera-node.fullname" . }} + app.kubernetes.io/name: tessera-statefulset + app.kubernetes.io/component: quorum + app.kubernetes.io/part-of: {{ include "quorum-tessera-node.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + {{- if $.Values.labels }} + {{- range $key, $value := $.Values.labels.deployment }} + {{- range $k, $v := $value }} + {{ $k }}: {{ $v | quote }} + {{- end }} + {{- end }} + {{- end }} +spec: + replicas: 1 + podManagementPolicy: OrderedReady + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ include "quorum-tessera-node.fullname" . }} + app.kubernetes.io/name: tessera-statefulset + app.kubernetes.io/component: quorum + app.kubernetes.io/part-of: {{ include "quorum-tessera-node.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + serviceName: {{ include "quorum-tessera-node.fullname" . 
}} + volumeClaimTemplates: + - metadata: + name: data + labels: + {{- if $.Values.labels }} + {{- range $key, $value := $.Values.labels.pvc }} + {{- range $k, $v := $value }} + {{ $k }}: {{ $v | quote }} + {{- end }} + {{- end }} + {{- end }} + spec: + storageClassName: storage-{{ .Release.Name }} + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storage.size }} + - metadata: + name: data-mysql + labels: + {{- if $.Values.labels }} + {{- range $key, $value := $.Values.labels.pvc }} + {{- range $k, $v := $value }} + {{ $k }}: {{ $v | quote }} + {{- end }} + {{- end }} + {{- end }} + spec: + storageClassName: storage-{{ .Release.Name }} + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storage.dbSize }} + template: + metadata: + labels: + app: {{ include "quorum-tessera-node.fullname" . }} + app.kubernetes.io/name: tessera-statefulset + app.kubernetes.io/component: quorum + app.kubernetes.io/part-of: {{ include "quorum-tessera-node.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + {{- if $.Values.labels }} + {{- range $key, $value := $.Values.labels.deployment }} + {{- range $k, $v := $value }} + {{ $k }}: {{ $v | quote }} + {{- end }} + {{- end }} + {{- end }} + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: {{ .Values.tessera.metrics.port | quote }} + prometheus.io/path: "/metrics" + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + hostname: {{ include "quorum-tessera-node.fullname" . }}.{{ .Release.Namespace }} + imagePullSecrets: + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} + initContainers: + - name: certificates-init + image: {{ .Values.image.alpineutils.repository }}:{{ .Values.image.alpineutils.tag }} + imagePullPolicy: IfNotPresent + env: +{{- if eq .Values.global.vault.type "hashicorp" }} + - name: VAULT_ADDR + value: {{ .Values.global.vault.address }} + - name: VAULT_SECRET_ENGINE + value: {{ $.Values.global.vault.secretEngine }} + - name: VAULT_SECRET_PREFIX + value: {{ .Values.global.vault.secretPrefix }} + - name: KUBERNETES_AUTH_PATH + value: {{ .Values.global.vault.authPath }} + - name: VAULT_APP_ROLE + value: {{ .Values.global.vault.role }} + - name: VAULT_TYPE + value: "{{ $.Values.global.vault.type }}" +{{- end }} + - name: MOUNT_PATH + value: "/secret" + command: ["sh", "-c"] + args: + - |- + #!/bin/bash + + # Source the bevel-vault.sh script to perform the Vault-CURD operations + . /scripts/bevel-vault.sh + + # Get the Vault token + echo "Getting the vault Token..." 
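+              # Note (added comment, assumption): vaultBevelFunc 'init' only retrieves the Vault token; the 'tlscerts' secret read below (rootca_pem, ambassadorcrt, ambassadorkey, knownServer, keystore, password) is expected to have been written to Vault beforehand, e.g. by the quorum-tlscert-gen chart introduced in this change.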
+ vaultBevelFunc 'init' + + # Check if the value of 'tessera.tlsMode' is 'STRICT' + if [ {{ $.Values.tessera.tlsMode }} == "STRICT" ] + then + # Obtain TLS-related data from the Vault + vault_secret_key="${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/tlscerts" + vaultBevelFunc 'readJson' ${vault_secret_key} + + # Extract and decode TLS-related data from the response obtained from the Vault + tessera_ca=$(echo ${VAULT_SECRET} | jq -r '.["rootca_pem"]' | base64 -d) + tessera_cer_pem=$(echo ${VAULT_SECRET} | jq -r '.["ambassadorcrt"]' | base64 -d) + tessera_cer_key=$(echo ${VAULT_SECRET} | jq -r '.["ambassadorkey"]' | base64 -d) + tessera_known_server=$(echo ${VAULT_SECRET} | jq -r '.["knownServer"]'| base64 -d) + keystore=$(echo ${VAULT_SECRET} | jq -r '.["keystore"]') + keystore_password=$(echo ${VAULT_SECRET} | jq -r '.["password"]' | base64 -d) + + # Write the decoded TLS-related data to files + echo "${tessera_ca}" > ${MOUNT_PATH}/tessera_ca.pem + echo "${tessera_cer_pem}" > ${MOUNT_PATH}/tessera_cer.pem + echo "${tessera_cer_key}" > ${MOUNT_PATH}/tessera_cer.key + echo "${tessera_known_server}" > ${MOUNT_PATH}/known_server + echo "${tessera_known_server}" > ${MOUNT_PATH}/known_client + echo "${keystore}" > ${MOUNT_PATH}/keystore + base64 -d ${MOUNT_PATH}/keystore > ${MOUNT_PATH}/keystore.pkcs12 + echo "${keystore_password}" > ${MOUNT_PATH}/keystore_password + fi + + # Copy MySQL jar files to the '/mysql-jar' directory + cp /mysql/* /mysql-jar + volumeMounts: + - name: certificates + mountPath: /secret + - name: mysql-jar + mountPath: /mysql-jar +{{- if eq .Values.global.vault.type "hashicorp" }} + - name: scripts-volume + mountPath: /scripts/bevel-vault.sh + subPath: bevel-vault.sh +{{- end }} + - name: mysql-init + image: {{ .Values.image.busybox }} + imagePullPolicy: IfNotPresent + command: ["/bin/sh"] + args: + - "-cx" + - |- + #!/bin/bash + + echo "Getting Username and Password from the Vault" + + # Read the password from the specified file and store it in 'pass' + pass=$(cat /keys/tm.password) + + # create an SQL script file + # This SQL script updates user passwords and creates database tables + cat << EOF > /docker-entrypoint-initdb.d/createTables.sql + UPDATE mysql.user SET authentication_string = PASSWORD('$pass') WHERE User = 'demouser'; + UPDATE mysql.user SET authentication_string = PASSWORD('$pass') WHERE User = 'root'; + CREATE TABLE IF NOT EXISTS ENCRYPTED_TRANSACTION (ENCODED_PAYLOAD BLOB NOT NULL, HASH VARBINARY(100) NOT NULL, TIMESTAMP BIGINT, PRIMARY KEY (HASH)); + CREATE TABLE IF NOT EXISTS PRIVACY_GROUP(ID VARBINARY(100) NOT NULL, LOOKUP_ID BLOB NOT NULL, DATA BLOB NOT NULL, TIMESTAMP BIGINT, PRIMARY KEY (ID)); + CREATE TABLE IF NOT EXISTS ENCRYPTED_RAW_TRANSACTION (ENCRYPTED_KEY BLOB NOT NULL, ENCRYPTED_PAYLOAD BLOB NOT NULL, NONCE BLOB NOT NULL, SENDER BLOB NOT NULL, TIMESTAMP BIGINT, HASH VARBINARY(100) NOT NULL, PRIMARY KEY (HASH)); + CREATE TABLE ST_TRANSACTION(ID BIGINT(19) NOT NULL, HASH VARCHAR(100) NOT NULL, PAYLOAD BLOB, PRIVACY_MODE BIGINT(10), TIMESTAMP BIGINT(19), VALIDATION_STAGE BIGINT(19), PRIMARY KEY (ID)); + CREATE TABLE ST_AFFECTED_TRANSACTION(ID BIGINT(19) NOT NULL, AFFECTED_HASH VARCHAR(100) NOT NULL, TXN_ID BIGINT(19) NOT NULL, CONSTRAINT FK_ST_AFFECTED_TRANSACTION_TXN_ID FOREIGN KEY (TXN_ID) REFERENCES ST_TRANSACTION(ID), PRIMARY KEY (ID)); + CREATE INDEX IF NOT EXISTS ST_TRANSACTION_VALSTG ON ST_TRANSACTION(VALIDATION_STAGE); + EOF + volumeMounts: + - name: mysql + mountPath: /docker-entrypoint-initdb.d + - name: tessera-keys + mountPath: {{ 
.Values.tessera.keysPath }} + readOnly: true + containers: + - name: mysql-db + image: {{ .Values.image.mysql.repository }}:{{ .Values.image.mysql.tag }} + imagePullPolicy: IfNotPresent + env: + - name: "MYSQL_ROOT_PASSWORD" + value: "" + - name: MYSQL_DATABASE + value: "{{ .Values.tessera.dbName }}" + - name: MYSQL_USER + value: "{{ .Values.tessera.dbUsername }}" + - name: MYSQL_PASSWORD + value: /keys/tm.password + ports: + - containerPort: {{ .Values.tessera.dbport }} + volumeMounts: + - name: mysql + mountPath: /docker-entrypoint-initdb.d + - name: tessera-keys + mountPath: {{ .Values.tessera.keysPath }} + readOnly: true + - name: data-mysql + mountPath: "/var/lib/mysql" + subPath: mysql + - name: tessera + image: {{ .Values.image.tessera.repository }}:{{ .Values.image.tessera.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + resources: + requests: + cpu: "{{ .Values.tessera.resources.cpuRequest }}" + memory: "{{ .Values.tessera.resources.memRequest }}" + limits: + cpu: "{{ .Values.tessera.resources.cpuLimit }}" + memory: "{{ .Values.tessera.resources.memLimit }}" + ports: + - name: tessera + containerPort: {{ .Values.tessera.port }} + protocol: TCP + - name: tessera-tp + containerPort: {{ .Values.tessera.tpport }} + protocol: TCP + - name: tessera-q2t + containerPort: {{ .Values.tessera.q2tport }} + protocol: TCP + env: + - name: TESSERA_HOME + value: {{ .Values.tessera.dataPath }} + - name: VAULT_ADDR + value: {{ $.Values.global.vault.address }} + - name: KUBERNETES_AUTH_PATH + value: {{ $.Values.global.vault.authPath }} + - name: VAULT_APP_ROLE + value: {{ $.Values.global.vault.role }} + volumeMounts: + - name: tessera-keys + mountPath: {{ .Values.tessera.keysPath }} + readOnly: true + - name: data + mountPath: {{ .Values.tessera.dataPath }} + - name: certificates + mountPath: {{ .Values.tessera.dataPath }}/crypto + - name: tessera-config + mountPath: {{ .Values.tessera.dataPath }}/tessera-config.json.tmpl + subPath: tessera-config.json.tmpl + - name: mysql-jar + mountPath: /mysql-jar + command: ["sh", "-c"] + args: + - |- + + # Create directories for Tessera logs and tm + mkdir -p $TESSERA_HOME/logs; + mkdir -p $TESSERA_HOME/tm; + + # Set the DDIR to the 'tm' directory + DDIR=$TESSERA_HOME/tm; + + # Read the password from the specified file and store it in the 'PASSWORD' variable + PASSWORD=$(cat /keys/tm.password); + + # Copy, extract, and place the MySQL connector file into the Tessera library directory, ensuring that Tessera can use it for database connectivity.
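+            # Note (added comment, assumption): the connector tarball is staged into /mysql-jar by the certificates-init container above (cp /mysql/* /mysql-jar), so the pinned 8.0.25 version must match the file shipped in the alpine-utils image.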
+ cp /mysql-jar/mysql-connector-java-8.0.25.tar.gz mysql-connector-java-8.0.25.tar.gz + tar -xf mysql-connector-java-8.0.25.tar.gz; + cp /mysql-connector-java-8.0.25/mysql-connector-java-8.0.25.jar /tessera/lib/; + + # Read the contents of the Tessera configuration template into 'CONFIG_TMPL' + CONFIG_TMPL=$(cat ${TESSERA_HOME}/tessera-config.json.tmpl); + + # Write the contents of the configuration template to a file in the 'tm' directory + echo $CONFIG_TMPL > ${DDIR}/tessera-config-with-hosts.json; + + # Display the contents of the generated Tessera configuration file + cat ${DDIR}/tessera-config-with-hosts.json; + + # Run Tessera with the generated configuration file, passing the 'jdbc.password' parameter + /tessera/bin/tessera --configfile ${DDIR}/tessera-config-with-hosts.json -o jdbc.password=$PASSWORD; + + restartPolicy: Always + volumes: + - name: certificates + emptyDir: + medium: Memory + - name: mysql + emptyDir: + medium: Memory + - name: mysql-jar + emptyDir: + medium: Memory + - name: tessera-config + configMap: + name: {{ include "quorum-tessera-node.fullname" . }}-tessera-config + items: + - key: tessera-config.json.tmpl + path: tessera-config.json.tmpl +{{- if eq .Values.global.vault.type "hashicorp" }} + - name: scripts-volume + configMap: + name: bevel-vault-script + defaultMode: 0777 +{{- end }} + - name: tessera-keys + secret: + secretName: {{ include "quorum-tessera-node.fullname" . }}-keys diff --git a/platforms/quorum/charts/quorum-tessera-node/values.yaml b/platforms/quorum/charts/quorum-tessera-node/values.yaml index 78e6e9cc6bc..c1df9a127f5 100644 --- a/platforms/quorum/charts/quorum-tessera-node/values.yaml +++ b/platforms/quorum/charts/quorum-tessera-node/values.yaml @@ -3,98 +3,101 @@ # # SPDX-License-Identifier: Apache-2.0 ############################################################################################## - +--- # This is a YAML-formatted file. # Declare variables to be passed into your templates. +global: + serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # set to true to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) + vault: + type: hashicorp # hashicorp | kubernetes + role: vault-role + address: + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + proxy: + #This will be the proxy/ingress provider. Can have values "ambassador" or "none" + #Eg. provider: "ambassador" + provider: ambassador # none | ambassador + # This field contains the external URL of the node + externalUrlSuffix: # svc.cluster.local | test.blockchaincloudpoc.com + # Following are the ports that are exposed externally via the proxy + tmport: 443 -#Provide the number of replicas for pods -#Eg. replicaCount: 1 -replicaCount: 1 - -metadata: - #Provide the namespace for the Quorum node - #Eg. namespace: example-quo - namespace: default - #Provide the custom labels - #NOTE: Provide labels other than name, release name , release service, chart version , chart name, run - #These lables will not be applied to VolumeClaimTemplate of StatefulSet as labels are automatically picked up by Kubernetes - #Eg. labels: - # role: minter - labels: +storage: + enabled: true + #Provide the memory for node + #Eg. size: 1Gi + size: 1Gi + #Provide the memory for database + #Eg. dbSize: 1Gi + dbSize: 1Gi + allowedTopologies: + enabled: false #These are the various docker images being used by this chart. 
update them as needed -images: +image: #Provide the valid image name and version to read certificates from vault server - #Eg. alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - #Provide the valid image name and version for quorum tessera + #Eg. alpineutils: ghcr.io/hyperledger/bevel-alpine-ext:latest + alpineutils: + repository: ghcr.io/hyperledger/bevel-alpine-ext + tag: latest + #Provide the valid image name and version for tessera #Eg. tessera: quorumengineering/tessera:0.9.2 - tessera: quorumengineering/tessera:0.9.2 + tessera: + repository: quorumengineering/tessera + tag: 22.1.7 #Provide the valid image name and version for busybox busybox: busybox #Provide the valid image name and version for MySQL. This is used as the DB for TM - mysql: mysql/mysql-server:5.7 - -node: - #Provide the name for Tessera node - #Eg. name: node-1 - name: node-1 - #Provide the mountpath for Quorum pod - #Eg. mountPath: /etc/quorum/qdata - mountPath: /etc/quorum/qdata + mysql: + repository: mysql/mysql-server + tag: 5.7 + hooks: + repository: ghcr.io/hyperledger/bevel-k8s-hooks + tag: qgt-0.2.12 #Provide the docker secret name in the namespace - #Eg. imagePullSecret: regcred - imagePullSecret: regcred - #Provide the k8s service type - servicetype: ClusterIP - #Provide the container and service ports - ports: - tm: 15013 - db: 3306 - #Provide the mysql DB name - #Eg. dbname: demodb - dbname: demodb - #Provide the mysql username - #Eg. mysqluser: demouser - mysqluser: demouser - -vault: - type: hashicorp - #Provide the Vault Address from where secrets will be read - #Eg. address: http://vault.internal.demo.aws.blockchaincloudpoc.com:9000 - address: - #Provide the Vault secret path from where secrets will be read - #Eg. secretprefix: secret/org1/crypto/node_1 - secretprefix: secret/org1/crypto/node_1 - #Provide the serviceaccount which is verified with Vault - #Eg. serviceaccountname: vault-auth - serviceaccountname: vault-auth - #Provide the key name from where quorum secrets will be read - #Eg. keyname: quorum - keyname: quorum - #Provide the service role which is verified with Vault - #Eg. role: vault-role - role: vault-role - #Provide the Vault auth-path which is created for the namespace - #Eg. authpath: quorumorg1 - authpath: quorumorg1 + #Eg. pullSecret: regcred + pullSecret: + #Pull policy to be used for the Docker image + #Eg. pullPolicy: IfNotPresent + pullPolicy: IfNotPresent tessera: - #Provide the Database URL - #Eg. dburl: jdbc:mysql://localhost:3306/demodb - dburl: jdbc:mysql://localhost:3306/demodb + removeKeysOnDelete: true + #Provide the mysql DB name + #Eg. dbName: demodb + dbName: demodb #Provide the Database username - #Eg. dbusername: $username - dbusername: demouser - #Provide the tessera node's own url. This should be local. Use http if tls is OFF - #Eg. url: "https://node1.quo.demo.aws.blockchaincloudpoc.com" - url: - clienturl: + #Eg. dbUsername: demouser + dbUsername: demouser #Provide the list of tessera nodes to connect in url: format. This should be reachable from this node - #Eg. othernodes: + #Eg. 
peerNodes: # - url: "https://node1.quo.demo.aws.blockchaincloudpoc.com" # - url: "https://node2.quo.demo.aws.blockchaincloudpoc.com" - othernodes: + peerNodes: + resources: + cpuLimit: 0.25 + cpuRequest: 0.05 + memLimit: "2G" + memRequest: "1G" + password: "password" + passwordPath: "/keys/tm.password" + dataPath: "/data/tessera" + keysPath: "/keys" + port: 9000 + tpport: 9080 + q2tport: 9101 + dbport: 3306 + metrics: + enabled: true + host: "0.0.0.0" + port: 9545 + # enable if using prometheus-stack metrics monitoring + serviceMonitorEnabled: false #Provide if tessera will use tls. # Options: ## - STRICT: All connections to and from this node must use TLS with mutual @@ -103,31 +106,14 @@ tessera: ## connections, although unauthenticated connections to HTTPS hosts are ## still possible. This should only be used if another transport security ## mechanism like WireGuard is in place. - #Eg. tls: 'STRICT' - tls: STRICT + #Eg. tlsMode: 'STRICT' + tlsMode: "STRICT" #Provide the server/client trust configuration for transaction manager nodes. # options are: "WHITELIST", "CA_OR_TOFU", "CA", "TOFU" # Eg: trust: "TOFU" - trust: TOFU - -proxy: - #This will be the proxy/ingress provider. Can have values "ambassador" or "haproxy" - # TODO "haproxy" has not been tested - #Eg. provider: "ambassador" - provider: "ambassador" - #This field contains the external URL of the node - #Eg. external_url: node1.quo.demo.aws.blockchaincloudpoc.com - external_url: - # Following are the ports that are exposed externally via the proxy - clientport: 15032 + trust: "CA_OR_TOFU" -storage: - #Provide the kubernetes storageclass for node - #Eg. storageclassname: awsstorageclass - storageclassname: awsstorageclass - #Provide the memory for node - #Eg. storagesize: 1Gi - storagesize: 1Gi - #Provide the memory for database - #Eg. dbstorage: 1Gi - dbstorage: 1Gi +labels: + service: [] + pvc: [] + deployment: [] diff --git a/platforms/quorum/charts/quorum-tlscert-gen/Chart.yaml b/platforms/quorum/charts/quorum-tlscert-gen/Chart.yaml new file mode 100644 index 00000000000..e4190f400aa --- /dev/null +++ b/platforms/quorum/charts/quorum-tlscert-gen/Chart.yaml @@ -0,0 +1,25 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +apiVersion: v1 +name: quorum-tls-gen +description: "Quorum: Generates and stores TLS certificates for nodes and tessera" +version: 1.0.0 +appVersion: latest +keywords: + - bevel + - hyperledger + - quorum + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/quorum/charts/quorum-tlscert-gen/README.md b/platforms/quorum/charts/quorum-tlscert-gen/README.md new file mode 100644 index 00000000000..1d2d52c6dfc --- /dev/null +++ b/platforms/quorum/charts/quorum-tlscert-gen/README.md @@ -0,0 +1,109 @@ +[//]: # (##############################################################################################) +[//]: # (Copyright Accenture. All Rights Reserved.) 
+[//]: # (SPDX-License-Identifier: Apache-2.0) +[//]: # (##############################################################################################) + +# quorum-tlscert-gen + +This chart is a component of Hyperledger Bevel. The quorum-tlscert-gen chart generates the TLS certificates needed for accessing Quorum and Tessera nodes outside the cluster. If enabled, the certificates are then stored in the configured Vault and also stored as Kubernetes secrets. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details. + +## TL;DR + +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install my-release bevel/quorum-tlscert-gen +``` + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +If HashiCorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install my-release bevel/quorum-tlscert-gen +``` + +The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +helm uninstall my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters +These parameters are referred to identically in each parent or child chart +| Name | Description | Default Value | +|--------|---------|-------------| +|`global.serviceAccountName` | The serviceaccount name that will be created for Vault Auth management| `vault-auth` | +| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` is supported. | `hashicorp` | +| `global.vault.role` | Role used for authentication with Vault | `vault-role` | +| `global.vault.address`| URL of the Vault server. | `""` | +| `global.vault.authPath` | Authentication path for Vault | `supplychain` | +| `global.vault.network` | Network type which will determine the vault policy | `quorum` | +| `global.vault.secretEngine` | Vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | Vault secret prefix which must start with `data/` | `data/supplychain` | +| `global.proxy.externalUrlSuffix` | External URL suffix which will be used as CN to generate certificate | `test.blockchaincloudpoc.com` | + +### Image + +| Name | Description| Default Value | +|------------|-----------|---------| +| `image.repository` | Docker repository which will be used for this job | `ghcr.io/hyperledger/bevel-alpine` | +| `image.tag` | Docker image tag which will be used for this job | `latest` | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | +| `image.pullPolicy` | The pull policy for the image | `IfNotPresent` | + +### Settings +| Name | Description | Default Value | +| ------------| -------------- | --------------- | +| `settings.tmTls` | Set to true when a transaction manager like Tessera uses TLS. This enables TLS for the transaction manager and the Quorum node.
| `True` | +| `settings.certSubject` | X.509 subject for root CA | `"CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB"` | + +### Common parameters + +| Name | Description | Default Value | +|--------|---------|-------------| +| `labels.service` | Custom labels in yaml k-v format | `[]` | +| `labels.pvc` | Custom labels in yaml k-v format | `[]` | +| `labels.deployment` | Custom labels in yaml k-v format | `[]` | + +## License + +This chart is licensed under the Apache v2.0 license. + +Copyright © 2023 Accenture + +### Attribution + +This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: + +``` +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +``` diff --git a/platforms/quorum/charts/quorum-tlscert-gen/templates/_helpers.tpl b/platforms/quorum/charts/quorum-tlscert-gen/templates/_helpers.tpl new file mode 100644 index 00000000000..cf7f12c660d --- /dev/null +++ b/platforms/quorum/charts/quorum-tlscert-gen/templates/_helpers.tpl @@ -0,0 +1,29 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "quorum-tlscert-gen.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "quorum-tlscert-gen.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "quorum-tlscert-gen.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/platforms/quorum/charts/quorum-tlscert-gen/templates/job-cleanup.yaml b/platforms/quorum/charts/quorum-tlscert-gen/templates/job-cleanup.yaml new file mode 100644 index 00000000000..42ae6d648b2 --- /dev/null +++ b/platforms/quorum/charts/quorum-tlscert-gen/templates/job-cleanup.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "quorum-tlscert-gen.name" . }}-cleanup + labels: + app.kubernetes.io/name: quorum-tlscert-gen-job-cleanup + app.kubernetes.io/component: job-cleanup + app.kubernetes.io/part-of: {{ include "quorum-tlscert-gen.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/managed-by: helm + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/hook-weight: "0" + helm.sh/hook: "pre-delete" + helm.sh/hook-delete-policy: "hook-succeeded" +spec: + backoffLimit: 3 + completions: 1 + template: + metadata: + labels: + app.kubernetes.io/name: quorum-tlscert-gen-job-cleanup + app.kubernetes.io/component: job-cleanup + app.kubernetes.io/part-of: {{ include "quorum-tlscert-gen.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/managed-by: helm + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + restartPolicy: "Never" + imagePullSecrets: + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} + containers: + - name: delete-certs + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + securityContext: + runAsUser: 0 + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - sh + - -c + args: + - | + if kubectl get secret --namespace {{ .Release.Namespace }} {{ include "quorum-tlscert-gen.name" . }}-tls-certs &>/dev/null; then + echo "Deleting tls-certs secret in k8s ..." + kubectl delete secret --namespace {{ .Release.Namespace }} {{ include "quorum-tlscert-gen.name" . }}-tls-certs + fi diff --git a/platforms/quorum/charts/quorum-tlscert-gen/templates/job.yaml b/platforms/quorum/charts/quorum-tlscert-gen/templates/job.yaml new file mode 100644 index 00000000000..3bcd983a061 --- /dev/null +++ b/platforms/quorum/charts/quorum-tlscert-gen/templates/job.yaml @@ -0,0 +1,375 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +apiVersion: batch/v1 +kind: Job +metadata: + name: "{{ include "quorum-tlscert-gen.name" . }}" + namespace: "{{ .Release.Namespace }}" + annotations: + helm.sh/hook-delete-policy: "before-hook-creation" + labels: + app: "{{ include "quorum-tlscert-gen.name" . }}" + app.kubernetes.io/name: "{{ include "quorum-tlscert-gen.name" . }}" + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if $.Values.labels }} + {{- range $key, $value := $.Values.labels.deployment }} + {{- range $k, $v := $value }} + {{ $k }}: {{ $v | quote }} + {{- end }} + {{- end }} + {{- end }} +spec: + backoffLimit: 6 + template: + metadata: + labels: + app: "{{ include "quorum-tlscert-gen.name" . }}" + app.kubernetes.io/name: "{{ include "quorum-tlscert-gen.name" . 
}}" + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if $.Values.labels }} + {{- range $key, $value := $.Values.labels.deployment }} + {{- range $k, $v := $value }} + {{ $k }}: {{ $v | quote }} + {{- end }} + {{- end }} + {{- end }} + spec: + restartPolicy: OnFailure + imagePullSecrets: + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} + serviceAccountName: {{ .Values.global.serviceAccountName }} + volumes: + - name: certificates + emptyDir: + medium: Memory +{{- if eq .Values.global.vault.type "hashicorp" }} + - name: scripts-volume + configMap: + name: bevel-vault-script + defaultMode: 0777 +{{- end }} + initContainers: + - name: init-check-certificates + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} +{{- if eq .Values.global.vault.type "hashicorp" }} + env: + - name: VAULT_ADDR + value: "{{ $.Values.global.vault.address }}" + - name: VAULT_SECRET_ENGINE + value: "{{ $.Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ $.Values.global.vault.secretPrefix }}" + - name: KUBERNETES_AUTH_PATH + value: "{{ $.Values.global.vault.authPath }}" + - name: VAULT_APP_ROLE + value: "{{ $.Values.global.vault.role }}" + - name: VAULT_TYPE + value: "{{ $.Values.global.vault.type }}" +{{- end }} + command: ["sh", "-c"] + args: + - |- + + # Source the bevel-vault.sh script to perform the Vault-CURD operations + . /scripts/bevel-vault.sh + + # Get the Vault token + echo "Getting vault Token..." + vaultBevelFunc "init" + + # Set the output path. + OUTPUT_PATH=/certificates/check_certs + mkdir -p ${OUTPUT_PATH} + + # Obtain the ambassador TLS certificates from Vault if exists + vault_secret_key="${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/tlscerts" + echo "Checking certs in vault at path: ${vault_secret_key}" + vaultBevelFunc "readJson" ${vault_secret_key} + + # Get the ambassador TLS data info from Vault + cert=$(echo ${VAULT_SECRET} | jq -r '.["ambassadorcrt"]') + + # If the cert is null, empty, or contains a parse error, then the certificates do not exist in Vault + if [ "$cert" == "null" ] || [[ "$cert" = *"error"* ]] || [ "$cert" = "" ]; then + # Create a file to indicate that the ambassador TLS certificates are absent + echo "Certficates absent in vault. Ignore error warning" + touch ${OUTPUT_PATH}/ambassadortls_absent.txt + else + # Create a file to indicate that the ambassador TLS certificates are present + echo "Certificates present in vault" + touch ${OUTPUT_PATH}/ambassadortls_present.txt + AMBASSADORTLS_PATH=/certificates/ambassadortls + mkdir -p ${AMBASSADORTLS_PATH} + cert=$(echo ${VAULT_SECRET} | jq -r '.["ambassadorcrt"]' | base64 -d ) + key=$(echo ${VAULT_SECRET} | jq -r '.["ambassadorkey"]' | base64 -d ) + echo "${cert}" > ${AMBASSADORTLS_PATH}/certchain.pem + echo "${key}" > ${AMBASSADORTLS_PATH}/node.key + fi + + echo "Done checking for certificates in vault." 
+ volumeMounts: + - name: certificates + mountPath: /certificates +{{- if eq .Values.global.vault.type "hashicorp" }} + - name: scripts-volume + mountPath: /scripts/bevel-vault.sh + subPath: bevel-vault.sh +{{- end }} + containers: + - name: "generate-certs" + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + env: +{{- if eq .Values.global.vault.type "hashicorp" }} + - name: VAULT_ADDR + value: "{{ $.Values.global.vault.address }}" + - name: VAULT_SECRET_ENGINE + value: "{{ $.Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ $.Values.global.vault.secretPrefix }}" + - name: KUBERNETES_AUTH_PATH + value: "{{ $.Values.global.vault.authPath }}" + - name: VAULT_APP_ROLE + value: "{{ $.Values.global.vault.role }}" + - name: VAULT_TYPE + value: "{{ $.Values.global.vault.type }}" +{{- end }} + - name: TM_CLIENT_PORT + value: "8888" + - name: TM_TLS_ENABLED + value: "{{ $.Values.settings.tmTls }}" + - name: CERT_SUBJECT + value: "{{ $.Values.settings.certSubject }}" + - name: EXTERNAL_URL + value: "{{ include "quorum-tlscert-gen.name" . }}.{{ .Values.global.proxy.externalUrlSuffix }}" + - name: COMPONENT_NS + value: "{{ .Release.Namespace }}" + command: ["sh", "-c"] + args: + - |- + + # Function to format a certificate file and save it to the specified path + formatCertificate () { + NAME="${1##*/}" + while read line || [ -n "$line" ]; + do + echo "$line\n"; + done < ${1} > ${2}/${NAME}.txt + } + + # Set the directories path + CERTS_CHECKS_PATH=/certificates/check_certs + AMBASSADORTLS_PATH=/certificates/ambassadortls + ROOTCA_PATH=/certificates/rootca + FORMAT_CERTIFICATE_PATH="/formatcertificate" + + # Create the ambassadortls directory if it doesn't exist + mkdir -p ${AMBASSADORTLS_PATH} + + # if ambassadortls_absent file does not exist, create the certificates + if [ -e ${CERTS_CHECKS_PATH}/ambassadortls_absent.txt ] + then + # Create the rootca directory if it doesn't exist + mkdir -p ${ROOTCA_PATH} + cd ${ROOTCA_PATH} + + # Generates the rootCA certificates + openssl genrsa -out rootca.key 2048 + openssl req -x509 -new -nodes -key rootca.key -sha256 -days 1024 -out rootca.pem -subj "/${CERT_SUBJECT}" + + # Generates the openssl file for domain + cd ${AMBASSADORTLS_PATH} + + echo "[req] + req_extensions = v3_req + distinguished_name = dn + [dn] + [ v3_req ] + basicConstraints = CA:FALSE + keyUsage = nonRepudiation, digitalSignature, keyEncipherment + subjectAltName = @alt_names + [alt_names] + DNS.1 = ${EXTERNAL_URL} + DNS.2 = {{ include "quorum-tlscert-gen.fullname" . 
}}.{{ .Release.Namespace }} + DNS.3 = tessera-{{ .Release.Name }}.{{ .Release.Namespace }} + " > openssl.conf + # Generates the ambassador tls certificates + openssl genrsa -out node.key 2048 + openssl req -new -nodes -key node.key -out node.csr -subj "/CN=${EXTERNAL_URL}" -config "openssl.conf" + openssl x509 -req -in node.csr -CA ${ROOTCA_PATH}/rootca.pem -CAkey ${ROOTCA_PATH}/rootca.key -CAcreateserial -out node.pem -days 1000 -sha256 -extfile "openssl.conf" -extensions v3_req + cat node.pem > certchain.pem + cat ${ROOTCA_PATH}/rootca.pem >> certchain.pem + + # Check if TM_TLS_ENABLED is true + if [ "$TM_TLS_ENABLED" == "true" ] + then + # Generate crypto for quorum nodes and knownserver file + openssl genrsa -out quorum-node.key 2048 + openssl req -new -key quorum-node.key -out quorum-node.csr -subj "/CN=${EXTERNAL_URL}" + openssl x509 -req -in quorum-node.csr -CA ${ROOTCA_PATH}/rootca.pem -CAkey ${ROOTCA_PATH}/rootca.key -CAcreateserial -out quorum-node.pem -days 500 -sha256 + cat quorum-node.key quorum-node.pem > quorum_certificates.pem + + openssl pkcs12 -export -in quorum_certificates.pem -out quorum-node.pkcs12 -password pass:changeme -name myAlias -noiter -nomaciter + echo "changeme" > quorum-password + + openssl x509 -in certchain.pem -noout -fingerprint -sha256 > quorum-certchain-sha256 + export DIGEST=$(awk '{print $2}' quorum-certchain-sha256 | sed -n 's/Fingerprint=\(.*\)/\1/p') + printf '%s\n' "tessera-*.${COMPONENT_NS}:${TM_CLIENT_PORT} $DIGEST" "${EXTERNAL_URL} $DIGEST" > quorum-knownServer + fi; + fi; + # Create necessary subdirectories for certificate storage + mkdir -p ${FORMAT_CERTIFICATE_PATH}/rootca + mkdir -p ${FORMAT_CERTIFICATE_PATH}/ambassadortls + + # Source the bevel-vault.sh script to perform the Vault-CURD operations + . /scripts/bevel-vault.sh + # Get the Vault token + echo "Getting the vault Token.." 
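+            # (Assumed behaviour: vaultBevelFunc is provided by the bevel-vault-script ConfigMap mounted at
+            #  /scripts/bevel-vault.sh; 'init' logs in via the Kubernetes auth path and exports the Vault
+            #  token that the 'write' and 'readJson' calls below rely on.)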
+ vaultBevelFunc 'init' + + # Check if ambassadortls certificates are absent, then save them to Vault + if [ -e ${CERTS_CHECKS_PATH}/ambassadortls_absent.txt ] + then + # Encode the certificate and key files in base64 format + base64 ${ROOTCA_PATH}/rootca.key > ${ROOTCA_PATH}/encode_rootca.key + base64 ${ROOTCA_PATH}/rootca.pem > ${ROOTCA_PATH}/encode_rootca.pem + base64 ${AMBASSADORTLS_PATH}/certchain.pem > ${AMBASSADORTLS_PATH}/encode_certchain.pem + base64 ${AMBASSADORTLS_PATH}/node.key > ${AMBASSADORTLS_PATH}/encode_node.key + + # Format the certificate files for saving in the vault + formatCertificate "${ROOTCA_PATH}/encode_rootca.key" "${FORMAT_CERTIFICATE_PATH}/rootca" + formatCertificate "${ROOTCA_PATH}/encode_rootca.pem" "${FORMAT_CERTIFICATE_PATH}/rootca" + formatCertificate "${AMBASSADORTLS_PATH}/encode_certchain.pem" "${FORMAT_CERTIFICATE_PATH}/ambassadortls" + formatCertificate "${AMBASSADORTLS_PATH}/encode_node.key" "${FORMAT_CERTIFICATE_PATH}/ambassadortls" + + # Read the formatted certificate files + ROOTCAKEY_FORMAT=$(cat ${FORMAT_CERTIFICATE_PATH}/rootca/encode_rootca.key.txt) + ROOTCAPEM_FORMAT=$(cat ${FORMAT_CERTIFICATE_PATH}/rootca/encode_rootca.pem.txt) + AMBASSADORCRT_FORMAT=$(cat ${FORMAT_CERTIFICATE_PATH}/ambassadortls/encode_certchain.pem.txt) + AMBASSADORKEY_FORMAT=$(cat ${FORMAT_CERTIFICATE_PATH}/ambassadortls/encode_node.key.txt) + + # Check if tmTls is enabled + if [ "$TM_TLS_ENABLED" == "true" ] + then + # Encode additional certificate files when tmTls is true + base64 ${AMBASSADORTLS_PATH}/quorum-node.pkcs12 > ${AMBASSADORTLS_PATH}/encode_quorum-node.pkcs12 + base64 ${AMBASSADORTLS_PATH}/quorum-password > ${AMBASSADORTLS_PATH}/encode_quorum-password + base64 ${AMBASSADORTLS_PATH}/quorum-knownServer > ${AMBASSADORTLS_PATH}/encode_quorum-knownServer + + # Format the additional certificate files for saving in the vault + formatCertificate "${AMBASSADORTLS_PATH}/encode_quorum-node.pkcs12" "${FORMAT_CERTIFICATE_PATH}/ambassadortls" + formatCertificate "${AMBASSADORTLS_PATH}/encode_quorum-password" "${FORMAT_CERTIFICATE_PATH}/ambassadortls" + formatCertificate "${AMBASSADORTLS_PATH}/encode_quorum-knownServer" "${FORMAT_CERTIFICATE_PATH}/ambassadortls" + + # Read the additional formatted certificate files + KEYSTORE_FORMAT=$(cat ${FORMAT_CERTIFICATE_PATH}/ambassadortls/encode_quorum-node.pkcs12.txt) + PASSWORD_FORMAT=$(cat ${FORMAT_CERTIFICATE_PATH}/ambassadortls/encode_quorum-password.txt) + KNOWNSERVER_FORMAT=$(cat ${FORMAT_CERTIFICATE_PATH}/ambassadortls/encode_quorum-knownServer.txt) + + # Create a JSON file with the data to be saved in the vault when tmTls is true + echo " + { + \"data\": + { + \"rootca_pem\": \"${ROOTCAPEM_FORMAT}\", + \"rootca_key\": \"${ROOTCAKEY_FORMAT}\", + \"ambassadorcrt\": \"${AMBASSADORCRT_FORMAT}\", + \"ambassadorkey\": \"${AMBASSADORKEY_FORMAT}\", + \"keystore\": \"${KEYSTORE_FORMAT}\", + \"password\": \"${PASSWORD_FORMAT}\", + \"knownServer\": \"${KNOWNSERVER_FORMAT}\" + } + }" > payload.json + else + # Create a JSON file with the data to be saved in the vault when tmTls is false + echo " + { + \"data\": + { + \"rootca_pem\": \"${ROOTCAPEM_FORMAT}\", + \"rootca_key\": \"${ROOTCAKEY_FORMAT}\", + \"ambassadorcrt\": \"${AMBASSADORCRT_FORMAT}\", + \"ambassadorkey\": \"${AMBASSADORKEY_FORMAT}\" + } + }" > payload.json + fi; + + # Copy the TLS certificates to the Vault + vaultBevelFunc 'write' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/tlscerts" 'payload.json' + + # Check if TLS certificates are written successfully and read them to 
validate + if [ "$TM_TLS_ENABLED" == "true" ] + then + # Obtain TLS certificates from the Vault when tmTls is true + vaultBevelFunc 'readJson' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/tlscerts" + + # Extract TLS certificate and key information from the response obtained from the Vault + CA_PEM=$(echo ${VAULT_SECRET} | jq -r '.["rootca_pem"]') + CA_KEY=$(echo ${VAULT_SECRET} | jq -r '.["rootca_key"]') + AMBASSADORCRT=$(echo ${VAULT_SECRET} | jq -r '.["ambassadorcrt"]') + AMBASSADORKEY=$(echo ${VAULT_SECRET} | jq -r '.["ambassadorkey"]') + KEYSTORE=$(echo ${VAULT_SECRET} | jq -r '.["keystore"]') + PASSWORD=$(echo ${VAULT_SECRET} | jq -r '.["password"]') + KNOWNSERVER=$(echo ${VAULT_SECRET} | jq -r '.["knownServer"]') + + # Check if any of the certificate and key fields are missing, empty or having any kind of error + for field in "$CA_PEM" "$CA_KEY" "$AMBASSADORCRT" "$AMBASSADORKEY" "$KEYSTORE" "$PASSWORD" "$KNOWNSERVER" + do + if [ "$field" == "null" ] || [[ "$field" = *"error"* ]] || [ "$field" = "" ]; then + AMBASSADORTLS_CERT_WRITTEN=false + break + else + AMBASSADORTLS_CERT_WRITTEN=true + fi + done + else + # Obtain the TLS certificates from the Vault when tmTls is false + vaultBevelFunc 'readJson' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/tlscerts" + + # Extract TLS certificate and key information from the response obtained from the Vault + CA_PEM=$(echo ${VAULT_SECRET} | jq -r '.["rootca_pem"]') + CA_KEY=$(echo ${VAULT_SECRET} | jq -r '.["rootca_key"]') + AMBASSADORCRT=$(echo ${VAULT_SECRET} | jq -r '.["ambassadorcrt"]') + AMBASSADORKEY=$(echo ${VAULT_SECRET} | jq -r '.["ambassadorkey"]') + + # Check if any of the certificate and key fields are missing, empty or having any kind of error + for field in "$CA_PEM" "$CA_KEY" "$AMBASSADORCRT" "$AMBASSADORKEY" + do + if [ "$field" == "null" ] || [[ "$field" = *"error"* ]] || [ "$field" = "" ]; then + AMBASSADORTLS_CERT_WRITTEN=false + break + else + AMBASSADORTLS_CERT_WRITTEN=true + fi + done + fi; + echo "Final value of AMBASSADORTLS_CERT_WRITTEN:$AMBASSADORTLS_CERT_WRITTEN" + # Delete the same JSON file that we created to perform the write operation in the vault + rm payload.json + fi; + + # Create tls secret with the certificates + if ! kubectl get secret tls --namespace {{ .Release.Namespace }} {{ include "quorum-tlscert-gen.name" . }}-tls-certs &>/dev/null; then + kubectl create secret tls --namespace {{ .Release.Namespace }} {{ include "quorum-tlscert-gen.name" . }}-tls-certs \ + --cert=${AMBASSADORTLS_PATH}/certchain.pem \ + --key=${AMBASSADORTLS_PATH}/node.key + fi + + echo "COMPLETED" + volumeMounts: + - name: certificates + mountPath: /certificates +{{- if eq .Values.global.vault.type "hashicorp" }} + - name: scripts-volume + mountPath: /scripts/bevel-vault.sh + subPath: bevel-vault.sh +{{- end }} diff --git a/platforms/quorum/charts/quorum-tlscert-gen/values.yaml b/platforms/quorum/charts/quorum-tlscert-gen/values.yaml new file mode 100644 index 00000000000..5a8ccb99161 --- /dev/null +++ b/platforms/quorum/charts/quorum-tlscert-gen/values.yaml @@ -0,0 +1,66 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
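+#
+# Example override (illustrative values only), applied with, e.g.:
+#   helm install my-release bevel/quorum-tlscert-gen -f my-values.yaml
+# where my-values.yaml contains:
+#   global:
+#     vault:
+#       address: http://vault.example.com:8200
+#     proxy:
+#       externalUrlSuffix: example.blockchaincloudpoc.com
+#   settings:
+#     tmTls: true
+#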
+# The following are for overriding global values +global: + #Provide the service account name which will be created. + #Eg. serviceAccountName: vault-auth + serviceAccountName: vault-auth + vault: + #Provide the type of vault + #Eg. type: hashicorp + type: hashicorp + #Provide the vault role used. + #Eg. role: vault-role + role: vault-role + #Provide the vault server address + #Eg. address: http://54.226.163.39:8200 + address: + #Provide the vault authPath configured to be used. + #Eg. authPath: supplychain + authPath: supplychain # supplychain + # #Provide the network type + network: quorum + #Provide the secret engine. + #Eg. secretEngine: secretsv2 + secretEngine: secretsv2 + #Provide the vault path where the tls certificates will be stored + #Eg. secretPrefix: data/warehouse-bes/crypto/warehouse/tls MUST use data/ + secretPrefix: "data/supplychain" + proxy: + # Provide external URL for cert generation + # Eg: test.blockchaincloudpoc.com + externalUrlSuffix: + +image: + #Provide the image repository for all containers + #Eg. repository: ghcr.io/hyperledger/bevel-alpine + repository: ghcr.io/hyperledger/bevel-alpine + tag: latest + #Provide the secret to use if private repository + #Eg. pullSecret: regcred + pullSecret: + #Pull policy to be used for the Docker image + #Eg. pullPolicy: IfNotPresent + pullPolicy: IfNotPresent + +#Settings for certificate generation +settings: + #Set value to true when transaction manager like tessera uses tls + #Eg. tmTls: True + tmTls: true # NEW + #Provide the X.509 subject for root CA. + #Eg. certSubject: "CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB" + certSubject: "CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB" + +labels: + service: [] + pvc: [] + deployment: [] + +nameOverride: ambassador-tls-gen diff --git a/platforms/quorum/charts/quorum-tlscerts-gen/Chart.yaml b/platforms/quorum/charts/quorum-tlscerts-gen/Chart.yaml deleted file mode 100644 index d7e40ad914d..00000000000 --- a/platforms/quorum/charts/quorum-tlscerts-gen/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "1.0" -description: "Quorum: This Helm Chart generates SSL/TLS certificates using OpenSSL, including a root CA certificate and node certificates, and storing them in a Vault server. These certificates enable secure communication and authentication between servers and clients in a system." -name: quorum-tlscerts-gen -version: 1.0.0 diff --git a/platforms/quorum/charts/quorum-tlscerts-gen/README.md b/platforms/quorum/charts/quorum-tlscerts-gen/README.md deleted file mode 100644 index c8c84265614..00000000000 --- a/platforms/quorum/charts/quorum-tlscerts-gen/README.md +++ /dev/null @@ -1,196 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) 
-[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# Ambassador Certs GoQuorum Deployment - -- [Ambassador Certs GoQuorum Deployment Helm Chart](#ambassador-certs-goquorum-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Verification](#verification) -- [Updating the Deployment](#updating-the-deployment) -- [Deletion](#deletion) -- [Contributing](#contributing) -- [License](#license) - - - -## Ambassador Certs GoQuorum Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-tlscerts-gen) facilitates the deployment of Ambassador certificates using Kubernetes Jobs and stores them securely in HashiCorp Vault. - - - -## Prerequisites ---- -Before deploying the Helm chart, make sure to have the following prerequisites: - -- Kubernetes cluster up and running. -- The GoQuorum network is set up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm installed. - - - -## Chart Structure ---- -The structure of the Helm chart is as follows: - -``` -quorum-tlscerts-gen/ - |- templates/ - |- helpers.tpl - |- configmap.yaml - |- job.yaml - |- Chart.yaml - |- README.md - |- values.yaml -``` - -- `templates/`: This directory contains the Kubernetes manifest templates that define the resources to be deployed. -- `helpers.tpl`: A template file used for defining custom labels in the Helm chart. -- `configmap.yaml`: A Kubernetes ConfigMap resource that holds the OpenSSL configuration file. -- `job.yaml`: This file defines the Kubernetes Job resource for generating ambassador certificates and storing them in the Hashicorp Vault. -- `Chart.yaml`: This file contains the metadata for the Helm chart, such as the name, version, and description. -- `README.md`: This file provides information and instructions about the Helm chart. -- `values.yaml`: This file contains the default configuration values for the Helm chart. - - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-tlscerts-gen/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: - -## Parameters ---- - -### Name - -| Name | Description | Default Value | -| -----------| -------------------------------------------------- | ------------- | -| name | Provide the name of the node | node_1 | - -### Metadata - -| Name | Description | Default Value | -| ----------------| ---------------------------------------------------------------------------- | ------------- | -| namespace | Provide the namespace for the quorum Certs Generator. 
| default | -| labels | Provide any additional labels for the quorum Certs Generator | "" | - -### Image - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------- | --------------- | -| initContainerName | Provide the alpine utils image, which is used for all init-containers of deployments/jobs | "" | -| certsContainerName | Provide the image for the certs container | "" | -| imagePullSecret | Provide the docker-registry secret created and stored in kubernetes cluster as a secret | regcred | -| pullPolicy | Pull policy to be used for the Docker image | IfNotPresent | - -### Vault - -| Name | Description | Default Value | -| ------------------------- | --------------------------------------------------------------------------| ------------- | -| address | Address/URL of the Vault server. | "" | -| role | Role used for authentication with Vault | vault-role | -| authpath | Authentication path for Vault | quorumnode_1 | -| serviceaccountname | Provide the already created service account name autheticated to vault | vault-auth | -| certsecretprefix | Provide the vault path where the certificates are stored | "" | -| retries | Number of retries to check contents from vault | 30 | -| type | The type of Vault used | hashicorp | - -### Subjects - -| Name | Description | Default Value | -| ------------------------- | ---------------------------------- | ------------- | -| root_subject | Mention the subject for rootca | "CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB" | -| cert_subject | Mention the subject for cert | "CN=DLT Root CA/OU=DLT/O=DLT/L=London/C=GB" | - -### OpenSSL Vars - -| Name | Description | Default Value | -| --------------------------| ----------------------------------------------------------| ------------- | -| domain_name | Provides the name for domain | "" | -| domain_name_api | Provides the name for domain_name api endpoint | "" | -| domain_name_web | provides the name for domain_name web endpoint | "" | -| domain_name_tessera | provides the name for domain domain_name tessera endpoint | "" | - - -## Deployment ---- - -To deploy the quorum-tlscerts-gen Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-tlscerts-gen/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install the chart: - ``` - $ helm repo add bevel https://hyperledger.github.io/bevel/ - $ helm install ./quorum-tlscerts-gen - ``` -Replace `` with the desired name for the release. - -This will deploy the Quorum Connector to the Kubernetes cluster based on the provided configurations. - - -## Verification ---- - -To verify the deployment, we can use the following command: -``` -$ kubectl get jobs -n -``` -Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. - - -## Updating the Deployment ---- - -If we need to update the deployment with new configurations or changes, modify the same [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-tlscerts-gen/values.yaml) file with the desired changes and run the following Helm command: -``` -$ helm upgrade ./quorum-tlscerts-gen -``` -Replace `` with the name of the release. This command will apply the changes to the deployment, ensuring the quorum-tlscerts-gen node is up to date. 
- - -## Deletion ---- - -To delete the deployment and associated resources, run the following Helm command: -``` -$ helm uninstall -``` -Replace `` with the name of the release. This command will remove all the resources created by the Helm chart. - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Ambassador Certs GoQuorum Deployment Helm Chart](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-tlscerts-gen), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - -## License - -This chart is licensed under the Apache v2.0 license. - -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -``` diff --git a/platforms/quorum/charts/quorum-tlscerts-gen/templates/_helpers.tpl b/platforms/quorum/charts/quorum-tlscerts-gen/templates/_helpers.tpl deleted file mode 100644 index d43c09d8cef..00000000000 --- a/platforms/quorum/charts/quorum-tlscerts-gen/templates/_helpers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} diff --git a/platforms/quorum/charts/quorum-tlscerts-gen/templates/configmap.yaml b/platforms/quorum/charts/quorum-tlscerts-gen/templates/configmap.yaml deleted file mode 100644 index 3023ab18d4d..00000000000 --- a/platforms/quorum/charts/quorum-tlscerts-gen/templates/configmap.yaml +++ /dev/null @@ -1,20 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ $.Values.name }}-conf - namespace: {{ .Values.metadata.namespace }} - labels: - app.kubernetes.io/name: {{ $.Values.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - {{- include "labels.custom" . | nindent 2 }} -data: - openssl.conf: |+ -{{ .Files.Get "files/openssl.conf" | indent 4 }} diff --git a/platforms/quorum/charts/quorum-tlscerts-gen/templates/job.yaml b/platforms/quorum/charts/quorum-tlscerts-gen/templates/job.yaml deleted file mode 100644 index 614d4cba9ca..00000000000 --- a/platforms/quorum/charts/quorum-tlscerts-gen/templates/job.yaml +++ /dev/null @@ -1,291 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: batch/v1 -kind: Job -metadata: - name: {{ $.Values.name }}-generate-ambassador-certs-job - namespace: {{ .Values.metadata.namespace }} - labels: - app: {{ $.Values.name }}-generate-ambassador-certs-job - app.kubernetes.io/name: {{ $.Values.name }}-generate-ambassador-certs-job - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} -spec: - backoffLimit: 6 - template: - metadata: - labels: - app: {{ $.Values.name }}-generate-ambassador-certs-job - app.kubernetes.io/name: {{ $.Values.name }}-generate-ambassador-certs-job - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - restartPolicy: "OnFailure" - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - securityContext: - fsGroup: 1000 - initContainers: - - name: init-check-certificates - image: {{ .Values.image.initContainerName }} - imagePullPolicy: IfNotPresent - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: CERTS_SECRET_PREFIX - value: {{ $.Values.vault.certsecretprefix }} - - name: MOUNT_PATH - value: "/certcheck" - - name: VAULT_TYPE - value: "{{ $.Values.vault.type }}" - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - . /scripts/bevel-vault.sh - - # Calling a function to retrieve the vault token. - vaultBevelFunc "init" - - # Calling a function to retrieve secrets from Vault only if they exist. - vaultBevelFunc "readJson" "${CERTS_SECRET_PREFIX}/crypto/{{ $.Values.name }}/certs" - - mkdir -p ${MOUNT_PATH} - - # Create an empty file to indicate that the secret is absent or present in vault. - if [ "$SECRETS_AVAILABLE" == "no" ] - then - echo "Certficates absent in vault. Ignore error warning." - touch ${MOUNT_PATH}/absent.txt - else - echo "Certificates present in vault." - touch ${MOUNT_PATH}/present.txt - fi - - echo "Done checking for certificates in vault." 
- volumeMounts: - - name: credentials - mountPath: /DATA - - name: certcheck - mountPath: /certcheck - - name: scripts-volume - mountPath: /scripts/bevel-vault.sh - subPath: bevel-vault.sh - containers: - - name: certs-generation - image: {{ .Values.image.certsContainerName }} - env: - - name: DOMAIN_NAME - value: "{{ .Values.opensslVars.domain_name }}" - - name: DOMAIN_NAME_API - value: "{{ .Values.opensslVars.domain_name_api }}" - - name: DOMAIN_NAME_WEB - value: "{{ .Values.opensslVars.domain_name_web }}" - - name: DOMAIN_NAME_TESSERA - value: "{{ .Values.opensslVars.domain_name_tessera }}" - - name: TM_CLIENT_PORT - value: "{{ .Values.opensslVars.clientport }}" - - name: NODE_NAME - value: "{{ $.Values.name }}" - - name: CERTS_SECRET_PREFIX - value: {{ $.Values.vault.certsecretprefix }} - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: CERTS_SECRET_PREFIX - value: {{ $.Values.vault.certsecretprefix }} - - name: ROOT_SUBJECT - value: "{{ .Values.subjects.root_subject }}" - - name: CERT_SUBJECT - value: "{{ .Values.subjects.cert_subject }}" - - name: MOUNT_PATH - value: "/certcheck" - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: ["/bin/sh", "-c"] - args: - - |- - if [ -e ${MOUNT_PATH}/present.txt ] - then - echo "Certificates already present in the vault. Skipping.." - exit 0 - fi - # create directories - mkdir -p ${MOUNT_PATH}/DATA/rootca - mkdir -p ${MOUNT_PATH}/DATA/ambassadortls - cd ${MOUNT_PATH}/DATA/rootca - set -x - cat <openssl.conf - [req] - req_extensions = v3_ca - distinguished_name = dn - [dn] - [v3_ca] - basicConstraints = critical, CA:TRUE - keyUsage = critical,digitalSignature, keyCertSign, cRLSign - extendedKeyUsage=serverAuth,clientAuth - subjectKeyIdentifier = hash - authorityKeyIdentifier = keyid:always - EOF - - openssl genrsa -out tessera_ca.key 2048 - openssl req -x509 -config openssl.conf -new -nodes -key tessera_ca.key -sha256 -days 1024 -out tessera_ca.pem -extensions v3_ca -subj "/${CERT_SUBJECT}" - - cd ${MOUNT_PATH}/DATA/ambassadortls - cat <openssl${NODE_NAME}.conf - [dn] - [req] - distinguished_name = dn - req_extensions = v3_req - [ v3_req ] - basicConstraints = CA:FALSE - keyUsage = nonRepudiation, digitalSignature, keyEncipherment - subjectAltName = @alt_names - - [alt_names] - DNS.1 = ${DOMAIN_NAME} - DNS.2 = ${DOMAIN_NAME_API} - DNS.3 = ${DOMAIN_NAME_WEB} - DNS.4 = ${DOMAIN_NAME_TESSERA} - EOF - cat openssl${NODE_NAME}.conf - cd ${MOUNT_PATH}/DATA/ambassadortls - - openssl genrsa -out tessera_cer.key 2048 - openssl req -new -key tessera_cer.key -out tessera_cer.csr -subj /CN=carrier.quo.demo.aws.blockchaincloudpoc.com -config opensslcarrier.conf - openssl req -new -key tessera_cer.key -out tessera_cer.csr -subj "/CN=${DOMAIN_NAME}" -config openssl${NODE_NAME}.conf - openssl x509 -req -in tessera_cer.csr -CA ${MOUNT_PATH}/DATA/rootca/tessera_ca.pem -CAkey ${MOUNT_PATH}/DATA/rootca/tessera_ca.key -set_serial 01 -out tessera_cer.pem -days 1000 -sha256 -extensions v3_req -extfile openssl${NODE_NAME}.conf - cat tessera_cer.pem > ${NODE_NAME}-certchain.pem - cat ${MOUNT_PATH}/DATA/rootca/tessera_ca.pem >> ${NODE_NAME}-certchain.pem - # Generate keystore file - cat tessera_cer.key tessera_cer.pem > ${NODE_NAME}_certificates.pem - openssl pkcs12 -export -in ${NODE_NAME}_certificates.pem -out ${NODE_NAME}-keystore.pkcs12 -password pass:quorum -name myAlias -noiter -nomaciter - - #creating a dummy file to 
perform check if last line is executed or not. - touch ${MOUNT_PATH}/DATA/done.txt - volumeMounts: - - name: certcheck - mountPath: /certcheck - - name: store-certs - image: {{ .Values.image.certsContainerName }} - imagePullPolicy: IfNotPresent - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role}} - - name: NODE_NAME - value: "{{ $.Values.name }}" - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: CERTS_SECRET_PREFIX - value: {{ $.Values.vault.certsecretprefix }} - - name: MOUNT_PATH - value: "/certcheck" - - name: VAULT_TYPE - value: "{{ $.Values.vault.type }}" - command: ["/bin/sh", "-c"] - args: - - |- - #!/usr/bin/env sh - . /scripts/bevel-vault.sh - - # Skip secret creation if "present.txt" exists in /certcheck/ - if [ -e /certcheck/present.txt ] - then - echo "Certificates already present in the vault. Skipping." - exit 0 - fi - - # Calling a function to retrieve the vault token. - vaultBevelFunc 'init' - - COUNTER=1 - cd ${MOUNT_PATH}/DATA - while [ "$COUNTER" -lt 10 ] - do - if [ -e done.txt ] - then - cd ${MOUNT_PATH}/DATA - echo "found certificates, performing vault put" - # Use -w0 to get single line base64 -w0 - ROOTCAPEM=$(cat ./rootca/tessera_ca.pem | base64 -w0) - ROOTCAKEY=$(cat ./rootca/tessera_ca.key | base64 -w0) - AMBASSADOR_CERTS=$(cat ./ambassadortls/${NODE_NAME}-certchain.pem | base64 -w0) - AMBASSADOR_KEYS=$(cat ./ambassadortls/tessera_cer.key | base64 -w0) - KEYSTORE_FILE=$(cat ./ambassadortls/${NODE_NAME}-keystore.pkcs12 | base64 -w0) - echo "{\"data\": { - \"rootcakey\": \"${ROOTCAKEY}\", - \"rootcapem\": \"${ROOTCAPEM}\", - \"ambassadorcrt\": \"${AMBASSADOR_CERTS}\", - \"ambassadorkey\": \"${AMBASSADOR_KEYS}\", - \"keystore\": \"${KEYSTORE_FILE}\" - }}" > payload.json - - # Calling a function to write secrets to the Vault. - vaultBevelFunc 'write' "${CERTS_SECRET_PREFIX}/crypto/${NODE_NAME}/certs" 'payload.json' - rm payload.json - - # Calling a function to retrieve secrets from Vault. - vaultBevelFunc "readJson" "${CERTS_SECRET_PREFIX}/crypto/${NODE_NAME}/certs" - - R_KEY=$(echo ${VAULT_SECRET} | jq -r '.["rootcakey"]' 2>&1) - R_PEM=$(echo ${VAULT_SECRET} | jq -r '.["rootcapem"]' 2>&1) - A_CERT=$(echo ${VAULT_SECRET} | jq -r '.["ambassadorcrt"]' 2>&1) - A_KEY=$(echo ${VAULT_SECRET} | jq -r '.["ambassadorkey"]' 2>&1) - K_KEY=$(echo ${VAULT_SECRET} | jq -r '.["keystore"]' 2>&1) - - for field in "$R_KEY" "$R_PEM" "$A_CERT" "$A_KEY" "$K_KEY" - do - if [ "$field" = "null" ] || [[ "$field" = "parse error"* ]] || [ "$field" = "" ] - then - echo "certificates write or read fail" - break - else - echo "Certificates write or read success." - sleep 5 - COUNTER=`expr "$COUNTER" + 1` - fi - done - if [ "$COUNTER" -gt 30 ] - then - echo "Retry attempted `expr $COUNTER - 1` times, Certificates have not been saved." - exit 1 - else - echo "Certificates have been saved." 
- fi - echo "completed" - fi - done - volumeMounts: - - name: certcheck - mountPath: /certcheck - - name: scripts-volume - mountPath: /scripts/bevel-vault.sh - subPath: bevel-vault.sh - imagePullSecrets: - - name: {{ $.Values.image.imagePullSecret }} - volumes: - - name: certcheck - emptyDir: - medium: Memory - - name: credentials - emptyDir: - medium: Memory - - name: certs-keys - emptyDir: - medium: Memory - - name: scripts-volume - configMap: - name: bevel-vault-script - - name: certs-etc diff --git a/platforms/quorum/charts/quorum-tlscerts-gen/values.yaml b/platforms/quorum/charts/quorum-tlscerts-gen/values.yaml deleted file mode 100644 index b3544733fa4..00000000000 --- a/platforms/quorum/charts/quorum-tlscerts-gen/values.yaml +++ /dev/null @@ -1,99 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Default values for Certs Generator chart. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -############################################################# -# Basic Configuration # -############################################################# -# Provide the name of the node -# Eg. name: -name: node_1 -# This section contains the quorum metadata. -metadata: - # Provide the namespace for the quorum Certs Generator. - # Eg. namespace: cenm - namespace: default - # Provide any additional labels for the quorum Certs Generator. - labels: - -# Provide information regarding the Docker images used. -image: - # Provide the alpine utils image, which is used for all init-containers of deployments/jobs. - # NOTE: The alpine image used is the base alpine image with CURL installed. - # Eg. initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - # Provide the image for the certs container. - # Eg. certsContainerName: ghcr.io/hyperledger/bevel-build:jdk8-latest - certsContainerName: ghcr.io/hyperledger/bevel-build:jdk8-latest - # Provide the docker-registry secret created and stored in kubernetes cluster as a secret. - # Eg. imagePullSecret: regcred - imagePullSecret: regcred - # Pull policy to be used for the Docker image - # Eg. pullPolicy: Always - pullPolicy: IfNotPresent - -############################################################# -# HashiCorp Vault Configuration # -############################################################# -# NOTE: Make sure that the vault is already unsealed, intialized and configured to -# use Kubernetes service account token based authentication. -# For more info, see https://www.vaultproject.io/docs/auth/kubernetes - -vault: - # Provide the vault address - # Eg. address: http://vault.example.com:8200 - address: - # Provide the vault role used. - # Eg. role: vault-role - role: vault-role - # Provide the authpath configured to be used. - authpath: quorumnode_1 - # Provide the service account name autheticated to vault. - # NOTE: Make sure that the service account is already created and autheticated to use the vault. - # Eg. serviceaccountname: vault-auth - serviceaccountname: vault-auth - # Provide the vault path where the certificates are stored - # Eg. 
certsecretprefix: secret/cenm-org-name - certsecretprefix: secretsv2/node_1 - # Number of retries to check contents from vault - retries: 30 - type: hashicorp - -############################################################# -# SUBJECT Details # -############################################################# -# This section details the X509 subjects - -subjects: - # Mention the subject for rootca - # Eg. rootca: "CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB" - root_subject: "CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB" - # Mention the subject for cert - #Eg. cert_subject: "CN=DLT Root CA/OU=DLT/O=DLT/L=London/C=GB" - cert_subject: "CN=DLT Root CA/OU=DLT/O=DLT/L=London/C=GB" -############################################################# -# Opensslvars Details # -############################################################# -# This section details of the Openssl used in domain creation - -opensslVars: - #provides the name for domain. - #Eg. domain_name: nodename.rc.dev2.aws.example.com - domain_name: - #provides the name for domain_name api endpoint. - #Eg. domain_name_api: nodenameapi.rc.dev2.aws.example.com - domain_name_api: - #provides the name for domain_name web endpoint. - #Eg. domain_name_web: nodenameweb.rc.dev2.aws.example.com - domain_name_web: - #provides the name for domain domain_name tessera endpoint. - #Eg. domain_name_tessera: nodename-tessera.nodename-quo - domain_name_tessera: - #Provide tm client port - clientport: 8888 diff --git a/platforms/quorum/charts/quorum-validator-node/Chart.yaml b/platforms/quorum/charts/quorum-validator-node/Chart.yaml deleted file mode 100644 index 1f11fcffae3..00000000000 --- a/platforms/quorum/charts/quorum-validator-node/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "Quorum: This Helm chart deploys a validator Quorum node." -name: quorum-validator-node -version: 1.0.0 diff --git a/platforms/quorum/charts/quorum-validator-node/README.md b/platforms/quorum/charts/quorum-validator-node/README.md deleted file mode 100644 index 1d7d340c2c6..00000000000 --- a/platforms/quorum/charts/quorum-validator-node/README.md +++ /dev/null @@ -1,243 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) -[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# Quorum Validator Node Deployment - -- [Quorum Validator Node Deployment Helm Chart](#quorum-validator-node-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Verification](#verification) -- [Updating the Deployment](#updating-the-deployment) -- [Deletion](#deletion) -- [Contributing](#contributing) -- [License](#license) - - - -## Quorum Validator Node Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-validator-node) helps to deploy Quorum validator nodes. 
- - - -## Prerequisites ---- -Before deploying the Helm chart, make sure to have the following prerequisites: - -- Kubernetes cluster up and running. -- The GoQuorum network is set up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Either HAproxy or Ambassador is required as ingress controller. -- Helm installed. - - - -## Chart Structure ---- -The structure of the Helm chart is as follows: - -``` -quorum-validator-node/ - |- templates/ - |- _helpers.yaml - |- configmap.yaml - |- deployment.yaml - |- ingress.yaml - |- service.yaml - |- Chart.yaml - |- README.md - |- values.yaml -``` - -- `templates/`: This directory contains the template files for generating Kubernetes resources. -- `helpers.tpl`: A template file used for defining custom labels in the Helm chart. -- `configmap.yaml`: The file defines a ConfigMap that stores the base64-encoded content of the "genesis.json" file under the key "genesis.json.base64" in the specified namespace. -- `deployment.yaml`: This file is a configuration file for deploying a StatefulSet in Kubernetes. It creates a StatefulSet with a specified number of replicas and defines various settings for the deployment. It includes initialization containers for fetching secrets from a Vault server, an init container for initializing the Quorum blockchain network, and a main container for running the Quorum validator node. It also specifies volume mounts for storing certificates and data. The StatefulSet ensures stable network identities for each replica. -- `ingress.yaml`: This file is a Kubernetes configuration file for setting up an Ingress resource with HAProxy as the provider. It includes annotations for SSL passthrough and specifies rules for routing traffic based on the host and path. -- `service.yaml`: This file defines a Kubernetes Service with multiple ports for different protocols and targets, and supports Ambassador proxy annotations for specific configurations when using the "ambassador" proxy provider. -- `Chart.yaml`: Provides metadata about the chart, such as its name, version, and description. -- `README.md`: This file provides information and instructions about the Helm chart. -- `values.yaml`: Contains the default configuration values for the chart. It includes configuration for the metadata, image, node, Vault, etc. - - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-validator-node/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. 
Here are some important configuration options: - -## Parameters ---- - -### replicaCount - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------ | ------------- | -| replicaCount | Number of replicas | 1 | - -### metadata - -| Name | Description | Default Value | -| ----------------| ---------------------------------------------------------------------------- | ------------- | -| namespace | Provide the namespace for the Quorum node | default | -| labels | Provide any additional labels | "" | - -### image - -| Name | Description | Default Value | -| ---------------| ------------------------------------------------------------------------------------ | ------------------------------------- | -| node | Provide the valid image name and version for quorum node | quorumengineering/quorum:2.1.1 | -| alpineutils | Provide the valid image name and version to read certificates from vault server | ghcr.io/hyperledger/bevel-alpine:latest | -| busybox | Provide the valid image name and version for busybox | busybox | -| mysql | Provide the valid image name and version for MySQL. This is used as the DB for TM | mysql/mysql-server:5.7 | - -### node - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------------------------------ | ------------- | -| name | Provide the name for Quorum node | node-1 | -| status | Provide the status of the node as default,additional | default | -| peer_id | Provide the id which is obtained when the new peer is added for raft consensus | 5 | -| consensus | Provide the consesus for the Quorum network, values can be 'raft' or 'ibft' | ibft | -| mountPath | Provide the mountpath for Quorum pod | /etc/quorum/qdata | -| imagePullSecret | Provide the docker secret name in the namespace | regcred | -| keystore | Provide the keystore file name | keystore_1 | -| servicetype | Provide the k8s service type | ClusterIP | -| ports.rpc | Provide the rpc service ports | 8546 | -| ports.raft | Provide the raft service ports | 50401 | -| ports.tm | Provide the Tessera Transaction Manager service ports | 15013 | -| ports.quorum | Provide the Quorum port | 21000 | -| ports.db | Provide the DataBase port | 3306 | -| dbname | Provide the mysql DB name | demodb | -| mysqluser | Provide the mysql username | demouser | -| mysqlpassword | Provide the mysql user password | password | - -### vault - -| Name | Description | Default Value | -| ---------------- | -------------------------------------------------------------------------| ------------- | -| address | Address/URL of the Vault server. 
| "" | -| secretprefix | Provide the Vault secret path from where secrets will be read | secret/org1/crypto/node_1 | -| serviceaccountname | Provide the service account name verified with Vault | vault-auth | -| keyname | Provide the key name from where Quorum secrets will be read | quorum | -| role | Provide the service role verified with Vault | vault-role | -| authpath | Provide the Vault auth path created for the namespace | quorumorg1 | - -### genesis - -| Name | Description | Default Value | -| --------| ---------------------------------------------- | ------------- | -| genesis | Provide the genesis.json file in base64 format | "" | - - -### staticnodes - -| Name | Description | Default Value | -| ----------------| --------------------------------------| ------------- | -| staticnodes | Provide the static nodes as an array | "" | - -### proxy - -| Name | Description | Default Value | -| --------------------- | --------------------------------------------------------------------- | ------------- | -| provider | The proxy/ingress provider (ambassador, haproxy) | ambassador | -| external_url | This field contains the external URL of the node | "" | -| portTM | The TM port exposed externally via the proxy | 15013 | -| rpcport | The RPC port exposed externally via the proxy | 15030 | -| quorumport | The Quorum port exposed externally via the proxy | 15031 | -| portRaft | The Raft port exposed externally via the proxy | 15032 | - -### storage - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ----------------- | -| storageclassname | The Kubernetes storage class for the node | awsstorageclass | -| storagesize | The memory for the node | 1Gi | -| dbstorage | Provide the memory for database | 1Gi | - - - -## Deployment ---- - -To deploy the quorum-member-node Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-validator-node/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install the chart: - ``` - $ helm repo add bevel https://hyperledger.github.io/bevel/ - $ helm install ./quorum-validator-node - ``` -Replace `` with the desired name for the release. - -This will deploy the quorum validator node to the Kubernetes cluster based on the provided configurations. - - - -## Verification ---- - -To verify the deployment, we can use the following command: -``` -$ kubectl get statefulsets -n -``` -Replace `` with the actual namespace where the StatefulSet was created. This command will display information about the StatefulSet, including the number of replicas and their current status. - - - -## Updating the Deployment ---- - -If we need to update the deployment with new configurations or changes, modify the same [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-validator-node/values.yaml) file with the desired changes and run the following Helm command: -``` -$ helm upgrade ./quorum-validator-node -``` -Replace `` with the name of the release. This command will apply the changes to the deployment, ensuring the quorum validator node is up to date. - - - -## Deletion ---- - -To delete the deployment and associated resources, run the following Helm command: -``` -$ helm uninstall -``` -Replace `` with the name of the release. This command will remove all the resources created by the Helm chart. 
- - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Quorum Validator Node Deployment Helm Chart](https://github.com/hyperledger/bevel/blob/develop/platforms/quorum/charts/quorum-validator-node), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - - -## License - -This chart is licensed under the Apache v2.0 license. - -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -``` diff --git a/platforms/quorum/charts/quorum-validator-node/templates/_helpers.tpl b/platforms/quorum/charts/quorum-validator-node/templates/_helpers.tpl deleted file mode 100644 index a4793a721ee..00000000000 --- a/platforms/quorum/charts/quorum-validator-node/templates/_helpers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{- define "labels.custom" -}} - {{- range $key, $val := .Values.metadata.labels -}} - {{- $key -}}: {{- $val -}} - {{- end -}} -{{- end -}} diff --git a/platforms/quorum/charts/quorum-validator-node/templates/configmap.yaml b/platforms/quorum/charts/quorum-validator-node/templates/configmap.yaml deleted file mode 100644 index 1f9acd713e9..00000000000 --- a/platforms/quorum/charts/quorum-validator-node/templates/configmap.yaml +++ /dev/null @@ -1,20 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: genesis-{{ .Values.node.name }} - namespace: {{ .Values.metadata.namespace }} - labels: - app.kubernetes.io/name: genesis-{{ .Values.node.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} -data: - genesis.json.base64: {{ .Values.genesis | quote }} diff --git a/platforms/quorum/charts/quorum-validator-node/templates/deployment.yaml b/platforms/quorum/charts/quorum-validator-node/templates/deployment.yaml deleted file mode 100644 index 3c08b06d9c2..00000000000 --- a/platforms/quorum/charts/quorum-validator-node/templates/deployment.yaml +++ /dev/null @@ -1,237 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: apps/v1 -kind: StatefulSet -metadata: - namespace: {{ .Values.metadata.namespace }} - creationTimestamp: null - labels: - app.kubernetes.io/name: {{ .Values.node.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} - name: {{ .Values.node.name }} -spec: - serviceName: {{ .Values.node.name }} - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - app: consortiumchain - service.rpc: {{ .Values.node.name }} - app.kubernetes.io/name: {{ .Values.node.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} - template: - metadata: - creationTimestamp: null - labels: - app: consortiumchain - name: {{ .Values.node.name }} - service.rpc: {{ .Values.node.name }} - app.kubernetes.io/name: {{ .Values.node.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} - spec: - serviceAccountName: {{ .Values.vault.serviceaccountname }} - hostname: {{ .Values.node.name }} - imagePullSecrets: - - name: {{ .Values.node.imagePullSecret }} - volumes: - - name: certificates - emptyDir: - medium: Memory - - name: mysql - emptyDir: - medium: Memory - - name: {{ .Values.node.name }}-genesis-volume - configMap: - name: genesis-{{ .Values.node.name }} - items: - - key: genesis.json.base64 - path: genesis.json.base64 - - name: scripts-volume - configMap: - name: bevel-vault-script - - name: package-manager - configMap: - name: package-manager - initContainers: - - name: certificates-init - image: {{ .Values.images.alpineutils }} - imagePullPolicy: IfNotPresent - env: - - name: VAULT_ADDR - value: {{ .Values.vault.address }} - - name: VAULT_SECRET_PREFIX - value: {{ .Values.vault.secretprefix }} - - name: KUBERNETES_AUTH_PATH - value: {{ .Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ .Values.vault.role }} - - name: MOUNT_PATH - value: "/secret" - - name: VAULT_TYPE - value: "{{ $.Values.vault.type }}" - command: ["sh", "-c"] - args: - - |- - #!/bin/bash - . /scripts/bevel-vault.sh - - # Calling a function to retrieve the vault token. - vaultBevelFunc "init" - # Calling a function to retrieve secrets from Vault only if they exist. - vaultBevelFunc "readJson" "${VAULT_SECRET_PREFIX}/{{ .Values.vault.keyname }}" - - nodekey=$(echo ${VAULT_SECRET} | jq -r '.["nodekey"]') - keystore=$(echo ${VAULT_SECRET} | jq -r '.["keystore"]' | base64 -d ) - gethpassword=$(echo ${VAULT_SECRET} | jq -r '.["geth_password"]') - username=$(echo ${VAULT_SECRET} | jq -r '.["db_user"]') - password=$(echo ${VAULT_SECRET} | jq -r '.["db_password"]') - - OUTPUT_PATH="${MOUNT_PATH}/keys" - mkdir -p ${OUTPUT_PATH} - - echo -n "${gethpassword}" > ${OUTPUT_PATH}/passwords.txt - echo -n "${nodekey}" > ${OUTPUT_PATH}/nodekey - echo -n "${keystore}" > ${OUTPUT_PATH}/{{ .Values.node.keystore }} - echo "${username}" > ${OUTPUT_PATH}/username - echo "${password}" > ${OUTPUT_PATH}/password - - echo "Done checking for certificates in vault." 
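As an aside on the certificates-init script above: it expects a single Vault secret at `<secretprefix>/<keyname>` holding the `nodekey`, `keystore`, `geth_password`, `db_user` and `db_password` fields, with the keystore stored base64-encoded. A rough pre-flight check from a workstation is sketched below; the CLI-based read, the example address, and the path (built from the chart defaults `secret/org1/crypto/node_1` and `quorum`) are all assumptions, since the container itself goes through the `bevel-vault.sh` helper and the Vault HTTP API.

```
# Hedged pre-flight check (assumes a KV v2 engine and the chart's default paths).
export VAULT_ADDR="${VAULT_ADDR:-http://vault.example.com:8200}"   # placeholder address
export VAULT_TOKEN="${VAULT_TOKEN:?set a token with read access}"
SECRET_PATH="secret/org1/crypto/node_1/quorum"                     # secretprefix + keyname defaults

# List the fields the init script extracts with jq
vault kv get -format=json "$SECRET_PATH" | jq -r '.data.data | keys[]'
# expected: db_password, db_user, geth_password, keystore, nodekey

# The keystore is stored base64-encoded; decode it the same way the container does
vault kv get -field=keystore "$SECRET_PATH" | base64 -d | jq .
```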
- volumeMounts: - - name: certificates - mountPath: /secret - - name: scripts-volume - mountPath: /scripts/bevel-vault.sh - subPath: bevel-vault.sh - - name: quorum-genesis-init-container - image: "{{ .Values.images.node }}" - imagePullPolicy: IfNotPresent - command: [ "sh" ] - args: - - "-cx" - - "mkdir -p $QUORUM_DATA_DIR; - cat {{ .Values.node.mountPath }}/genesis/genesis.json.base64 | base64 -d > $QUORUM_DATA_DIR/genesis.json; - if [ ! -f $QUORUM_DATA_DIR/genesis_created ]; then - echo \"running geth init\"; - /usr/local/bin/geth --datadir $QUORUM_DATA_DIR init $QUORUM_DATA_DIR/genesis.json; - date > $QUORUM_DATA_DIR/genesis_created; - fi; - " - env: - - name: QUORUM_DATA_DIR - value: {{ .Values.node.mountPath }}/dd - - name: QHOME - value: {{ .Values.node.mountPath }} - volumeMounts: - - name: {{ .Values.node.name }}-pv - mountPath: {{ .Values.node.mountPath }} - - name: {{ .Values.node.name }}-genesis-volume - mountPath: {{ .Values.node.mountPath }}/genesis - containers: - - name: quorum - image: "{{ .Values.images.node }}" - imagePullPolicy: IfNotPresent - command: ["sh"] - args: - - "-cx" - - |- - #!/usr/bin/env sh - . /scripts/package-manager.sh - - # Define the packages to install - packages_to_install="curl" - install_packages "$packages_to_install" - - echo -n {{ .Values.staticnodes | toRawJson | quote }} > $QUORUM_DATA_DIR/static-nodes.json - mkdir -p $QUORUM_DATA_DIR/geth - mkdir -p $QUORUM_DATA_DIR/keystore - # touch $qd/passwords.txt - cp $QUORUM_HOME/crypto/keys/{{ .Values.node.keystore }} $QUORUM_DATA_DIR/keystore/ - cp $QUORUM_HOME/crypto/keys/nodekey $QUORUM_DATA_DIR/geth/ - cp $QUORUM_HOME/crypto/keys/passwords.txt $QUORUM_DATA_DIR/password.txt - rm -f $QUORUM_HOME/crypto/keys/{{ .Values.node.keystore }} - rm -f $QUORUM_HOME/crypto/keys/nodekey - rm -f $QUORUM_HOME/crypto/keys/passwords.txt - args="" - NODE_STATUS="" - if [ $CONSENSUS == 'raft' ]; then - NODE_STATUS={{ .Values.node.status }} - fi; - if [ $CONSENSUS == 'raft' ] && [ $NODE_STATUS == 'additional' ]; then - args="--raftdnsenable --raft --raftport {{ .Values.node.ports.raft }} --rpcapi admin,debug,web3,eth,txpool,personal,miner,net,quorumExtension,raft --raftjoinexisting {{ .Values.node.peer_id }}"; - fi; - if [ $CONSENSUS == 'raft' ] && [ $NODE_STATUS == 'default' ]; then - args="--raftdnsenable --raft --raftport {{ .Values.node.ports.raft }} --rpcapi admin,debug,web3,eth,txpool,personal,raft,miner,net,quorumExtension"; - fi; - if [ $CONSENSUS == 'ibft' ]; then - args=" --istanbul.blockperiod 3 --syncmode full --mine --miner.threads 1 --rpcapi admin,debug,web3,eth,txpool,personal,istanbul,miner,net,quorumExtension" - fi; - - LOCK_STATUS={{ .Values.node.lock }} - if [ $LOCK_STATUS = false ] - then - args=" ${args} --unlock 0 " - fi - /usr/local/bin/geth \ - --datadir $QUORUM_DATA_DIR \ - $args \ - --identity {{ .Values.node.subject | quote }} \ - --vmdebug \ - --gcmode=archive \ - --nodiscover \ - --nat=none \ - --verbosity 9 \ - --emitcheckpoints \ - --rpc \ - --rpcaddr 0.0.0.0 \ - --rpcport {{ .Values.node.ports.rpc }} \ - --rpcvhosts=* \ - --allow-insecure-unlock \ - --port {{ .Values.node.ports.quorum }} \ - --password $QUORUM_DATA_DIR/password.txt 2>&1 | tee -a $QUORUM_HOME/logs/quorum.log; - ports: - - containerPort: {{ .Values.node.ports.rpc }} - - containerPort: {{ .Values.node.ports.quorum }} -{{- if eq $.Values.node.consensus "raft" }} - - containerPort: {{ .Values.node.ports.raft }} -{{- end }} - env: - - name: QUORUM_DATA_DIR - value: {{ .Values.node.mountPath }}/dd - - name: QUORUM_HOME - 
value: {{ .Values.node.mountPath }} - - name: QHOME - value: {{ .Values.node.mountPath }} - - name: TM_HOME - value: {{ .Values.node.mountPath }}/tm/ - - name: CONSENSUS - value: {{ .Values.node.consensus }} - volumeMounts: - - name: certificates - mountPath: {{ .Values.node.mountPath }}/crypto/ - - name: {{ .Values.node.name }}-pv - mountPath: {{ .Values.node.mountPath }} - - name: package-manager - mountPath: /scripts/package-manager.sh - subPath: package-manager.sh - restartPolicy: Always - volumeClaimTemplates: - - metadata: - name: {{ .Values.node.name }}-pv - spec: - storageClassName: {{ .Values.storage.storageclassname }} - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: {{ .Values.storage.storagesize }} diff --git a/platforms/quorum/charts/quorum-validator-node/templates/ingress.yaml b/platforms/quorum/charts/quorum-validator-node/templates/ingress.yaml deleted file mode 100644 index 718a5a10a88..00000000000 --- a/platforms/quorum/charts/quorum-validator-node/templates/ingress.yaml +++ /dev/null @@ -1,50 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -{{- if eq .Values.proxy.provider "haproxy" }} -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: {{ .Values.node.name }} - namespace: {{ .Values.metadata.namespace }} - annotations: - kubernetes.io/ingress.class: "haproxy" - ingress.kubernetes.io/ssl-passthrough: "true" -spec: - rules: - - host: {{ .Values.node.name }}rpc.{{ .Values.proxy.external_url }} - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: {{ .Values.node.name }} - port: - number: {{ .Values.node.ports.rpc }} - - host: {{ .Values.node.name }}tcp.{{ .Values.proxy.external_url }} - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: {{ .Values.node.name }} - port: - number: {{ .Values.node.ports.quorum }} - {{- if eq $.Values.node.consensus "raft" }} - - host: {{ .Values.node.name }}raft.{{ .Values.proxy.external_url }} - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: {{ .Values.node.name }} - port: - number: {{ .Values.node.ports.raft }} - {{- end }} -{{- end }} diff --git a/platforms/quorum/charts/quorum-validator-node/templates/service.yaml b/platforms/quorum/charts/quorum-validator-node/templates/service.yaml deleted file mode 100644 index b569e860e4a..00000000000 --- a/platforms/quorum/charts/quorum-validator-node/templates/service.yaml +++ /dev/null @@ -1,93 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -kind: Service -metadata: - namespace: {{ .Values.metadata.namespace }} - annotations: - app: consortiumchain - version: '1' - creationTimestamp: null - labels: - app.kubernetes.io/name: {{ .Values.node.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . 
| nindent 2 }} - name: {{ .Values.node.name }} -spec: - selector: - app.kubernetes.io/name: {{ .Values.node.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} - service.rpc: {{ .Values.node.name }} - type: {{ .Values.node.servicetype }} - ports: - - name: wsrpc-listener - protocol: TCP - port: {{ .Values.node.ports.rpc }} - targetPort: {{ .Values.node.ports.rpc }} -{{- if eq $.Values.node.consensus "raft" }} - - name: raft - protocol: TCP - port: {{ .Values.node.ports.raft }} - targetPort: {{ .Values.node.ports.raft }} -{{- end }} - - name: quorum-listener - protocol: TCP - port: {{ .Values.node.ports.quorum }} - targetPort: {{ .Values.node.ports.quorum }} -{{- if eq $.Values.proxy.provider "ambassador" }} -## Listeners rlpx (p2p) ports ---- -apiVersion: getambassador.io/v3alpha1 -kind: Listener -metadata: - name: {{ .Values.node.name }}-rlpx - namespace: {{ .Values.metadata.namespace }} -spec: - port: {{ .Values.proxy.quorumport }} - protocol: TCP - securityModel: XFP - hostBinding: - namespace: - from: SELF -## Mapping for rpc ---- -apiVersion: getambassador.io/v3alpha1 -kind: Mapping -metadata: - name: {{ .Values.node.name }}-json-rpc - namespace: {{ .Values.metadata.namespace }} -spec: - hostname: '{{ .Values.node.name }}rpc.{{ .Values.proxy.external_url }}' - prefix: / - service: http://{{ .Values.node.name }}.{{ .Values.metadata.namespace }}:{{ .Values.node.ports.rpc }} -## TCPMapping for rlpx ---- -apiVersion: getambassador.io/v3alpha1 -kind: TCPMapping -metadata: - name: {{ .Values.node.name }}-rlpx - namespace: {{ .Values.metadata.namespace }} -spec: - port: {{ .Values.proxy.quorumport }} - service: {{ .Values.node.name }}.{{ .Values.metadata.namespace }}:{{ .Values.node.ports.quorum }} - {{- if eq $.Values.node.consensus "raft" }} ---- -apiVersion: getambassador.io/v3alpha1 -kind: TCPMapping -metadata: - name: {{ .Values.node.name }}-raft - namespace: {{ .Values.metadata.namespace }} -spec: - port: {{ .Values.proxy.portRaft }} - service: {{ .Values.node.name }}.{{ .Values.metadata.namespace }}:{{ .Values.node.ports.raft }} - {{- end }} -{{- end }} diff --git a/platforms/quorum/charts/quorum-validator-node/values.yaml b/platforms/quorum/charts/quorum-validator-node/values.yaml deleted file mode 100644 index 3ad572c61d5..00000000000 --- a/platforms/quorum/charts/quorum-validator-node/values.yaml +++ /dev/null @@ -1,122 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -#Provide the number of replicas for pods -#Eg. replicaCount: 1 -replicaCount: 1 - -metadata: - #Provide the namespace for the Quorum node - #Eg. namespace: example-quo - namespace: default - #Provide the custom labels - #NOTE: Provide labels other than name, release name , release service, chart version , chart name, run - #These lables will not be applied to VolumeClaimTemplate of StatefulSet as labels are automatically picked up by Kubernetes - #Eg. labels: - # role: minter - labels: - -#These are the various docker images being used by this chart. 
update them as needed -images: - #Provide the valid image name and version for quorum node - #Eg. node: quorumengineering/quorum:2.1.1 - node: quorumengineering/quorum:21.4.2 - #Provide the valid image name and version to read certificates from vault server - #Eg. alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - -node: - #Provide the name for Quorum node - #Eg. name: node-1 - name: node-1 - #Provide the status of the node as default,additional - #Eg. status: default - status: default - #Provide the id which is obtained when the new peer is added for raft consensus - #This field is only for RAFT consensus and only when a new node is added to existing network - #Eg. peer_id: 5 - peer_id: 5 - #Provide the consesus for the Quorum network - # values can be 'raft' or 'ibft' - #Eg. consensus: raft - consensus: ibft - #Provide the mountpath for Quorum pod - #Eg. mountPath: /etc/quorum/qdata - mountPath: /etc/quorum/qdata - #Provide the docker secret name in the namespace - #Eg. imagePullSecret: regcred - imagePullSecret: regcred - #Provide the keystore file name - #Eg. keystore: keystore_1 - keystore: keystore_1 - #Provide the k8s service type - servicetype: ClusterIP - lock: false - #Provide the container and service ports - ports: - rpc: 8546 - raft: 50401 - quorum: 21000 - -vault: - #Provide the Vault Address from where secrets will be read - #Eg. address: http://vault.internal.demo.aws.blockchaincloudpoc.com:9000 - address: - #Provide the Vault secret path from where secrets will be read - #Eg. secretprefix: secret/org1/crypto/node_1 - secretprefix: secret/org1/crypto/node_1 - #Provide the serviceaccount which is verified with Vault - #Eg. serviceaccountname: vault-auth - serviceaccountname: vault-auth - #Provide the key name from where quorum secrets will be read - #Eg. keyname: quorum - keyname: quorum - #Provide the service role which is verified with Vault - #Eg. role: vault-role - role: vault-role - #Provide the Vault auth-path which is created for the namespace - #Eg. authpath: quorumorg1 - authpath: quorumorg1 - -#Provide the genesis.json file in base64 format -#Eg. genesis: ewogICAgImFsbG9jIjogewogICAgICAgICIwOTg2Nzk2ZjM0ZDhmMWNkMmI0N2M3MzQ2YTUwYmY2 -# OWFhOWM1NzcyIjogewogICAgICAgICAgICAiYmFsYW5jZSI6ICIxMDAwMDAwMDAwMDAwMDAwMDAw -# MDAwMDAwMDAwIgogICAgICAgIH0sCiAgICAgICAgImY2MjkyNTQ1YWVjNTkyMDU4MzQ -genesis: - -#Provide the staticnodes as an array -#Eg. staticnodes: -# - enode://293ce022bf114b14520ad97349a1990180973885cc2afb6f4196b490397e164fabc87900736e4b685c5f4cf31479021ba0d589e58bd0ea6792ebbfd5eb0348af@node1.quo.demo.aws.blockchaincloudpoc.com:15011?discport=0&raftport=15012 -# - enode://4e7a1a15ef6a9bbf30f8b2a6b927f4941c9e80aeeeed14cfeeea619f93256b41ef9994b9a8af371f394c2a6de9bc6930e142c0350399a22081c518ab2d27f92a@node2.quo.demo.aws.blockchaincloudpoc.com:15021?discport=0&raftport=15022 -# - enode://947fa59385da72f4a68b7348ef5dab7e759148b48b30892c29b7b03a872233a6475a13fd5df62ea75abff9981d459606c1f878cd6ab929307eac6b56b19424bd@node3.quo.demo.aws.blockchaincloudpoc.com:15031?discport=0&raftport=15032 -# - enode://b28ac5bd1c554d05d68db65b0c3351838249e5b935e04d4b361745b74e6c7b3379927eefc11a5fef605fa64d14d000645e182662c51f5bf4a9dd228377f0e1ba@node4.quo.demo.aws.blockchaincloudpoc.com:15041?discport=0&raftport=15042 -staticnodes: - -proxy: - #This will be the proxy/ingress provider. Can have values "ambassador" or "haproxy" - # TODO "haproxy" has not been tested - #Eg. 
provider: "ambassador" - provider: "ambassador" - #This field contains the external URL of the node - #Eg. external_url: node1.quo.demo.aws.blockchaincloudpoc.com - external_url: - # Following are the ports that are exposed externally via the proxy - quorumport: 15031 - portRaft: 15032 - -storage: - #Provide the kubernetes storageclass for node - #Eg. storageclassname: awsstorageclass - storageclassname: awsstorageclass - #Provide the memory for node - #Eg. storagesize: 1Gi - storagesize: 1Gi - #Provide the memory for database - #Eg. dbstorage: 1Gi - dbstorage: 1Gi diff --git a/platforms/quorum/charts/values/noproxy-and-novault/genesis-sec.yaml b/platforms/quorum/charts/values/noproxy-and-novault/genesis-sec.yaml new file mode 100644 index 00000000000..2101dcb16be --- /dev/null +++ b/platforms/quorum/charts/values/noproxy-and-novault/genesis-sec.yaml @@ -0,0 +1,12 @@ +# helm install genesis -f values/noproxy-and-novault/genesis-sec.yaml -n carrier-quo quorum-genesis +global: + serviceAccountName: vault-auth + cluster: + provider: aws + cloudNativeServices: false + vault: + type: kubernetes + +settings: + removeGenesisOnDelete: true + secondaryGenesis: true diff --git a/platforms/quorum/charts/values/noproxy-and-novault/genesis.yaml b/platforms/quorum/charts/values/noproxy-and-novault/genesis.yaml new file mode 100644 index 00000000000..a8a83626b31 --- /dev/null +++ b/platforms/quorum/charts/values/noproxy-and-novault/genesis.yaml @@ -0,0 +1,10 @@ +# helm install genesis -f values/noproxy-and-novault/genesis.yaml -n supplychain-quo quorum-genesis +global: + serviceAccountName: vault-auth + cluster: + provider: aws + cloudNativeServices: false + vault: + type: kubernetes +settings: + removeGenesisOnDelete: true diff --git a/platforms/quorum/charts/values/noproxy-and-novault/txnode-sec.yaml b/platforms/quorum/charts/values/noproxy-and-novault/txnode-sec.yaml new file mode 100644 index 00000000000..3e1e4ac8527 --- /dev/null +++ b/platforms/quorum/charts/values/noproxy-and-novault/txnode-sec.yaml @@ -0,0 +1,46 @@ +--- +# helm install member-2 -f values/noproxy-and-novault/txnode-sec.yml -n carrier-quo quorum-node +global: + serviceAccountName: vault-auth + cluster: + provider: aws + cloudNativeServices: false + vault: + type: kubernetes + proxy: + provider: none + externalUrlSuffix: svc.cluster.local + +tessera: + enabled: true + tessera: + tlsMode: "OFF" + peerNodes: + - url: "http://quorum-tessera-node-member-0.supplychain-quo:9000" + resources: + cpuLimit: 0.25 + cpuRequest: 0.05 + memLimit: "2G" + memRequest: "1G" + password: 'password' + storage: + enabled: false + size: 1Gi + dbSize: 2Gi + allowedTopologies: + enabled: false + +tls: + enabled: false + +node: + goquorum: + metrics: + serviceMonitorEnabled: true + resources: + cpuLimit: 0.25 + cpuRequest: 0.05 + memLimit: "1G" + memRequest: "300M" + account: + password: 'password' diff --git a/platforms/quorum/charts/values/noproxy-and-novault/txnode.yaml b/platforms/quorum/charts/values/noproxy-and-novault/txnode.yaml new file mode 100644 index 00000000000..fdde1d2b51b --- /dev/null +++ b/platforms/quorum/charts/values/noproxy-and-novault/txnode.yaml @@ -0,0 +1,48 @@ +--- +# helm install member-1 -f values/noproxy-and-novault/txnode.yml -n supplychain-quo quorum-node +# helm upgrade member-1 -f values/noproxy-and-novault/txnode.yml -n supplychain-quo quorum-node +global: + serviceAccountName: vault-auth + cluster: + provider: aws + cloudNativeServices: false + vault: + type: kubernetes + proxy: + provider: none + externalUrlSuffix: 
svc.cluster.local + +tessera: + enabled: true + tessera: + tlsMode: "OFF" + resources: + cpuLimit: 0.25 + cpuRequest: 0.05 + memLimit: "2G" + memRequest: "1G" + password: 'password' + storage: + enabled: false + size: 1Gi + dbSize: 2Gi + allowedTopologies: + enabled: false + +tls: + enabled: false + settings: + certSubject: CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB + tmTls: true + +node: + goquorum: + metrics: + serviceMonitorEnabled: true + resources: + cpuLimit: 0.25 + cpuRequest: 0.05 + memLimit: "1G" + memRequest: "300M" + account: + password: 'password' diff --git a/platforms/quorum/charts/values/noproxy-and-novault/validator.yaml b/platforms/quorum/charts/values/noproxy-and-novault/validator.yaml new file mode 100644 index 00000000000..bcaba85f48f --- /dev/null +++ b/platforms/quorum/charts/values/noproxy-and-novault/validator.yaml @@ -0,0 +1,14 @@ +--- +# helm install validator-1 -f values/noproxy-and-novault/validator.yml -n supplychain-quo quorum-node +# helm upgrade validator-1 -f values/noproxy-and-novault/validator.yml -n supplychain-quo quorum-node +global: + serviceAccountName: vault-auth + cluster: + provider: aws + cloudNativeServices: false # set to true to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) + vault: + type: kubernetes + + proxy: + provider: none + externalUrlSuffix: svc.cluster.local diff --git a/platforms/quorum/charts/values/proxy-and-vault/genesis-sec.yaml b/platforms/quorum/charts/values/proxy-and-vault/genesis-sec.yaml new file mode 100644 index 00000000000..66427b09776 --- /dev/null +++ b/platforms/quorum/charts/values/proxy-and-vault/genesis-sec.yaml @@ -0,0 +1,18 @@ +# helm install genesis -f values/proxy-and-vault/genesis-sec.yaml -n carrier-quo quorum-genesis +global: + serviceAccountName: vault-auth + cluster: + provider: aws + cloudNativeServices: false + kubernetesUrl: https://kubernetes.url + vault: + type: hashicorp + network: quorum + address: http://vault.demo.com:8200 + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + role: vault-role +settings: + removeGenesisOnDelete: true + secondaryGenesis: true diff --git a/platforms/quorum/charts/values/proxy-and-vault/genesis.yaml b/platforms/quorum/charts/values/proxy-and-vault/genesis.yaml new file mode 100644 index 00000000000..a8b68f816c2 --- /dev/null +++ b/platforms/quorum/charts/values/proxy-and-vault/genesis.yaml @@ -0,0 +1,17 @@ +# helm install genesis -f values/proxy-and-vault/genesis.yaml -n supplychain-quo --create-namespace quorum-genesis +global: + serviceAccountName: vault-auth + cluster: + provider: aws + cloudNativeServices: false + kubernetesUrl: https://kubernetes.url + vault: + type: hashicorp + role: vault-role + network: quorum + address: http://vault.demo.com:8200 + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" +settings: + removeGenesisOnDelete: true diff --git a/platforms/quorum/charts/values/proxy-and-vault/txnode-sec.yaml b/platforms/quorum/charts/values/proxy-and-vault/txnode-sec.yaml new file mode 100644 index 00000000000..73899b3dcaf --- /dev/null +++ b/platforms/quorum/charts/values/proxy-and-vault/txnode-sec.yaml @@ -0,0 +1,62 @@ +--- +# helm install member-2 -f values/proxy-and-vault/txnode-sec.yml -n carrier-quo --set global.proxy.p2p=15016 quorum-node +global: + serviceAccountName: vault-auth + cluster: + provider: aws + cloudNativeServices: false + kubernetesUrl: https://kubernetes.url + vault: + type: hashicorp + network: quorum + address: 
http://vault.demo.com:8200 + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + role: vault-role + proxy: + provider: ambassador + externalUrlSuffix: test.yourdomain.com + tmport: 443 + +storage: + size: "2Gi" + +tessera: + enabled: true + tessera: + port: 443 + peerNodes: + - url: "https://supplychain.test.yourdomain.com" + resources: + cpuLimit: 0.25 + cpuRequest: 0.05 + memLimit: "2G" + memRequest: "1G" + password: 'password' + storage: + enabled: false + size: 1Gi + dbSize: 2Gi + allowedTopologies: + enabled: false + +tls: + enabled: true + settings: + certSubject: CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB + tmTls: true + +node: + goquorum: + metrics: + serviceMonitorEnabled: true + resources: + cpuLimit: 0.25 + cpuRequest: 0.05 + memLimit: "1G" + memRequest: "300M" + account: + password: 'password' + p2p: + discovery: false diff --git a/platforms/quorum/charts/values/proxy-and-vault/txnode.yaml b/platforms/quorum/charts/values/proxy-and-vault/txnode.yaml new file mode 100644 index 00000000000..200ee6c857e --- /dev/null +++ b/platforms/quorum/charts/values/proxy-and-vault/txnode.yaml @@ -0,0 +1,57 @@ +--- +# helm install member-1 -f values/proxy-and-vault/txnode.yml -n supplychain-quo --set global.proxy.p2p=15015 quorum-node +# helm upgrade member-1 -f values/proxy-and-vault/txnode.yml -n supplychain-quo --set global.proxy.p2p=15015 quorum-node +global: + serviceAccountName: vault-auth + cluster: + provider: aws + cloudNativeServices: false + vault: + type: hashicorp + network: quorum + address: http://vault.demo.com:8200 + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + role: vault-role + proxy: + provider: ambassador + externalUrlSuffix: test.yourdomain.com + tmport: 443 + +tessera: + enabled: true + tessera: + port: 443 + resources: + cpuLimit: 0.25 + cpuRequest: 0.05 + memLimit: "2G" + memRequest: "1G" + password: 'password' + storage: + enabled: false + size: 1Gi + dbSize: 2Gi + allowedTopologies: + enabled: false + +tls: + enabled: true + settings: + certSubject: CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB + tmTls: true + +node: + goquorum: + metrics: + serviceMonitorEnabled: true + resources: + cpuLimit: 0.25 + cpuRequest: 0.05 + memLimit: "1G" + memRequest: "300M" + account: + password: 'password' + p2p: + discovery: false diff --git a/platforms/quorum/charts/values/proxy-and-vault/validator.yaml b/platforms/quorum/charts/values/proxy-and-vault/validator.yaml new file mode 100644 index 00000000000..c0b14bbe12d --- /dev/null +++ b/platforms/quorum/charts/values/proxy-and-vault/validator.yaml @@ -0,0 +1,19 @@ +--- +# helm install validator-1 -f values/proxy-and-vault/validator.yml -n supplychain-quo --set global.proxy.p2p=15011 quo-node +# helm upgrade validator-1 -f values/proxy-and-vault/validator.yml -n supplychain-quo --set global.proxy.p2p=15011 quo-node +global: + serviceAccountName: vault-auth + cluster: + provider: aws + cloudNativeServices: false + vault: + type: hashicorp + role: vault-role + network: quorum + address: http://vault.demo.com:8200 + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + proxy: + provider: ambassador + externalUrlSuffix: test.yourdomain.com diff --git a/platforms/quorum/configuration/deploy-network.yaml b/platforms/quorum/configuration/deploy-network.yaml index e0e6f9f37f1..19b67ac454d 100644 --- a/platforms/quorum/configuration/deploy-network.yaml +++ b/platforms/quorum/configuration/deploy-network.yaml @@ -33,201 +33,75 @@ gitops: "{{ 
item.gitops }}" loop: "{{ network['organizations'] }}" - # Create Storageclass - - name: Create Storage Class + # Create necessary secrets + - name: "Create k8s secrets" include_role: - name: "{{ playbook_dir }}/../../../platforms/shared/configuration/roles/setup/storageclass" + name: create/secrets vars: - org_name: "{{ org.name | lower }}" - sc_name: "{{ org_name }}-bevel-storageclass" - region: "{{ org.k8s.region | default('eu-west-1') }}" - loop: "{{ network['organizations'] }}" - loop_control: - loop_var: org - - # Setup script for Vault and OS Package Manager - - name: "Setup script for Vault and OS Package Manager" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/setup/scripts" - vars: - namespace: "{{ org.name | lower }}-quo" - kubernetes: "{{ org.k8s }}" - loop: "{{ network['organizations'] }}" - loop_control: - loop_var: org - - # Setup Vault-Kubernetes accesses and Regcred for docker registry - - name: "Setup vault" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/setup/vault_kubernetes" - vars: - name: "{{ org.name | lower }}" component_ns: "{{ org.name | lower }}-quo" - component_name: "{{ org.name | lower }}-vaultk8s-job" - component_auth: "{{ network.env.type }}{{ name }}" - component_type: "organization" kubernetes: "{{ org.k8s }}" vault: "{{ org.vault }}" - gitops: "{{ org.gitops }}" loop: "{{ network['organizations'] }}" loop_control: loop_var: org + when: + - org.org_status is not defined or org.org_status == 'new' - # Generate Ambassador certificate for nodes - - name: "Create ambassador certificates for Nodes" + # Execute primary genesis for the first organization + - name: "Setup primary genesis with first org as Validators" include_role: - name: create/certificates/ambassador + name: setup/genesis/primary vars: - gitops: "{{ org.gitops }}" - component_auth: "quorum{{ org.name | lower }}" - component_ns: "{{ org.name | lower }}-quo" - charts_dir: "{{ org.gitops.chart_source }}" - component_name: "{{ org.name | lower }}-ambassador-certs" - kubernetes: "{{ org.k8s }}" - values_dir: "{{ playbook_dir }}/../../../{{ org.gitops.release_dir }}" - vault: "{{ org.vault }}" - peers: "{{ org.services.peers }}" - loop: "{{ network['organizations'] }}" - loop_control: - loop_var: org - - # Generate the crypto material for quorum network based on RAFT consensus and store it in vault - - name: "Generate crypto material for RAFT consensus" - include_role: - name: create/crypto/raft - vars: - gitops: "{{ org.gitops }}" - org_name: "{{ org.name }}" - component_name: "{{ org.name | lower }}" + build_path: "./build" + org: "{{ network['organizations'] | first }}" + name: "{{ org.name | lower }}" component_ns: "{{ org.name | lower }}-quo" - values_dir: "{{ playbook_dir }}/../../../{{ org.gitops.release_dir }}" - vault: "{{ org.vault }}" - peers: "{{ org.services.peers }}" - charts_dir: "{{ org.gitops.chart_source }}" kubernetes: "{{ org.k8s }}" - loop: "{{ network['organizations'] }}" - when: network.config.consensus == 'raft' - loop_control: - loop_var: org - - # Build istanbul binary and place it in the bin directory - - name: "Setup istanbul-tools" - include_role: - name: setup/istanbul - loop: "{{ network['organizations'] }}" - when: network.config.consensus == 'ibft' - - # Generate the genesis.json and nodekey/enode for all orgs of the network - - name: "Generate genesis and nodekey/enode for the network" - include_role: - name: create/genesis_nodekey - when: network.config.consensus == 'ibft' - - # Generate the crypto material 
for quorum network based on ibft consensus and store it in vault - - name: "Generate crypto material for IBFT consensus" - include_role: - name: create/crypto/ibft - vars: - component_name: "{{ org.name | lower }}" - component_ns: "{{ org.name | lower }}-quo" vault: "{{ org.vault }}" - org_name: "{{ org.name }}" - kubernetes: "{{ org.k8s }}" - peers: "{{ org.services.peers }}" - values_dir: "{{ playbook_dir }}/../../../{{ org.gitops.release_dir }}" gitops: "{{ org.gitops }}" charts_dir: "{{ org.gitops.chart_source }}" - loop: "{{ network['organizations'] }}" - loop_control: - loop_var: org - when: network.config.consensus == 'ibft' + values_dir: "./build/{{ component_ns }}" + when: + - not (add_new_org | bool) - # Generate the crypto materials for tessera tm - - name: "Generate crypto for the Tessera transaction manager" + # # This role deploy validator nodes + - name: "Deploy validator nodes" include_role: - name: create/crypto/tessera + name: create/validator_node vars: - build_path: "{{ playbook_dir }}/build" + build_path: "./build" + org: "{{ network['organizations'] | first }}" + name: "{{ org.name | lower }}" component_ns: "{{ org.name | lower }}-quo" kubernetes: "{{ org.k8s }}" vault: "{{ org.vault }}" - org_name: "{{ org.name }}" - peers: "{{ org.services.peers }}" gitops: "{{ org.gitops }}" charts_dir: "{{ org.gitops.chart_source }}" - values_dir: "{{ playbook_dir }}/../../../{{ org.gitops.release_dir }}" - loop: "{{ network['organizations'] }}" - loop_control: - loop_var: org - when: - - network.config.transaction_manager == 'tessera' - - # Deploy Tessera transaction manager node - - name: "Deploy Tessera Transaction Manager" - include_role: - name: create/tessera - vars: - consensus: "{{ network.config.consensus }}" - component_ns: "{{ org.name | lower }}-quo" - name: "{{ org.name | lower }}" - peers: "{{ org.services.peers }}" - sc_name: "{{ name }}-bevel-storageclass" - external_url: "{{ org.external_url_suffix }}" - vault: "{{ org.vault }}" - charts_dir: "{{ org.gitops.chart_source }}" - values_dir: "{{playbook_dir}}/../../../{{org.gitops.release_dir}}/{{ org.name | lower }}" - loop: "{{ network['organizations'] }}" - loop_control: - loop_var: org - when: network.config.transaction_manager == 'tessera' + values_dir: "{{playbook_dir}}/../../../{{org.gitops.release_dir}}/{{ component_ns }}" + when: + - org.org_status is not defined or org.org_status == 'new' # Deploy member nodes - - name: "Deploy member nodes" + - name: "Deploy member nodes with tessera transaction manager" include_role: name: create/member_node vars: build_path: "./build" kubernetes: "{{ org.k8s }}" - consensus: "{{ network.config.consensus }}" - component_ns: "{{ org.name | lower }}-quo" name: "{{ org.name | lower }}" - peers: "{{ org.services.peers }}" - sc_name: "{{ name }}-bevel-storageclass" - external_url: "{{ org.external_url_suffix }}" - vault: "{{ org.vault }}" - git_url: "{{ org.gitops.git_url }}" - git_branch: "{{ org.gitops.branch }}" - docker_url: "{{ network.docker.url }}" - charts_dir: "{{ org.gitops.chart_source }}" - values_dir: "{{playbook_dir}}/../../../{{org.gitops.release_dir}}/{{ org.name | lower }}" - geth_data: "{{ network.config.bootnode | default('null') }}" - loop: "{{ network['organizations'] }}" - loop_control: - loop_var: org - - # Deploy validator nodes - - name: "Deploy validator nodes" - include_role: - name: create/validator_node - vars: - build_path: "./build" - kubernetes: "{{ org.k8s }}" + firstorg: "{{ network['organizations'] | first }}" consensus: "{{ 
network.config.consensus }}" component_ns: "{{ org.name | lower }}-quo" - name: "{{ org.name | lower }}" - peers: "{{ org.services.peers }}" - sc_name: "{{ name }}-bevel-storageclass" - external_url: "{{ org.external_url_suffix }}" vault: "{{ org.vault }}" - git_url: "{{ org.gitops.git_url }}" - git_branch: "{{ org.gitops.branch }}" - docker_url: "{{ network.docker.url }}" + gitops: "{{ org.gitops }}" charts_dir: "{{ org.gitops.chart_source }}" - values_dir: "{{playbook_dir}}/../../../{{org.gitops.release_dir}}/{{ org.name | lower }}" + values_dir: "{{playbook_dir}}/../../../{{org.gitops.release_dir}}/{{ component_ns }}" geth_data: "{{ network.config.bootnode | default('null') }}" loop: "{{ network['organizations'] }}" loop_control: loop_var: org + when: + - org.org_status is not defined or org.org_status == 'new' #These variables can be overriden from the command line vars: diff --git a/platforms/quorum/configuration/roles/create/crypto/ibft/tasks/nested_main.yaml b/platforms/quorum/configuration/roles/create/crypto/ibft/tasks/nested_main.yaml deleted file mode 100644 index 1a58b99ce2c..00000000000 --- a/platforms/quorum/configuration/roles/create/crypto/ibft/tasks/nested_main.yaml +++ /dev/null @@ -1,34 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Generate tessera crypto helmrelease file -- name: "Create ibft crypto file" - include_role: - name: helm_component - vars: - component_type: "crypto" - type: "crypto_ibft_job" - name: "{{ org.name | lower }}" - component_name: "{{ peer.name }}-ibft-job" - -# Push the created deployment files to repository -- name: "Push the created deployment files to repository" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" - vars: - GIT_DIR: "{{ playbook_dir }}/../../../" - msg: "[ci skip] Pushing tessera job files for {{ component_ns }}" - tags: notest - -# Check if tessera crypto job is completed -- name: Check if tessera crypto job is completed - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" - vars: - component_name: "{{ peer.name }}-ibft-job" - component_type: Job - namespace: "{{ component_ns }}" - tags: notest diff --git a/platforms/quorum/configuration/roles/create/crypto/raft/meta/main.yaml b/platforms/quorum/configuration/roles/create/crypto/raft/meta/main.yaml deleted file mode 100644 index 589d39ae941..00000000000 --- a/platforms/quorum/configuration/roles/create/crypto/raft/meta/main.yaml +++ /dev/null @@ -1,9 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -dependencies: - - role: "setup/geth-bootnode" diff --git a/platforms/quorum/configuration/roles/create/crypto/raft/tasks/main.yaml b/platforms/quorum/configuration/roles/create/crypto/raft/tasks/main.yaml deleted file mode 100644 index 386e1148442..00000000000 --- a/platforms/quorum/configuration/roles/create/crypto/raft/tasks/main.yaml +++ /dev/null @@ -1,12 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Create crypto material for each peer with RAFT consensus -- name: Create crypto material for each peer with RAFT consensus - include_tasks: nested_main.yaml - loop: "{{ peers }}" - loop_control: - loop_var: peer diff --git a/platforms/quorum/configuration/roles/create/crypto/raft/tasks/nested_main.yaml b/platforms/quorum/configuration/roles/create/crypto/raft/tasks/nested_main.yaml deleted file mode 100644 index 9cb752e3840..00000000000 --- a/platforms/quorum/configuration/roles/create/crypto/raft/tasks/nested_main.yaml +++ /dev/null @@ -1,34 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Generate tessera crypto helmrelease file -- name: "Create raft crypto file" - include_role: - name: helm_component - vars: - component_type: "crypto" - type: "crypto_raft_job" - name: "{{ org.name | lower }}" - component_name: "{{ peer.name }}-raft-job" - -# Push the created deployment files to repository -- name: "Push the created deployment files to repository" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" - vars: - GIT_DIR: "{{ playbook_dir }}/../../../" - msg: "[ci skip] Pushing tessera job files for {{ component_ns }}" - tags: notest - -# Check if tessera crypto job is completed -- name: Check if tessera crypto job is completed - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" - vars: - component_name: "{{ peer.name }}-raft-job" - component_type: Job - namespace: "{{ component_ns }}" - tags: notest diff --git a/platforms/quorum/configuration/roles/create/crypto/tessera/tasks/check_vault.yaml b/platforms/quorum/configuration/roles/create/crypto/tessera/tasks/check_vault.yaml deleted file mode 100644 index f4009d8079a..00000000000 --- a/platforms/quorum/configuration/roles/create/crypto/tessera/tasks/check_vault.yaml +++ /dev/null @@ -1,22 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Check for the crypto material to the vault -- name: Check the crypto material to Vault - shell: | - vault kv get -field=publicKey {{ vault.secret_path | default('secretsv2') }}/{{ org.name | lower }}/crypto/{{ item.name }}/tm - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - with_items: "{{ peers }}" - when: item.type == 'member' - register: vault_result - ignore_errors: yes - -# Set a fact vault_result -- set_fact: - generate_crypto_tessera: True - when: vault_result.failed is defined and vault_result.failed == True diff --git a/platforms/quorum/configuration/roles/create/crypto/tessera/tasks/main.yaml b/platforms/quorum/configuration/roles/create/crypto/tessera/tasks/main.yaml deleted file mode 100644 index 4128f2d14ec..00000000000 --- a/platforms/quorum/configuration/roles/create/crypto/tessera/tasks/main.yaml +++ /dev/null @@ -1,13 +0,0 @@ -############################################################################################## -# Copyright Accenture. 
All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Create crypto material for each non validator peer for tessera tm -- name: Create crypto material for each non validator peer for tessera tm - include_tasks: nested_main.yaml - loop: "{{ peers }}" - loop_control: - loop_var: peer - when: peer.type != 'validator' diff --git a/platforms/quorum/configuration/roles/create/crypto/tessera/tasks/nested_main.yaml b/platforms/quorum/configuration/roles/create/crypto/tessera/tasks/nested_main.yaml deleted file mode 100644 index 76586044baf..00000000000 --- a/platforms/quorum/configuration/roles/create/crypto/tessera/tasks/nested_main.yaml +++ /dev/null @@ -1,64 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Check crypto material in the vault -- name: Check for the crypto material in the vault - include_tasks: check_vault.yaml - vars: - vault: "{{ org.vault }}" - peers: "{{ org.services.peers }}" - -# Wait for namespace creation -- name: "Wait for namespace creation" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/k8_component" - vars: - component_type: "Namespace" - component_name: "{{ component_ns }}" - type: "retry" - when: - - generate_crypto_tessera is defined - - generate_crypto_tessera - tags: - - notest - -# Generate tessera crypto helmrelease file -- name: "Create tessera crypto file" - include_role: - name: helm_component - vars: - component_type: "crypto" - type: "crypto_tessera" - name: "{{ org.name | lower }}" - component_name: "{{ peer.name }}-tessera-job" - when: - - generate_crypto_tessera is defined - - generate_crypto_tessera - -# Push the created deployment files to repository -- name: "Push the created deployment files to repository" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" - vars: - GIT_DIR: "{{ playbook_dir }}/../../../" - msg: "[ci skip] Pushing tessera job files for {{ component_ns }}" - when: - - generate_crypto_tessera is defined - - generate_crypto_tessera - tags: notest - -# Check if tessera crypto job is completed -- name: Check if tessera crypto job is completed - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" - vars: - component_name: "{{ peer.name }}-tessera-job" - component_type: Job - namespace: "{{ component_ns }}" - when: - - generate_crypto_tessera is defined - - generate_crypto_tessera - tags: notest diff --git a/platforms/quorum/configuration/roles/helm_component/tasks/main.yaml b/platforms/quorum/configuration/roles/create/helm_component/tasks/main.yaml similarity index 100% rename from platforms/quorum/configuration/roles/helm_component/tasks/main.yaml rename to platforms/quorum/configuration/roles/create/helm_component/tasks/main.yaml diff --git a/platforms/quorum/configuration/roles/helm_component/templates/certs-ambassador-quorum.tpl b/platforms/quorum/configuration/roles/create/helm_component/templates/certs-ambassador-quorum.tpl similarity index 94% rename from platforms/quorum/configuration/roles/helm_component/templates/certs-ambassador-quorum.tpl rename to platforms/quorum/configuration/roles/create/helm_component/templates/certs-ambassador-quorum.tpl index 
f8c9c3a086e..a2a731d13ae 100644 --- a/platforms/quorum/configuration/roles/helm_component/templates/certs-ambassador-quorum.tpl +++ b/platforms/quorum/configuration/roles/create/helm_component/templates/certs-ambassador-quorum.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} @@ -10,7 +10,7 @@ spec: interval: 1m chart: spec: - chart: {{ charts_dir }}/quorum-tlscerts-gen + chart: {{ charts_dir }}/quorum-tlscert-gen sourceRef: kind: GitRepository name: flux-{{ network.env.type }} diff --git a/platforms/quorum/configuration/roles/helm_component/templates/crypto_ibft_job.tpl b/platforms/quorum/configuration/roles/create/helm_component/templates/crypto_ibft_job.tpl similarity index 96% rename from platforms/quorum/configuration/roles/helm_component/templates/crypto_ibft_job.tpl rename to platforms/quorum/configuration/roles/create/helm_component/templates/crypto_ibft_job.tpl index b1acef6e440..b81871b1f46 100644 --- a/platforms/quorum/configuration/roles/helm_component/templates/crypto_ibft_job.tpl +++ b/platforms/quorum/configuration/roles/create/helm_component/templates/crypto_ibft_job.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/quorum/configuration/roles/helm_component/templates/crypto_raft_job.tpl b/platforms/quorum/configuration/roles/create/helm_component/templates/crypto_raft_job.tpl similarity index 96% rename from platforms/quorum/configuration/roles/helm_component/templates/crypto_raft_job.tpl rename to platforms/quorum/configuration/roles/create/helm_component/templates/crypto_raft_job.tpl index 485649b1176..fa7b2416877 100644 --- a/platforms/quorum/configuration/roles/helm_component/templates/crypto_raft_job.tpl +++ b/platforms/quorum/configuration/roles/create/helm_component/templates/crypto_raft_job.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/quorum/configuration/roles/helm_component/templates/crypto_tessera.tpl b/platforms/quorum/configuration/roles/create/helm_component/templates/crypto_tessera.tpl similarity index 96% rename from platforms/quorum/configuration/roles/helm_component/templates/crypto_tessera.tpl rename to platforms/quorum/configuration/roles/create/helm_component/templates/crypto_tessera.tpl index 72f6b0d6ce0..f5bcdcb3c7e 100644 --- a/platforms/quorum/configuration/roles/helm_component/templates/crypto_tessera.tpl +++ b/platforms/quorum/configuration/roles/create/helm_component/templates/crypto_tessera.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/quorum/configuration/roles/helm_component/templates/helm_component.tpl b/platforms/quorum/configuration/roles/create/helm_component/templates/helm_component.tpl similarity index 100% rename from platforms/quorum/configuration/roles/helm_component/templates/helm_component.tpl rename to platforms/quorum/configuration/roles/create/helm_component/templates/helm_component.tpl diff --git a/platforms/quorum/configuration/roles/create/helm_component/templates/memberquorum.tpl b/platforms/quorum/configuration/roles/create/helm_component/templates/memberquorum.tpl new file mode 100644 index 00000000000..594251f8de0 --- 
/dev/null +++ b/platforms/quorum/configuration/roles/create/helm_component/templates/memberquorum.tpl @@ -0,0 +1,79 @@ +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: {{ component_name }} + namespace: {{ component_ns }} + annotations: + fluxcd.io/automated: "false" +spec: + releaseName: {{ component_name }} + interval: 1m + chart: + spec: + chart: {{ charts_dir }}/quorum-node + sourceRef: + kind: GitRepository + name: flux-{{ network.env.type }} + namespace: flux-{{ network.env.type }} + values: + global: + serviceAccountName: vault-auth + cluster: + provider: {{ org.cloud_provider }} + cloudNativeServices: false + vault: + type: {{ vault.type | default("hashicorp") }} + network: quorum + address: {{ vault.url }} + secretPrefix: "data/{{ network.env.type }}{{ name }}" + secretEngine: {{ vault.secret_path | default("secretsv2") }} + role: vault-role + authPath: {{ network.env.type }}{{ name }} + proxy: + provider: ambassador + externalUrlSuffix: {{ org.external_url_suffix }} + p2p: {{ peer.p2p.ambassador }} + tmport: {{ peer.tm_nodeport.ambassador | default(443) }} + storage: + size: "2Gi" + tessera: + enabled: true + tessera: + port: + resources: + cpuLimit: 0.25 + cpuRequest: 0.05 + memLimit: "2G" + memRequest: "1G" + password: 'password' + storage: + enabled: false + size: 1Gi + dbSize: 2Gi + allowedTopologies: + enabled: false + + tls: + enabled: true +{% if network.docker.password is defined %} + image: + pullSecret: regcred +{% endif %} + settings: + certSubject: {{ network.config.subject | quote }} + tmTls: {{ network.config.tm_tls | default(false) }} + + + node: + goquorum: + metrics: + serviceMonitorEnabled: true + resources: + cpuLimit: 0.25 + cpuRequest: 0.05 + memLimit: "1G" + memRequest: "300M" + account: + password: 'password' + p2p: + discovery: false diff --git a/platforms/quorum/configuration/roles/helm_component/templates/tessera.tpl b/platforms/quorum/configuration/roles/create/helm_component/templates/tessera.tpl similarity index 98% rename from platforms/quorum/configuration/roles/helm_component/templates/tessera.tpl rename to platforms/quorum/configuration/roles/create/helm_component/templates/tessera.tpl index d7bae03456b..fab8974f589 100755 --- a/platforms/quorum/configuration/roles/helm_component/templates/tessera.tpl +++ b/platforms/quorum/configuration/roles/create/helm_component/templates/tessera.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/quorum/configuration/roles/create/helm_component/templates/validatorquorum.tpl b/platforms/quorum/configuration/roles/create/helm_component/templates/validatorquorum.tpl new file mode 100644 index 00000000000..587a281418c --- /dev/null +++ b/platforms/quorum/configuration/roles/create/helm_component/templates/validatorquorum.tpl @@ -0,0 +1,35 @@ +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: {{ component_name }} + namespace: {{ component_ns }} + annotations: + fluxcd.io/automated: "false" +spec: + releaseName: {{ component_name }} + interval: 1m + chart: + spec: + chart: {{ charts_dir }}/quorum-node + sourceRef: + kind: GitRepository + name: flux-{{ network.env.type }} + namespace: flux-{{ network.env.type }} + values: + global: + serviceAccountName: vault-auth + cluster: + provider: {{ org.cloud_provider }} + cloudNativeServices: false + vault: + address: {{ vault.url }} + secretPrefix: data/{{ network.env.type }}{{ name }} + network: 
quorum + role: vault-role + authPath: {{ network.env.type }}{{ name }} + type: {{ vault.type | default("hashicorp") }} + secretEngine: {{ vault.secret_path | default("secretsv2") }} + proxy: + provider: "ambassador" + externalUrlSuffix: {{ org.external_url_suffix }} + p2p: {{ peer.p2p.ambassador }} diff --git a/platforms/quorum/configuration/roles/helm_component/vars/main.yaml b/platforms/quorum/configuration/roles/create/helm_component/vars/main.yaml similarity index 100% rename from platforms/quorum/configuration/roles/helm_component/vars/main.yaml rename to platforms/quorum/configuration/roles/create/helm_component/vars/main.yaml diff --git a/platforms/quorum/configuration/roles/create/member_node/tasks/enode_data.yaml b/platforms/quorum/configuration/roles/create/member_node/tasks/enode_data.yaml deleted file mode 100644 index d421503af05..00000000000 --- a/platforms/quorum/configuration/roles/create/member_node/tasks/enode_data.yaml +++ /dev/null @@ -1,12 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Get enode data -- name: Get enode data - include_tasks: nested_enode_data.yaml - loop: "{{ org1.services.peers }}" - loop_control: - loop_var: peernode diff --git a/platforms/quorum/configuration/roles/create/member_node/tasks/main.yaml b/platforms/quorum/configuration/roles/create/member_node/tasks/main.yaml index 766d5065673..7b97986556b 100644 --- a/platforms/quorum/configuration/roles/create/member_node/tasks/main.yaml +++ b/platforms/quorum/configuration/roles/create/member_node/tasks/main.yaml @@ -1,12 +1,53 @@ -# Create helm release files for member nodes -- name: Create helm release files for member nodes - include_tasks: member_main.yaml +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Execute only for members +- name: Perform secondary genesis + include_role: + name: setup/genesis/secondary vars: - peer_query: "peers[?type=='member']" - member_node: "{{ org.services | json_query(peer_query) | first | default() }}" - loop: "{{ peers }}" + values_dir: "./build/{{ component_ns }}" + when: org.type == 'member' + +# Get the Genesis and staticnodes +- name: Get genesis and staticnodes + include_role: + name: get/genesis + when: org.type == 'member' and org.services.peers is defined + +# Creates the Quorum node value files for each node of organization +- name: Create value file for Quorum node + include_role: + name: create/helm_component + vars: + component_name: "{{ peer.name }}" + type: "memberquorum" + loop: "{{ org.services.peers }}" loop_control: loop_var: peer - when: - - member_node | length > 0 - - peer.type == 'member' + when: org.services.peers is defined + +# Pushes the above generated files to git directory +- name: Git Push + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" + vars: + GIT_DIR: "{{ playbook_dir }}/../../../" + msg: "[ci skip] Pushing Member node files" + when: org.services.peers is defined + +# Wait for the last Member to be running +- name: "Wait for the last member to run" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" + vars: + member: "{{ org.services.peers | last }}" + label_selectors: + - app.kubernetes.io/release = {{ member.name | lower }} + component_type: "Pod" + component_name: "{{ member.name | lower }}" + namespace: "{{ component_ns }}" + when: org.services.peers is defined diff --git a/platforms/quorum/configuration/roles/create/member_node/tasks/member_main.yaml b/platforms/quorum/configuration/roles/create/member_node/tasks/member_main.yaml deleted file mode 100644 index e8f1adba0d6..00000000000 --- a/platforms/quorum/configuration/roles/create/member_node/tasks/member_main.yaml +++ /dev/null @@ -1,88 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Set enode_data_list to empty -- name: Set enode_data_list to [] - set_fact: - enode_data_list: [] - -# Set node status -- name: Set node status - set_fact: - node_status: default - when: not add_new_org - -# Set node status -- name: Set node status - set_fact: - node_status: additional - when: add_new_org - -# Get enode data for all orgs -- name: Get enode data for each node of all organization - include_tasks: enode_data.yaml - loop: "{{ network['organizations'] }}" - loop_control: - loop_var: org1 - -# Create the staticnodes file from the template -- name: Create staticnodes file - template: - src: "staticnodes.tpl" - dest: "{{ network.config.staticnodes }}" - when: not add_new_org - -# Add a new node to the existing network -- name: Adds a new node to the existing network - uri: - url: "http://{{ geth_data.url }}" - method: POST - validate_certs: no - return_content: yes - body_format: json - body: '{"jsonrpc":"2.0","method":"raft_addPeer","params": ["enode://{{ enode.enodeval }}@{{ enode.peer_name }}.{{ enode.external_url }}:{{ enode.p2p_ambassador }}?discport=0&raftport={{ enode.raft_ambassador }}"], "id":{{ geth_data.nodeid }} }' - headers: - Content-Type: "application/json" - loop: "{{ enode_data_list }}" - loop_control: - loop_var: enode - register: peer_id - until: peer_id.status == 200 - retries: "{{ network.env.retry_count }}" - delay: 50 - when: add_new_org and network.config.consensus == 'raft' - tags: - - notest - -# Fetch the new peer_id -- name: Get peer_id - set_fact: - peer_id: "{{ peer_id.results[0].json.result }}" - when: add_new_org and network.config.consensus == 'raft' - tags: - - notest - -# Create the Quorum value files for each node of organization -- name: Create value file for Quorum Nodes - include_role: - name: helm_component - vars: - component_name: "{{ name }}{{ peer.name }}quorum" - type: "memberquorum" - genesis: "{{ lookup('file', '{{ network.config.genesis }}') | b64encode }}" - staticnodes: "{{ lookup('file', '{{ network.config.staticnodes }}') | from_yaml | to_nice_json }}" - when: peer.type == 'member' - -# Git Push : Push the above generated files to git directory -- name: Git Push - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" - vars: - GIT_DIR: "{{ playbook_dir }}/../../../" - gitops: "{{ org.gitops }}" - msg: "[ci skip] Pushing Peer files" - tags: - - notest diff --git a/platforms/quorum/configuration/roles/create/member_node/tasks/nested_enode_data.yaml b/platforms/quorum/configuration/roles/create/member_node/tasks/nested_enode_data.yaml deleted file mode 100644 index a1a11400679..00000000000 --- a/platforms/quorum/configuration/roles/create/member_node/tasks/nested_enode_data.yaml +++ /dev/null @@ -1,47 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Check if the keystore.json exist in build directory -- name: Check if enode is present in the build directory or not - stat: - path: "{{ build_path }}/{{ org1.name }}/{{ peernode.name }}/enode" - register: file_status - -# Create the build directory if it does not exist -- name: Create build directory if it does not exist - file: - path: "{{ build_path }}/{{ org1.name }}/{{ peernode.name }}" - state: directory - mode: '0755' - recurse: yes - when: file_status.stat.exists == False - changed_when: false - -# Fetch nodekey from vault -- name: Get the nodekey from vault and generate the enode - shell: | - vault kv get -field=nodekey {{ vault.secret_path | default('secretsv2') }}/{{ org1.name }}/crypto/{{ peernode.name }}/quorum > {{ build_path }}/{{ org1.name }}/{{ peernode.name }}/nodekey - {{ bin_install_dir }}/bootnode --nodekey={{ build_path }}/{{ org1.name }}/{{ peernode.name }}/nodekey --writeaddress > {{ build_path }}/{{ org1.name }}/{{ peernode.name }}/enode - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: file_status.stat.exists == False - changed_when: false - -# Get enode data -- name: Get enode_data - set_fact: - enode_data: "{{ lookup('file', '{{ build_path }}/{{ org1.name }}/{{ peernode.name }}/enode') }}" - -# Get information about each validator node present in network.yaml and store it as a list of org,node -- name: Get validator and transaction node data for raft - set_fact: - enode_data_list={{ enode_data_list | default([]) + [ {'peer_name':peernode.name, 'enodeval':enode_data, 'external_url':org1.external_url_suffix, 'p2p_ambassador':peernode.p2p.ambassador, 'raft_ambassador':peernode.raft.ambassador } ] }} - when: network.config.consensus == 'raft' -- name: Get validator and transaction node data - set_fact: - enode_data_list={{ enode_data_list | default([]) + [ {'peer_name':peernode.name, 'enodeval':enode_data, 'external_url':org1.external_url_suffix, 'p2p_ambassador':peernode.p2p.ambassador } ] }} - when: network.config.consensus != 'raft' diff --git a/platforms/quorum/configuration/roles/create/secrets/tasks/main.yaml b/platforms/quorum/configuration/roles/create/secrets/tasks/main.yaml new file mode 100644 index 00000000000..cc31dd73c32 --- /dev/null +++ b/platforms/quorum/configuration/roles/create/secrets/tasks/main.yaml @@ -0,0 +1,32 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Wait for namespace to be created by flux +- name: "Wait for the namespace {{ component_ns }} to be created" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/k8_component" + vars: + component_type: "Namespace" + component_name: "{{ component_ns }}" + type: "retry" + +# Create the vault roottoken secret +- name: "Create vault token secret" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/shared_k8s_secrets" + vars: + namespace: "{{ component_ns }}" + check: "token_secret" + +# Create the docker pull credentials for image registry +- name: "Create docker credentials secret" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/shared_k8s_secrets" + vars: + namespace: "{{ component_ns }}" + check: "docker_credentials" + when: + - network.docker.username is defined diff --git a/platforms/quorum/configuration/roles/create/tessera/meta/main.yaml b/platforms/quorum/configuration/roles/create/tessera/meta/main.yaml deleted file mode 100644 index 06bfb7f3714..00000000000 --- a/platforms/quorum/configuration/roles/create/tessera/meta/main.yaml +++ /dev/null @@ -1,12 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -dependencies: - - role: "create/genesis_raft" - vars: - build_path: "./build" - when: network.type == 'quorum' and network.config.consensus == 'raft' diff --git a/platforms/quorum/configuration/roles/create/tessera/tasks/enode_data.yaml b/platforms/quorum/configuration/roles/create/tessera/tasks/enode_data.yaml deleted file mode 100644 index 148175be2ab..00000000000 --- a/platforms/quorum/configuration/roles/create/tessera/tasks/enode_data.yaml +++ /dev/null @@ -1,12 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -#Get enode data -- name: Get enode data - include_tasks: nested_enode_data.yaml - loop: "{{ org1.services.peers }}" - loop_control: - loop_var: peernode diff --git a/platforms/quorum/configuration/roles/create/tessera/tasks/main.yaml b/platforms/quorum/configuration/roles/create/tessera/tasks/main.yaml deleted file mode 100644 index c9f74862cb7..00000000000 --- a/platforms/quorum/configuration/roles/create/tessera/tasks/main.yaml +++ /dev/null @@ -1,41 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Set node status -- name: Set node status - set_fact: - node_status: default - when: not add_new_org - -# Set node status -- name: Set node status - set_fact: - node_status: additional - when: add_new_org - -# Create value file for Tessera TM nodes -- name: Create value file for Tessera TM nodes - include_role: - name: helm_component - vars: - component_name: "{{ name }}{{ peer.name }}tessera" - type: "quorum_tessera" - loop: "{{ peers }}" - loop_control: - loop_var: peer - when: - - peer.type == 'member' - -# Git Push : Push the above generated files to git directory -- name: Git Push - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" - vars: - GIT_DIR: "{{ playbook_dir }}/../../../" - gitops: "{{ org.gitops }}" - msg: "[ci skip] Pushing Peer files" - tags: - - notest diff --git a/platforms/quorum/configuration/roles/create/validator_node/tasks/enode_data.yaml b/platforms/quorum/configuration/roles/create/validator_node/tasks/enode_data.yaml deleted file mode 100644 index d421503af05..00000000000 --- a/platforms/quorum/configuration/roles/create/validator_node/tasks/enode_data.yaml +++ /dev/null @@ -1,12 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Get enode data -- name: Get enode data - include_tasks: nested_enode_data.yaml - loop: "{{ org1.services.peers }}" - loop_control: - loop_var: peernode diff --git a/platforms/quorum/configuration/roles/create/validator_node/tasks/main.yaml b/platforms/quorum/configuration/roles/create/validator_node/tasks/main.yaml index 0b0b5db00fe..aa7e77c1d0b 100644 --- a/platforms/quorum/configuration/roles/create/validator_node/tasks/main.yaml +++ b/platforms/quorum/configuration/roles/create/validator_node/tasks/main.yaml @@ -4,15 +4,41 @@ # SPDX-License-Identifier: Apache-2.0 ############################################################################################## -# Create helm release files for validator nodes -- name: Create helm release files for validator nodes - include_tasks: validator_main.yaml +# This task creates helm release file for each validator node of organization +- name: Create helm release file for each validator node of organization + include_role: + name: create/helm_component vars: - peer_query: "peers[?type=='validator']" - validator_node: "{{ org.services | json_query(peer_query) | first | default() }}" - loop: "{{ peers }}" + component_name: "{{ peer.name }}" + type: "validatorquorum" + loop: "{{ org.services.validators }}" loop_control: loop_var: peer - when: - - validator_node | length > 0 - - peer.type == 'validator' + when: org.services.validators is defined + +# Git Push : Pushes the above generated files to git +- name: Git Push + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" + vars: + GIT_DIR: "{{ playbook_dir }}/../../../" + msg: "[ci skip] Pushing Validator files" + when: org.services.validators is defined + +# Wait for the last validator to be running +- name: "Wait for the last validator to run" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" + vars: + validator: "{{ org.services.validators | last }}" + 
label_selectors: + - app.kubernetes.io/release = {{ validator.name | lower }} + component_type: "Pod" + namespace: "{{ component_ns }}" + when: org.services.validators is defined + +# Get the Genesis and staticnodes +- name: Get genesis and staticnodes + include_role: + name: get/genesis + when: org.services.validators is defined diff --git a/platforms/quorum/configuration/roles/create/validator_node/tasks/nested_enode_data.yaml b/platforms/quorum/configuration/roles/create/validator_node/tasks/nested_enode_data.yaml deleted file mode 100644 index a1a11400679..00000000000 --- a/platforms/quorum/configuration/roles/create/validator_node/tasks/nested_enode_data.yaml +++ /dev/null @@ -1,47 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Check if the keystore.json exist in build directory -- name: Check if enode is present in the build directory or not - stat: - path: "{{ build_path }}/{{ org1.name }}/{{ peernode.name }}/enode" - register: file_status - -# Create the build directory if it does not exist -- name: Create build directory if it does not exist - file: - path: "{{ build_path }}/{{ org1.name }}/{{ peernode.name }}" - state: directory - mode: '0755' - recurse: yes - when: file_status.stat.exists == False - changed_when: false - -# Fetch nodekey from vault -- name: Get the nodekey from vault and generate the enode - shell: | - vault kv get -field=nodekey {{ vault.secret_path | default('secretsv2') }}/{{ org1.name }}/crypto/{{ peernode.name }}/quorum > {{ build_path }}/{{ org1.name }}/{{ peernode.name }}/nodekey - {{ bin_install_dir }}/bootnode --nodekey={{ build_path }}/{{ org1.name }}/{{ peernode.name }}/nodekey --writeaddress > {{ build_path }}/{{ org1.name }}/{{ peernode.name }}/enode - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: file_status.stat.exists == False - changed_when: false - -# Get enode data -- name: Get enode_data - set_fact: - enode_data: "{{ lookup('file', '{{ build_path }}/{{ org1.name }}/{{ peernode.name }}/enode') }}" - -# Get information about each validator node present in network.yaml and store it as a list of org,node -- name: Get validator and transaction node data for raft - set_fact: - enode_data_list={{ enode_data_list | default([]) + [ {'peer_name':peernode.name, 'enodeval':enode_data, 'external_url':org1.external_url_suffix, 'p2p_ambassador':peernode.p2p.ambassador, 'raft_ambassador':peernode.raft.ambassador } ] }} - when: network.config.consensus == 'raft' -- name: Get validator and transaction node data - set_fact: - enode_data_list={{ enode_data_list | default([]) + [ {'peer_name':peernode.name, 'enodeval':enode_data, 'external_url':org1.external_url_suffix, 'p2p_ambassador':peernode.p2p.ambassador } ] }} - when: network.config.consensus != 'raft' diff --git a/platforms/quorum/configuration/roles/create/validator_node/tasks/validator_main.yaml b/platforms/quorum/configuration/roles/create/validator_node/tasks/validator_main.yaml deleted file mode 100644 index 1b347f8bcb3..00000000000 --- a/platforms/quorum/configuration/roles/create/validator_node/tasks/validator_main.yaml +++ /dev/null @@ -1,88 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Set enode_data_list to empty -- name: Set enode_data_list to [] - set_fact: - enode_data_list: [] - -# Set node status -- name: Set node status - set_fact: - node_status: default - when: not add_new_org - -# Set node status -- name: Set node status - set_fact: - node_status: additional - when: add_new_org - -# Get enode data for all orgs -- name: Get enode data for each node of all organization - include_tasks: enode_data.yaml - loop: "{{ network['organizations'] }}" - loop_control: - loop_var: org1 - -# Create the staticnodes file from the template -- name: Create staticnodes file - template: - src: "staticnodes.tpl" - dest: "{{ network.config.staticnodes }}" - when: not add_new_org - -# Add a new node to the existing network -- name: Adds a new validator node to the existing network - uri: - url: "http://{{ geth_data.url }}:{{ geth_data.rpcport }}" - method: POST - validate_certs: no - return_content: yes - body_format: json - body: '{"jsonrpc":"2.0","method":"raft_addPeer","params": ["enode://{{ enode.enodeval }}@{{ enode.peer_name }}.{{ enode.external_url }}:{{ enode.p2p_ambassador }}?discport=0&raftport={{ enode.raft_ambassador }}"], "id":{{ geth_data.nodeid }} }' - headers: - Content-Type: "application/json" - loop: "{{ enode_data_list }}" - loop_control: - loop_var: enode - register: peer_id - until: peer_id.status == 200 - retries: "{{ network.env.retry_count }}" - delay: 50 - when: add_new_org and network.config.consensus == 'raft' - tags: - - notest - -# Fetch the new peer_id -- name: Get validator peer_id - set_fact: - peer_id: "{{ peer_id.results[0].json.result }}" - when: add_new_org and network.config.consensus == 'raft' - tags: - - notest - -# Create the Quorum value files for each node of organization -- name: Create value file for Quorum Nodes - include_role: - name: helm_component - vars: - component_name: "{{ name }}{{ peer.name }}quorum" - type: "validatorquorum" - genesis: "{{ lookup('file', '{{ network.config.genesis }}') | b64encode }}" - staticnodes: "{{ lookup('file', '{{ network.config.staticnodes }}') | from_yaml | to_nice_json }}" - when: peer.type == 'validator' - -# Git Push: Push the above generated files to git directory -- name: Git Push - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" - vars: - GIT_DIR: "{{ playbook_dir }}/../../../" - gitops: "{{ org.gitops }}" - msg: "[ci skip] Pushing Peer files" - tags: - - notest diff --git a/platforms/quorum/configuration/roles/delete/certificates/ambassador/tasks/main.yaml b/platforms/quorum/configuration/roles/delete/certificates/ambassador/tasks/main.yaml new file mode 100644 index 00000000000..57000c86907 --- /dev/null +++ b/platforms/quorum/configuration/roles/delete/certificates/ambassador/tasks/main.yaml @@ -0,0 +1,23 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## +--- + +# Delete Ambassador certificates +- name: delete Ambassador certificates + include_tasks: nested_main.yaml + vars: + node_name: "{{ node.name | lower }}" + loop: "{{ services.peers is defined | ternary(services.peers, services.validators) }}" + loop_control: + loop_var: node + +# Delete clusterissuer helm chart +- name: Delete ClusterIssuer + kubernetes.core.helm: + kubeconfig: "{{ kubernetes.config_file }}" + name: letsencrypt-clusterissuer + state: absent + release_namespace: "default" diff --git a/platforms/quorum/configuration/roles/delete/certificates/ambassador/tasks/nested_main.yaml b/platforms/quorum/configuration/roles/delete/certificates/ambassador/tasks/nested_main.yaml new file mode 100644 index 00000000000..4bd5f1814f4 --- /dev/null +++ b/platforms/quorum/configuration/roles/delete/certificates/ambassador/tasks/nested_main.yaml @@ -0,0 +1,17 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# This role generates certificates for rootca and ambassador +# and places them in vault. Certificates are created using openssl +--- + +# Delete ambassador tls certificates created by cert-manager +- name: Delete TLS certificate + kubernetes.core.helm: + kubeconfig: "{{ kubernetes.config_file }}" + name: "letsencrypt-cert-{{node_name}}" + state: absent + release_namespace: "default" diff --git a/platforms/quorum/configuration/roles/delete/vault_secrets/tasks/main.yaml b/platforms/quorum/configuration/roles/delete/vault_secrets/tasks/main.yaml index d3cb8c8ab62..113c88ddfdf 100644 --- a/platforms/quorum/configuration/roles/delete/vault_secrets/tasks/main.yaml +++ b/platforms/quorum/configuration/roles/delete/vault_secrets/tasks/main.yaml @@ -10,7 +10,7 @@ ############################################################################################# -# Delete the Docker credentials +# Delete Docker credentials - name: Delete docker creds k8s: kind: Secret @@ -19,33 +19,19 @@ state: absent kubeconfig: "{{ kubernetes.config_file }}" context: "{{ kubernetes.context }}" - ignore_errors: yes + ignore_errors: true -# Delete Ambassador creds -- name: Delete Ambassador creds - k8s: - kind: Secret - namespace: "{{ org_namespace }}" - name: "{{ peer.name }}-ambassador-certs" - kubeconfig: "{{ kubernetes.config_file }}" - context: "{{ kubernetes.context }}" - state: absent - loop: "{{ services.peers }}" - loop_control: - loop_var: peer - ignore_errors: yes - -# Delete crypto materials -- name: Delete Crypto materials +# Deletes crypto materials +- name: Delete Crypto material shell: | - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ org_name }}/crypto/{{ peer.name }}/tm - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ org_name }}/crypto/{{ peer.name }}/quorum - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ org_name }}/crypto/{{ peer.name }}/certs - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ org_name }}/crypto/genesis - loop: "{{ services.peers }}" + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/quorum-node-{{ peer.name }}-keys + vault kv delete {{ item.vault.secret_path | 
default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/tessera-{{ peer.name }}-keys + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/tlscerts + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/genesis environment: VAULT_ADDR: "{{ item.vault.url }}" VAULT_TOKEN: "{{ item.vault.root_token }}" + loop: "{{ services.peers is defined | ternary( services.peers, services.validators) }}" loop_control: loop_var: peer - ignore_errors: yes + ignore_errors: true diff --git a/platforms/quorum/configuration/roles/get/genesis/tasks/main.yaml b/platforms/quorum/configuration/roles/get/genesis/tasks/main.yaml new file mode 100644 index 00000000000..cbeba800ede --- /dev/null +++ b/platforms/quorum/configuration/roles/get/genesis/tasks/main.yaml @@ -0,0 +1,30 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Get the genesis file from current org +- name: Get genesis from config map of current org + kubernetes.core.k8s_info: + kubeconfig: "{{ kubernetes.config_file }}" + kind: ConfigMap + name: "quorum-genesis" + namespace: "{{ component_ns }}" + register: genesis_data + +# Get the static node file from current org +- name: Get static-nodes from config map of current org + kubernetes.core.k8s_info: + kubeconfig: "{{ kubernetes.config_file }}" + kind: ConfigMap + name: "quorum-peers" + namespace: "{{ component_ns }}" + register: nodes_data + +- name: Save genesis locally for Secondary genesis + shell: | + echo {{ genesis_data.resources[0].data['genesis.json'] | to_nice_json }} > {{ files_loc }}/genesis.json + echo {{ nodes_data.resources[0].data['static-nodes.json'] | to_nice_json }} > {{ files_loc }}/static-nodes.json + vars: + files_loc: "{{playbook_dir}}/../../../{{ charts_dir }}/quorum-genesis/files" diff --git a/platforms/quorum/configuration/roles/helm_component/templates/memberquorum.tpl b/platforms/quorum/configuration/roles/helm_component/templates/memberquorum.tpl deleted file mode 100644 index 74d4c9fe9d6..00000000000 --- a/platforms/quorum/configuration/roles/helm_component/templates/memberquorum.tpl +++ /dev/null @@ -1,97 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: {{ component_name }} - namespace: {{ component_ns }} - annotations: - fluxcd.io/automated: "false" -spec: - releaseName: {{ component_name }} - interval: 1m - chart: - spec: - chart: {{ charts_dir }}/quorum-member-node - sourceRef: - kind: GitRepository - name: flux-{{ network.env.type }} - namespace: flux-{{ network.env.type }} - values: - replicaCount: 1 - metadata: - namespace: {{ component_ns }} - labels: - images: - node: quorumengineering/quorum:{{ network.version }} - alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - node: - name: {{ peer.name }} -{% if add_new_org %} -{% if network.config.consensus == 'raft' %} - peer_id: {{ peer_id | int }} -{% endif %} -{% endif %} - status: {{ node_status }} - consensus: {{ consensus }} - subject: {{ peer.subject }} - mountPath: /etc/quorum/qdata - imagePullSecret: regcred - keystore: keystore_1 -{% if org.cloud_provider == 'minikube' %} - servicetype: NodePort -{% else %} - servicetype: ClusterIP -{% endif %} - lock: {{ peer.lock | lower }} - ports: - rpc: {{ peer.rpc.port }} -{% if 
network.config.consensus == 'raft' %} - raft: {{ peer.raft.port }} -{% endif %} - quorum: {{ peer.p2p.port }} - - tm: - type: {{ network.config.transaction_manager }} - - vault: - type: {{ vault.type | default("hashicorp") }} - address: {{ vault.url }} - secretprefix: {{ vault.secret_path | default('secretsv2') }}/data/{{ name }}/crypto/{{ peer.name }} - serviceaccountname: vault-auth - keyname: quorum - tm_keyname: tm - role: vault-role - authpath: {{ network.env.type }}{{ name }} - -{% if network.config.transaction_manager != "none" %} - tessera: -{% if network.config.tm_tls == 'strict' %} - url: "https://{{ peer.name }}.{{ external_url }}:{{ peer.transaction_manager.ambassador }}" -{% else %} - url: "http://{{ peer.name }}.{{ external_url }}:{{ peer.transaction_manager.ambassador }}" -{% endif %} - clienturl: "http://{{ peer.name }}-tessera:{{ peer.transaction_manager.clientport }}" #TODO: Enable tls strict for q2t -{% endif %} - genesis: {{ genesis }} - staticnodes: - {{ staticnodes }} -{% if network.env.proxy == 'ambassador' %} - proxy: - provider: "ambassador" - external_url: {{ external_url }} - quorumport: {{ peer.p2p.ambassador }} -{% if network.config.consensus == 'raft' %} - portRaft: {{ peer.raft.ambassador }} -{% endif %} -{% else %} - proxy: - provider: none - external_url: {{ name }}.{{ component_ns }} - quorumport: {{ peer.p2p.port }} -{% if network.config.consensus == 'raft' %} - portRaft: {{ peer.raft.port }} -{% endif %} -{% endif %} - storage: - storageclassname: {{ sc_name }} - storagesize: 1Gi - dbstorage: 1Gi diff --git a/platforms/quorum/configuration/roles/helm_component/templates/validatorquorum.tpl b/platforms/quorum/configuration/roles/helm_component/templates/validatorquorum.tpl deleted file mode 100644 index f26a3736eb9..00000000000 --- a/platforms/quorum/configuration/roles/helm_component/templates/validatorquorum.tpl +++ /dev/null @@ -1,81 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: {{ component_name }} - namespace: {{ component_ns }} - annotations: - fluxcd.io/automated: "false" -spec: - releaseName: {{ component_name }} - interval: 1m - chart: - spec: - chart: {{ charts_dir }}/quorum-validator-node - sourceRef: - kind: GitRepository - name: flux-{{ network.env.type }} - namespace: flux-{{ network.env.type }} - values: - replicaCount: 1 - metadata: - namespace: {{ component_ns }} - labels: - images: - node: quorumengineering/quorum:{{ network.version }} - alpineutils: ghcr.io/hyperledger/bevel-alpine:latest - node: - name: {{ peer.name }} -{% if add_new_org %} -{% if network.config.consensus == 'raft' %} - peer_id: {{ peer_id | int }} -{% endif %} -{% endif %} - status: {{ node_status }} - consensus: {{ consensus }} - subject: {{ peer.subject }} - mountPath: /etc/quorum/qdata - imagePullSecret: regcred - keystore: keystore_1 -{% if org.cloud_provider == 'minikube' %} - servicetype: NodePort -{% else %} - servicetype: ClusterIP -{% endif %} - lock: {{ peer.lock | lower }} - ports: - rpc: {{ peer.rpc.port }} -{% if network.config.consensus == 'raft' %} - raft: {{ peer.raft.port }} -{% endif %} - quorum: {{ peer.p2p.port }} - vault: - address: {{ vault.url }} - secretprefix: {{ vault.secret_path | default('secretsv2') }}/data/{{ name }}/crypto/{{ peer.name }} - serviceaccountname: vault-auth - keyname: quorum - role: vault-role - authpath: {{ network.env.type }}{{ name }} - type: {{ vault.type | default("hashicorp") }} - genesis: {{ genesis }} - staticnodes: {{ staticnodes }} -{% if network.env.proxy == 
'ambassador' %} - proxy: - provider: "ambassador" - external_url: {{ external_url }} - quorumport: {{ peer.p2p.ambassador }} -{% if network.config.consensus == 'raft' %} - portRaft: {{ peer.raft.ambassador }} -{% endif %} -{% else %} - proxy: - provider: none - external_url: {{ name }}.{{ component_ns }} - quorumport: {{ peer.p2p.port }} -{% if network.config.consensus == 'raft' %} - portRaft: {{ peer.raft.port }} -{% endif %} -{% endif %} - storage: - storageclassname: {{ sc_name }} - storagesize: 1Gi - dbstorage: 1Gi diff --git a/platforms/quorum/configuration/roles/setup/genesis/primary/tasks/main.yaml b/platforms/quorum/configuration/roles/setup/genesis/primary/tasks/main.yaml new file mode 100644 index 00000000000..083936f8cca --- /dev/null +++ b/platforms/quorum/configuration/roles/setup/genesis/primary/tasks/main.yaml @@ -0,0 +1,53 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# create build directory +- name: Create build directory if it does not exist + file: + path: "{{ build_path }}" + state: directory + +# Get number of validators +- name: Validator count + set_fact: + validator_count={{ validator_count | default(0) | int + 1 }} + loop: "{{ org.services.validators | default([]) }}" + +- name: Fail when no validators detected + ansible.builtin.fail: + msg: The first organization must have Validators. + when: validator_count == 0 + +- name: Get the kubernetes server url + shell: | + KUBECONFIG={{ kubernetes.config_file }} kubectl config view --minify | grep server | cut -f 2- -d ":" | tr -d " " + register: kubernetes_server_url + +# This task runs the genesis job +- name: Genesis job helm install + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/job_component" + vars: + component_name: "genesis" + type: "primary_genesis" + kubernetes_url: "{{ kubernetes_server_url.stdout }}" + +# Pushes the above generated files to git directory +- name: Git Push + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" + vars: + GIT_DIR: "{{ playbook_dir }}/../../../" + msg: "[ci skip] Pushing genesis file" + +# Wait for Genesis job to complete +- name: "Wait for the genesis job to complete" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" + vars: + component_name: "genesis" + component_type: Job + namespace: "{{ component_ns }}" diff --git a/platforms/quorum/configuration/roles/setup/genesis/secondary/tasks/main.yaml b/platforms/quorum/configuration/roles/setup/genesis/secondary/tasks/main.yaml new file mode 100644 index 00000000000..9fbc4d7ce4d --- /dev/null +++ b/platforms/quorum/configuration/roles/setup/genesis/secondary/tasks/main.yaml @@ -0,0 +1,42 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# create build directory +- name: Create build directory if it does not exist + file: + path: "{{ build_path }}" + state: directory + +- name: Get the kubernetes server url + shell: | + KUBECONFIG={{ kubernetes.config_file }} kubectl config view --minify | grep server | cut -f 2- -d ":" | tr -d " " + register: kubernetes_server_url + +# This task runs the genesis job +- name: Genesis job helm install + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/job_component" + vars: + component_name: "genesis" + type: "secondary_genesis" + kubernetes_url: "{{ kubernetes_server_url.stdout }}" + +# Pushes the above generated files to git directory +- name: Git Push + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" + vars: + GIT_DIR: "{{ playbook_dir }}/../../../" + msg: "[ci skip] Pushing genesis file" + +# Wait for Genesis job to complete +- name: "Wait for the genesis job to complete" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" + vars: + component_name: "genesis" + component_type: Job + namespace: "{{ component_ns }}" diff --git a/platforms/quorum/configuration/roles/setup/get_crypto/tasks/main.yaml b/platforms/quorum/configuration/roles/setup/get_crypto/tasks/main.yaml deleted file mode 100644 index dc298818b5d..00000000000 --- a/platforms/quorum/configuration/roles/setup/get_crypto/tasks/main.yaml +++ /dev/null @@ -1,35 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -############################################################################################# -# This role saves the crypto from Vault into ansible_provisioner -############################################################################################# - -# Ensure admincerts directory is present in build -- name: Ensure directory exists - file: - path: "{{ cert_path }}" - state: directory - -# Save the cert file -- name: Save cert - local_action: copy content="{{ vault_output['data'].data.ambassadorcrt | b64decode }}" dest="{{ cert_path }}/ambassador.crt" - when: type == "ambassador" - -# Save the key file -- name: Save key - local_action: copy content="{{ vault_output['data'].data.ambassadorkey | b64decode }}" dest="{{ cert_path }}/ambassador.key" - when: type == "ambassador" - -# Save root rootcapem -- name: Save root rootcapem - local_action: copy content="{{ vault_output['data'].data.rootcapem | b64decode }}" dest="{{ cert_path }}/rootca.pem" - when: type == "rootca" - -# Save root rootcakey -- name: Save root rootcakey - local_action: copy content="{{ vault_output['data'].data.rootcakey | b64decode }}" dest="{{ cert_path }}/rootca.key" - when: type == "rootca" diff --git a/platforms/quorum/configuration/roles/setup/geth-bootnode/tasks/main.yaml b/platforms/quorum/configuration/roles/setup/geth-bootnode/tasks/main.yaml deleted file mode 100644 index 4d742fd5d90..00000000000 --- a/platforms/quorum/configuration/roles/setup/geth-bootnode/tasks/main.yaml +++ /dev/null @@ -1,64 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Create a temporary directory -- name: create temporary directory - tempfile: - state: directory - register: tmp_directory - -# Check if the geth binary is already in place or not -- name: Check geth - stat: - path: "{{ bin_install_dir }}/geth" - register: geth_stat_result - -# Check if the bootnode binary is already in place or not -- name: Check bootnode - stat: - path: "{{ bin_install_dir }}/bootnode" - register: bootnode_stat_result - -# Download the geth and bootnode tar -- name: Download geth and bootnode tar - get_url: - url: "{{ geth_tar.location }}" - dest: "{{ tmp_directory.path }}" - mode: 0440 - when: geth_stat_result.stat.exists == False or bootnode_stat_result.stat.exists == False - -# Create the bin directory, if it doesn't exist -- name: Create bin directory - file: - path: "{{ bin_install_dir }}" - state: directory - when: geth_stat_result.stat.exists == False or bootnode_stat_result.stat.exists == False - -# Extract the tar file containing the geth and bootnode binary -- name: Extracts the tar file containing the geth and bootnode binary - unarchive: - src: "{{ tmp_directory.path }}/geth-alltools-linux-amd64-1.10.0-56dec25a.tar.gz" - dest: "{{ tmp_directory.path }}" - copy: no - when: geth_stat_result.stat.exists == False or bootnode_stat_result.stat.exists == False - -# Copy the binary to destination directory -- name: Copy geth binary to destination directory - copy: - src: "{{ tmp_directory.path }}/geth-alltools-linux-amd64-1.10.0-56dec25a/geth" - dest: "{{ bin_install_dir }}" - mode: 0777 - remote_src: yes - when: geth_stat_result.stat.exists == False - -# Copy the binary to destination directory -- name: Copy bootnode binary to destination directory - copy: - src: "{{ tmp_directory.path }}/geth-alltools-linux-amd64-1.10.0-56dec25a/bootnode" - dest: "{{ bin_install_dir }}" - mode: 0777 - remote_src: yes - when: bootnode_stat_result.stat.exists == False diff --git a/platforms/quorum/configuration/roles/setup/geth-bootnode/vars/main.yaml b/platforms/quorum/configuration/roles/setup/geth-bootnode/vars/main.yaml deleted file mode 100644 index 9f1f5b843df..00000000000 --- a/platforms/quorum/configuration/roles/setup/geth-bootnode/vars/main.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -tmp_directory: "{{ lookup('env', 'TMPDIR') | default('/tmp',true) }}" - -geth_tar: - location: "https://gethstore.blob.core.windows.net/builds/geth-alltools-linux-amd64-1.10.0-56dec25a.tar.gz" diff --git a/platforms/quorum/configuration/roles/setup/golang/tasks/main.yaml b/platforms/quorum/configuration/roles/setup/golang/tasks/main.yaml deleted file mode 100644 index b3bd6951cde..00000000000 --- a/platforms/quorum/configuration/roles/setup/golang/tasks/main.yaml +++ /dev/null @@ -1,50 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Register a temporary directory -- name: Register temporary directory - tempfile: - state: directory - register: tmp_directory - changed_when: false - -# Check if go is already installed or not -- name: Check go - stat: - path: "{{ go_root_dir }}/go" - register: go_stat_result - changed_when: false - -# Download golang tar -- name: Download golang tar - get_url: - url: "https://storage.googleapis.com/golang/go{{ go.version }}.{{ install_os }}-{{ install_arch }}.tar.gz" - dest: "{{ tmp_directory.path }}" - mode: 0440 - when: not go_stat_result.stat.exists - -# Create bin directory -- name: Create bin directory - file: - path: "{{ bin_install_dir }}" - state: directory - when: not go_stat_result.stat.exists - -# Extract the Go tarball -- name: Extract the Go tarball - unarchive: - src: "{{ tmp_directory.path }}/go{{ go.version }}.{{install_os}}-{{install_arch}}.tar.gz" - dest: "{{ go_root_dir }}" - copy: no - become: yes - when: not go_stat_result.stat.exists - -# Test go installation -- name: Test go installation - command: "go version" - changed_when: false - environment: - GOROOT: "{{ go_root_dir }}" diff --git a/platforms/quorum/configuration/roles/setup/golang/vars/main.yaml b/platforms/quorum/configuration/roles/setup/golang/vars/main.yaml deleted file mode 100644 index 66ffbe92d7c..00000000000 --- a/platforms/quorum/configuration/roles/setup/golang/vars/main.yaml +++ /dev/null @@ -1,12 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -tmp_directory: "{{ lookup('env', 'TMPDIR') | default('/tmp',true) }}" - -go: - version: 1.13.5 -go_root_dir: "{{ go_root_folder | default('/usr/local') }}" diff --git a/platforms/quorum/configuration/roles/setup/istanbul/meta/main.yaml b/platforms/quorum/configuration/roles/setup/istanbul/meta/main.yaml deleted file mode 100644 index b23df7b8e02..00000000000 --- a/platforms/quorum/configuration/roles/setup/istanbul/meta/main.yaml +++ /dev/null @@ -1,9 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- - dependencies: - - role: "setup/golang" diff --git a/platforms/quorum/configuration/roles/setup/istanbul/tasks/main.yaml b/platforms/quorum/configuration/roles/setup/istanbul/tasks/main.yaml deleted file mode 100644 index e6bd2d67605..00000000000 --- a/platforms/quorum/configuration/roles/setup/istanbul/tasks/main.yaml +++ /dev/null @@ -1,49 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Check if the istanbul-tools is already in place or not -- name: Check istanbul - stat: - path: "{{ bin_install_dir }}/istanbul" - register: istanbul_stat_result - -# Check if istanbul repo directory exists -- name: Check istanbul repo dir exists - stat: - path: "{{ bin_install_dir }}/istanbul_repo" - register: repo_stat_result - -# Clone the istanbul-tools git repository -- name: Clone the istanbul-tools git repo - git: - repo: "{{ istanbul.repo }}" - version: v1.1.0 - force: yes - dest: "{{ bin_install_dir }}/istanbul_repo" - when: not ( repo_stat_result.stat.exists and istanbul_stat_result.stat.exists ) - -# Build the istanbul binary -- name: Make istanbul - make: - chdir: "{{ bin_install_dir }}/istanbul_repo" - when: istanbul_stat_result.stat.exists == False - -# Create the bin directory, if it doesn't exist, for storing the istanbul binary -- name: "Create bin directory" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/directory" - vars: - path: "{{ bin_install_dir }}" - when: istanbul_stat_result.stat.exists == False - -# Move the istanbul binary to above created bin directory -- name: Copy istanbul binary to destination directory - copy: - src: "{{ bin_install_dir }}/istanbul_repo/build/bin/istanbul" - dest: "{{ bin_install_dir }}/istanbul" - mode: 0755 - remote_src: yes - when: istanbul_stat_result.stat.exists == False diff --git a/platforms/quorum/configuration/roles/setup/istanbul/vars/main.yaml b/platforms/quorum/configuration/roles/setup/istanbul/vars/main.yaml deleted file mode 100644 index 57b7973ff1f..00000000000 --- a/platforms/quorum/configuration/roles/setup/istanbul/vars/main.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -tmp_directory: "{{ lookup('env', 'TMPDIR') | default('/tmp',true) }}" - -istanbul: - repo: https://github.com/ConsenSys/istanbul-tools.git diff --git a/platforms/quorum/configuration/roles/setup/new_member/tasks/main.yaml b/platforms/quorum/configuration/roles/setup/new_member/tasks/main.yaml new file mode 100644 index 00000000000..2619c9779fa --- /dev/null +++ b/platforms/quorum/configuration/roles/setup/new_member/tasks/main.yaml @@ -0,0 +1,49 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## +- name: Read static-nodes file + slurp: + src: "{{ files_loc }}/static-nodes.json" + register: staticnodes_file_content + vars: + files_loc: "{{playbook_dir}}/../../../{{ charts_dir }}/quorum-genesis/files" + +- name: Get Node enode URL from static-nodes + set_fact: + enode_url: "{{ (staticnodes_file_content.content | b64decode | from_json) | json_query('[?contains(@, `' + peer.name + '`)]') }}" + +- name: Get Node enode URL from static-nodes + set_fact: + enode: "{{ enode_url[0].split('@')[0] }}" + +#Get IP Address using getent for ubuntu/linux +- name: Get host ip + shell: | + getent hosts {{ peer.name }}.{{ org.external_url_suffix }} | awk '{ print $1 }' + register: host_details + +# Use host IP because admin_addPeer function does not accept DNS for host address +- name: Get host_ip + set_fact: + host_ip: "{{ host_details.stdout.split('\n') | first }}" + +# Add a new node to the existing network using admin_addPeer API +- name: Add a new node to the existing network + uri: + url: "{{ node }}" + method: POST + validate_certs: no + return_content: yes + body_format: json + body: '{"jsonrpc":"2.0","method":"admin_addPeer","params":["{{ enode }}@{{ host_ip }}:{{ peer.p2p.ambassador }}"],"id":1}' + headers: + Content-Type: "application/json" + loop: "{{ network.config.tm_nodes }}" + loop_control: + loop_var: node + register: peer_id + until: peer_id.status == 200 + retries: "{{ network.env.retry_count }}" + delay: 20 diff --git a/platforms/quorum/configuration/samples/network-minikube.yaml b/platforms/quorum/configuration/samples/network-minikube.yaml index 856bfcc2f91..af42df24d30 100644 --- a/platforms/quorum/configuration/samples/network-minikube.yaml +++ b/platforms/quorum/configuration/samples/network-minikube.yaml @@ -32,7 +32,7 @@ network: # Please ensure all required images are built and stored in this registry. # Do not check-in docker_password. docker: - url: "index.docker.io/hyperledgerlabs" + url: "ghcr.io/hyperledger" username: "docker_username" password: "docker_password" diff --git a/platforms/quorum/configuration/samples/network-quorum-newnode.yaml b/platforms/quorum/configuration/samples/network-quorum-newnode.yaml index d157302cd51..f2f057df9b1 100644 --- a/platforms/quorum/configuration/samples/network-quorum-newnode.yaml +++ b/platforms/quorum/configuration/samples/network-quorum-newnode.yaml @@ -33,7 +33,7 @@ network: # Please ensure all required images are built and stored in this registry. # Do not check-in docker_password. docker: - url: "index.docker.io/hyperledgerlabs" + url: "ghcr.io/hyperledger" username: "docker_username" password: "docker_password" diff --git a/platforms/quorum/configuration/samples/network-quorum-tessera.yaml b/platforms/quorum/configuration/samples/network-quorum-tessera.yaml index 3bf57670ccc..e8f5f87772c 100644 --- a/platforms/quorum/configuration/samples/network-quorum-tessera.yaml +++ b/platforms/quorum/configuration/samples/network-quorum-tessera.yaml @@ -33,7 +33,7 @@ network: # Please ensure all required images are built and stored in this registry. # Do not check-in docker_password. 
docker: - url: "index.docker.io/hyperledgerlabs" + url: "ghcr.io/hyperledger" username: "docker_username" password: "docker_password" diff --git a/platforms/quorum/configuration/samples/network-quorum.yaml b/platforms/quorum/configuration/samples/network-quorum.yaml index 57b15b726be..3238b25cde4 100644 --- a/platforms/quorum/configuration/samples/network-quorum.yaml +++ b/platforms/quorum/configuration/samples/network-quorum.yaml @@ -100,40 +100,91 @@ network: private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) # The participating nodes are named as peers services: - peers: - - peer: - name: carrier - subject: "O=Carrier,OU=Carrier,L=51.50/-0.13/London,C=GB" # This is the node subject. L=lat/long is mandatory for supplychain sample app - type: member # value can be validator or member, only applicable if consensus = 'ibft' - geth_passphrase: "12345" # Passphrase to be used to generate geth account - lock: false # true or false: corresponds to, **geth --allow-insecure-unlock**, while starting up the node + validators: + - validator: + name: validator-1 + bootnode: true # true if the validator node is used also a bootnode for the network + cactus_connector: disabled # set to enabled to create a cactus connector for Besu p2p: port: 21000 ambassador: 15010 #Port exposed on ambassador service (use one port per org if using single cluster) rpc: + port: 8545 + ambassador: 80 # Will only support port 80 + ws: port: 8546 - transaction_manager: - port: 443 - ambassador: 443 - clientport: 8888 - raft: # Only used if consensus = 'raft' - port: 50401 - ambassador: 15013 - db: # Only used if transaction_manager = "tessera" - port: 3306 + metrics: + enabled: true # Set this to true to enable Prometheus monitoring for this node, or false to disable it. + port: 9545 # Specify the port that Prometheus will use to collect metrics for this node. + - validator: + name: validator-2 + bootnode: true # true if the validator node is used also a bootnode for the network + cactus_connector: disabled # set to enabled to create a cactus connector for Besu + p2p: + port: 21000 + ambassador: 15011 #Port exposed on ambassador service (use one port per org if using single cluster) + rpc: + port: 8545 + ambassador: 80 # Will only support port 80 + ws: + port: 8546 + metrics: + enabled: true # Set this to true to enable Prometheus monitoring for this node, or false to disable it. + port: 9545 # Specify the port that Prometheus will use to collect metrics for this node. + - validator: + name: validator-3 + bootnode: false # true if the validator node is used also a bootnode for the network + p2p: + port: 21000 + ambassador: 15012 #Port exposed on ambassador service (use one port per org if using single cluster) + rpc: + port: 8545 + ambassador: 80 # Will only support port 80 + ws: + port: 8546 + metrics: + enabled: false # Set this to true to enable Prometheus monitoring for this node, or false to disable it. + port: 9545 # Specify the port that Prometheus will use to collect metrics for this node. + - validator: + name: validator-4 + bootnode: false # true if the validator node is used also a bootnode for the network + p2p: + port: 21000 + ambassador: 15013 #Port exposed on ambassador service (use one port per org if using single cluster) + rpc: + port: 8545 + ambassador: 80 # Will only support port 80 + ws: + port: 8546 + metrics: + enabled: false # Set this to true to enable Prometheus monitoring for this node, or false to disable it. 
+ port: 9545 # Specify the port that Prometheus will use to collect metrics for this node. + peers: - peer: - name: validator1 - subject: "O=Validator1,OU=Validator1,L=51.50/-0.13/London,C=GB" # This is the node subject. L=lat/long is mandatory for supplychain sample app - type: validator # value can be validator or member, only applicable if consensus = 'ibft' + name: supplychain + subject: "O=SupplyChain,OU=ValidatorOrg,L=51.50/-0.13/London,C=GB" # This is the node subject. L=lat/long is mandatory for supplychain sample app geth_passphrase: "12345" # Passphrase to be used to generate geth account - lock: false # true or false: corresponds to, **geth --allow-insecure-unlock**, while starting up the node + lock: true # Sets Besu node to lock or unlock mode. Can be true or false + cactus_connector: disabled # set to enabled to create a cactus connector for Besu p2p: port: 21000 ambassador: 15014 #Port exposed on ambassador service (use one port per org if using single cluster) rpc: + port: 8545 + ambassador: 80 # Will only support port 80 + ws: port: 8546 - db: # Only used if transaction_manager = "tessera" - port: 3306 + db: + port: 3306 # Only applicable for tessra where mysql db is used + tm_nodeport: + port: 443 # Port exposed on ambassador service must be same + ambassador: 443 + tm_clientport: + port: 8888 + metrics: + enabled: false # Set this to true to enable Prometheus monitoring for this node, or false to disable it. + port: 9545 # Specify the port that Prometheus will use to collect metrics for this node. + # Specification for the 2nd organization. Each organization maps to a VPC and a separate k8s cluster - organization: @@ -180,7 +231,7 @@ network: lock: false # true or false: corresponds to, **geth --allow-insecure-unlock**, while starting up the node p2p: port: 21000 - ambassador: 15020 #Port exposed on ambassador service (use one port per org if using single cluster) + ambassador: 15015 #Port exposed on ambassador service (use one port per org if using single cluster) rpc: port: 8546 transaction_manager: @@ -192,19 +243,6 @@ network: ambassador: 15023 db: # Only used if transaction_manager = "tessera" port: 3306 - - peer: - name: validator2 - subject: "O=Validator2,OU=Validator2,L=47.38/8.54/Zurich,C=CH" # This is the node subject. L=lat/long is mandatory for supplychain sample app - type: validator # value can be validator or member, only applicable if consensus = 'ibft' - geth_passphrase: "12345" # Passphrase to be used to generate geth account - lock: false # true or false: corresponds to, **geth --allow-insecure-unlock**, while starting up the node - p2p: - port: 21000 - ambassador: 15024 #Port exposed on ambassador service (use one port per org if using single cluster) - rpc: - port: 8546 - db: # Only used if transaction_manager = "tessera" - port: 3306 - organization: name: store @@ -249,7 +287,7 @@ network: lock: false # true or false: corresponds to, **geth --allow-insecure-unlock**, while starting up the node p2p: port: 21000 - ambassador: 15030 #Port exposed on ambassador service (use one port per org if using single cluster) + ambassador: 15016 #Port exposed on ambassador service (use one port per org if using single cluster) rpc: port: 8546 transaction_manager: @@ -261,19 +299,6 @@ network: ambassador: 15033 db: # Only used if transaction_manager = "tessera" port: 3306 - - peer: - name: validator3 - subject: "O=Validator3,OU=Validator3,L=40.73/-74/New York,C=US" # This is the node subject. 
L=lat/long is mandatory for supplychain sample app - type: validator # value can be validator or member, only applicable if consensus = 'ibft' - geth_passphrase: "12345" # Passphrase to be used to generate geth account - lock: false # true or false: corresponds to, **geth --allow-insecure-unlock**, while starting up the node - p2p: - port: 21000 - ambassador: 15034 #Port exposed on ambassador service (use one port per org if using single cluster) - rpc: - port: 8546 - db: # Only used if transaction_manager = "tessera" - port: 3306 - organization: name: warehouse @@ -318,7 +343,7 @@ network: lock: false # true or false: corresponds to, **geth --allow-insecure-unlock**, while starting up the node p2p: port: 21000 - ambassador: 15040 #Port exposed on ambassador service (use one port per org if using single cluster) + ambassador: 15017 #Port exposed on ambassador service (use one port per org if using single cluster) rpc: port: 8546 transaction_manager: @@ -330,16 +355,3 @@ network: ambassador: 15043 db: # Only used if transaction_manager = "tessera" port: 3306 - - peer: - name: validator4 - subject: "O=Validator4,OU=Validator4,L=42.36/-71.06/Boston,C=US" # This is the node subject. L=lat/long is mandatory for supplychain sample app - type: validator # value can be validator or member, only applicable if consensus = 'ibft' - geth_passphrase: "12345" # Passphrase to be used to generate geth account - lock: false # true or false: corresponds to, **geth --allow-insecure-unlock**, while starting up the node - p2p: - port: 21000 - ambassador: 15044 #Port exposed on ambassador service (use one port per org if using single cluster) - rpc: - port: 8546 - db: # Only used if transaction_manager = "tessera" - port: 3306 diff --git a/platforms/r3-corda-ent/charts/README.md b/platforms/r3-corda-ent/charts/README.md new file mode 100644 index 00000000000..96ad3257016 --- /dev/null +++ b/platforms/r3-corda-ent/charts/README.md @@ -0,0 +1,149 @@ +[//]: # (##############################################################################################) +[//]: # (Copyright Accenture. All Rights Reserved.) +[//]: # (SPDX-License-Identifier: Apache-2.0) +[//]: # (##############################################################################################) + +# Charts for R3 Corda Enterprise components + +## About +This folder contains the helm charts which are used for the deployment of the R3 Corda Enterprise components. Each helm chart that you can use has the following keys and you need to set them. The `global.cluster.provider` is used as a key for the various cloud features enabled. Also you only need to specify one cloud provider, **not** both if deploying to cloud. As of writing this doc, AWS is fully supported. + +```yaml +global: + serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws + cloudNativeServices: false # future: set to true to use Cloud Native Services + kubernetesUrl: "https://yourkubernetes.com" # Provide the k8s URL, ignore if not using Hashicorp Vault + vault: + type: hashicorp # choose from hashicorp | kubernetes + network: corda # must be corda for these charts + # Following are necessary only when hashicorp vault is used. 
+  address: http://vault.url:8200 +  authPath: supplychain +  secretEngine: secretsv2 +  secretPrefix: "data/supplychain" +  role: vault-role +``` + +## Usage + +### Pre-requisites + +- Kubernetes cluster (either a managed cloud option like EKS or a local option like minikube) +- Accessible and unsealed HashiCorp Vault (if using Vault) +- Configured Ambassador AES (if using Ambassador as proxy) +- Update the dependencies +  ``` +  helm dependency update enterprise-init +  helm dependency update cenm +  helm dependency update enterprise-node +  helm dependency update cenm-networkmap +  ``` + +### _Without Proxy or Vault_ + +```bash +helm install init ./enterprise-init --namespace supplychain-ent --create-namespace --values ./values/noproxy-and-novault/init.yaml + +# Install cenm services : Zone, Auth, Gateway, Idman and Signer +helm install cenm ./cenm --namespace supplychain-ent --values ./values/noproxy-and-novault/cenm.yaml + +# Install the initial set of notary nodes +helm install notary ./enterprise-node --namespace supplychain-ent --values ./values/noproxy-and-novault/notary.yaml + +# Install cenm services : Networkmap service +helm install networkmap ./cenm-networkmap --namespace supplychain-ent --values ./values/noproxy-and-novault/cenm.yaml + +# Install a Corda node +helm install node ./enterprise-node --namespace supplychain-ent --values ./values/noproxy-and-novault/node.yaml + +``` +### To set up another node in a different namespace + +```bash +# Run init for new namespace +helm install init ./enterprise-init --namespace manufacturer-ent --create-namespace --values ./values/noproxy-and-novault/init.yaml + +# This step is an operator task, where the network operator provides the network-root-truststore.jks file and its passwords + +mkdir -p ./enterprise-node/build + +kubectl get secret -n supplychain-ent cenm-certs -o jsonpath="{.data.network\-root\-truststore\.jks}" | base64 --decode > ./enterprise-node/build/network-root-truststore.jks + +kubectl create secret generic -n manufacturer-ent cenm-certs --from-file=network-root-truststore.jks=./enterprise-node/build/network-root-truststore.jks + +# Update the ./values/noproxy-and-novault/node.yaml with the given truststore password at network.creds.truststore + +# Install a Corda node +helm install manufacturer ./enterprise-node --namespace manufacturer-ent --values ./values/noproxy-and-novault/node.yaml +``` + +### _With Ambassador proxy and Vault_ + +Replace the `global.vault.address`, `global.cluster.kubernetesUrl` and `global.proxy.externalUrlSuffix` in all the files in the `./values/proxy-and-vault/` folder. Also update `nodeConf.networkMapURL` and `nodeConf.doormanURL` to match the `global.proxy.externalUrlSuffix` used for the nms and doorman services.
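As an illustrative sketch only (the hostnames below are placeholders for your own environment, not values shipped with the charts), the edited values files might carry entries along these lines; `nodeConf.doormanURL` and `nodeConf.networkMapURL` are assumed to follow the `externalUrlSuffix` chosen for the idman and nms releases:

```yaml
global:
  cluster:
    kubernetesUrl: "https://my-cluster.example.com"       # placeholder Kubernetes API URL
  vault:
    address: "http://vault.example.com:8200"              # placeholder Vault address
  proxy:
    provider: ambassador
    externalUrlSuffix: example.blockchaincloudpoc.com     # placeholder external URL suffix

nodeConf:
  doormanURL: "https://idman.example.blockchaincloudpoc.com"   # placeholder, derived from externalUrlSuffix
  networkMapURL: "https://nms.example.blockchaincloudpoc.com"  # placeholder, derived from externalUrlSuffix
```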
+ +```bash +kubectl create namespace supplychain-ent # if the namespace does not exist already + +# Create the roottoken secret +kubectl -n supplychain-ent create secret generic roottoken --from-literal=token= + +helm install init ./enterprise-init --namespace supplychain-ent --values ./values/proxy-and-vault/init.yaml + +# Install cenm services +helm install cenm ./cenm --namespace supplychain-ent --values ./values/proxy-and-vault/cenm.yaml + +# Install a notary service +helm install notary ./enterprise-node --namespace supplychain-ent --values ./values/proxy-and-vault/notary.yaml + +# Install cenm services : Networkmap service +helm install networkmap ./cenm-networkmap --namespace supplychain-ent --values ./values/proxy-and-vault/cenm.yaml +``` + + +### To setup another node in a different namespace +```bash +kubectl create namespace manufacturer-ent # if the namespace does not exist already +# Create the roottoken secret +kubectl -n manufacturer-ent create secret generic roottoken --from-literal=token= +# Run init for new namespace +helm install init ./enterprise-init --namespace manufacturer-ent --create-namespace --values ./values/proxy-and-vault/init.yaml + +# This step is an operator task, where the network operator provides the network-root-truststore.jks file and its passwords + +mkdir -p ./enterprise-node/build +mkdir -p ./enterprise-node/build/doorman +mkdir -p ./enterprise-node/build/nms + +kubectl get secret -n supplychain-ent cenm-certs -o jsonpath="{.data.network\-root\-truststore\.jks}" | base64 --decode > ./enterprise-node/build/network-root-truststore.jks +kubectl get secret -n supplychain-ent doorman-tls-certs -o jsonpath="{.data.tls\.crt}" | base64 --decode > ./enterprise-node/build/doorman/tls.crt +kubectl get secret -n supplychain-ent nms-tls-certs -o jsonpath="{.data.tls\.crt}" | base64 --decode > ./enterprise-node/build/nms/tls.crt + +kubectl create secret generic -n manufacturer-ent cenm-certs --from-file=network-root-truststore.jks=./enterprise-node/build/network-root-truststore.jks +kubectl create secret generic -n manufacturer-ent doorman-tls-certs --from-file=tls.crt=./enterprise-node/build/doorman/tls.crt +kubectl create secret generic -n manufacturer-ent nms-tls-certs --from-file=tls.crt=./enterprise-node/build/nms/tls.crt + +# Update the ./values/proxy-and-vault/node.yaml with the given truststore password at network.creds.truststore + +# Install a Corda node +helm install node ./enterprise-node --namespace manufacturer-ent --values ./values/proxy-and-vault/node.yaml +``` + +### Clean-up + +To clean up, just uninstall the helm releases. +```bash +helm uninstall --namespace supplychain-ent node +helm uninstall --namespace supplychain-ent notary +helm uninstall --namespace supplychain-ent cenm +helm uninstall --namespace supplychain-ent networkmap +helm uninstall --namespace supplychain-ent init + +helm uninstall --namespace manufacturer-ent manufacturer +helm uninstall --namespace manufacturer-ent init + +# Clean up the created namespaces to completly clean up the env. 
+kubectl delete ns supplychain-ent +kubectl delete ns maunfacturer-ent +``` diff --git a/platforms/r3-corda-ent/charts/cenm-auth/Chart.yaml b/platforms/r3-corda-ent/charts/cenm-auth/Chart.yaml index e208d3e0cdf..906b679b364 100644 --- a/platforms/r3-corda-ent/charts/cenm-auth/Chart.yaml +++ b/platforms/r3-corda-ent/charts/cenm-auth/Chart.yaml @@ -5,7 +5,21 @@ ############################################################################################## apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-ent: Deploys the CENM Auth Service." name: cenm-auth +description: "R3 Corda Enterprise Network Manager Auth Service" version: 1.0.0 +appVersion: "latest" +keywords: + - bevel + - corda + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/r3-corda-ent/charts/cenm-auth/README.md b/platforms/r3-corda-ent/charts/cenm-auth/README.md index c7008fca3c7..f770b9f65bd 100644 --- a/platforms/r3-corda-ent/charts/cenm-auth/README.md +++ b/platforms/r3-corda-ent/charts/cenm-auth/README.md @@ -3,188 +3,109 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) - -# Auth Deployment +# cenm auth-service -- [Auth Deployment Helm Chart](#Auth-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Contributing](#contributing) -- [License](#license) +This chart is a component of Hyperledger Bevel. The cenm-auth chart deploys a R3 Corda Enterprise auth. If enabled, the keys are then stored on the configured vault and stored as Kubernetes secrets. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details. - -## Auth Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/cenm-auth) deploys the CENM Auth Service. +## TL;DR +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install auth bevel/cenm-auth +``` - ## Prerequisites ---- -Before deploying the chart please ensure you have the following prerequisites: - -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm is installed. +- Kubernetes 1.19+ +- Helm 3.2.0+ - -## Chart Structure ---- -This chart has following structue: -``` - ├── cenm-auth - │ ├── Chart.yaml - │ ├── templates - │ │ ├── deployment.yaml - │ │ ├── _helpers.tpl - │ │ ├── pvc.yaml - | | |__ configmap.yaml - │ │ └── service.yaml - │ └── values.yaml -``` - -Type of files used: +If Hashicorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ -- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed. -- `deployment.yaml` : This deployment file defines the setup and configuration of an authentication service for R3 Corda Enterprise, handling tasks such as retrieving SSL certificates and authentication keys from a Vault server, generating JWT, managing the service configuration, and setting up liveness and readiness checks. -- `_helpers.tpl` : A template file used for defining custom labels in the Helm chart. 
-- `pvc.yaml` : A PersistentVolumeClaim (PVC) is a request for storage by a user. -- `configmap.yaml` : ConfigMap resource in Kubernetes with a specific name and namespace, along with labels for identification. -- `service.yaml` : This file defines a Kubernetes Service with multiple ports for protocols and targets, and supports Ambassador proxy annotations for specific configurations when using the "ambassador" proxy provider. -- `chart.yaml` : Provides metadata about the chart, such as its name, version, and description. -- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the metadata, image, storage, Vault, etc. +> **Important**: Ensure the `enterprise-init` chart has been installed before installing this. Also check the dependent charts. Installing this chart seperately is not required as it is a dependent chart for cenm, and is installed with cenm chart. - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/cenm-auth/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: +## Installing the Chart -## Parameters ---- +To install the chart with the release name `auth`: -### Name +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install auth bevel/cenm-auth +``` -| Name | Description | Default Value | -| -----------| -------------------------------------------------- | ------------- | -| nodeName | Provide the name of the node | auth | +The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. 
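To override the defaults, a custom values file can be supplied via `--values`. The snippet below is only a sketch: the file name `auth-overrides.yaml` is hypothetical and the values shown simply mirror keys documented in the tables that follow.

```yaml
# auth-overrides.yaml (hypothetical file name) -- illustrative overrides for cenm-auth
global:
  serviceAccountName: vault-auth
  vault:
    type: hashicorp
    address: "http://vault.example.com:8200"   # placeholder Vault URL
    authPath: supplychain
    secretEngine: secretsv2
    secretPrefix: "data/supplychain"
  cenm:
    auth:
      port: 8081
image:
  pullPolicy: IfNotPresent
```

It would then be applied with `helm install auth bevel/cenm-auth --values auth-overrides.yaml`.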
-### Metadata +> **Tip**: List all releases using `helm list` -| Name | Description | Default Value | -| ----------------| ----------------------------------------------------------------| ------------- | -| namespace | Provide the namespace for the Corda Enterprise Auth | cenm | -| labels | Provide any additional labels for the Corda Enterprise Auth | "" | +## Uninstalling the Chart -### Image +To uninstall/delete the `auth` deployment: -| Name | Description | Default Value | -| ------------------------ | --------------------------------------------------------------------------------------- | ----------------------------------------------| -| initContainerName | Provide the alpine utils image, which is used for all init-containers of deployments/jobs | ghcr.io/hyperledger | -| authContainerName | Provide the image for the main Corda Enterprise Auth | corda/enterprise-auth:1.5.1-zulu-openjdk8u242 | -| imagePullSecret | Provide the docker-registry secret created and stored in kubernetes cluster as a secret | "" | -| pullPolicy | Pull policy to be used for the Docker image | IfNotPresent | - -### storage - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ------------- | -| name | Provide the name of the storageclass | cenm | -| acceptLicense | Required parameter to start any files | yes | - -### Vault - -| Name | Description | Default Value | -| ------------------------- | --------------------------------------------------------------------------| ----------------------------------| -| address | Address/URL of the Vault server | "" | -| role | Role used for authentication with Vault | vault-role | -| authpath | Authentication path for Vault | entcordacenm | -| serviceAccountName | Provide the already created service account name autheticated to vault | vault-auth | -| certSecretPrefix | Provide the vault path where the certificates are stored | secret/cenm-org-name/signer/certs | -| retries | Number of retries to check contents from vault | 10 | -| sleepTimeAfterError | Sleep time in seconds when error while registration | 15 | - -### Database - -| Name | Description | Default Value | -| --------------------- | -------------------------------------------------------- | ------------------ | -| driverClassName | Java class name to use for the database | org.h2.Driver | -| jdbcDriver | The DB connection URL | "" | -| user | DB user | example-db-user | -| password | DB password | example-db-password| -| runMigration | Option to run database migrations as part of startup | "true" | - -### Config - -| Name | Description | Default Value | -| ------------------------ | -------------------------------------------------------------------------------------| --------------- | -| baseDir | Provide volume related specifications | "/opt/corda" | -| jarPath | Provide the path where the CENM Idman .jar-file is stored | "bin" | -| configPath | Provide the path where the CENM Service configuration files are stored | "etc" | -| pvc | Provide any extra annotations for the PVCs | "" | -| deployment | Provide any extra annotations for the deployment | "value" | -| pod | Set memory limits of pod | "" | -| podSecurityContext | Allows you to set security-related settings at the Pod level | "" | -| securityContext | Securitycontext at pod level | "" | -| replicas | Provide the number of replicas for your pods | "1" | -| logsContainersEnabled | Enable container displaying live logs | "true" | -| cordaJar | Specify the maximum size of the memory allocation 
pool | "" | -| sleepTimeAfterError | Sleep time in seconds, occurs after any error is encountered in start-up | "120" | - -### Service - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ------------- | -| type | Provide the type of service | ClusterIP | -| port | provide the port for service | 8081 | - - -## Deployment ---- - -To deploy the Auth Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/cenm-auth/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install, upgrade, verify delete the chart: - -To install the chart: ```bash -helm repo add bevel https://hyperledger.github.io/bevel/ -helm install ./cenm-auth +helm uninstall auth ``` -To upgrade the chart: -```bash -helm upgrade ./cenm-auth -``` +The command removes all the Kubernetes components associated with the chart and deletes the release. -To verify the deployment: -```bash -kubectl get jobs -n -``` -Note : Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. +## Parameters -To delete the chart: -```bash -helm uninstall -``` -Note : Replace `` with the desired name for the release. +### Global parameters +These parameters are refered to as same in each parent or child chart +| Name | Description | Default Value | +|--------|---------|-------------| +|`global.serviceAccountName` | The serviceaccount name that will be used for Vault Auth management| `vault-auth` | +| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. Currently ony `aws` and `minikube` is tested | `aws` | +| `global.cluster.cloudNativeServices` | only `false` is implemented, `true` to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) is for future | `false` | +| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` is supported. | `hashicorp` | +| `global.vault.role` | Role used for authentication with Vault | `vault-role` | +| `global.vault.address`| URL of the Vault server. | `""` | +| `global.vault.authPath` | Authentication path for Vault | `supplychain` | +| `global.vault.secretEngine` | The value for vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | The value for vault secret prefix which must start with `data/` | `data/supplychain` | +| `global.proxy.provider` | The proxy or Ingress provider. 
Can be `none` or `ambassador` | `ambassador` | +| `global.proxy.externalUrlSuffix` | The external URL suffix at which the CENM services will be exposed | `test.blockchaincloudpoc.com` | +| `global.cenm.sharedCreds.truststore` | The password for the truststores created by the pki tool | `password` | +| `global.cenm.sharedCreds.keystore` | The password for the keystores created by the pki tool | `password` | +| `global.cenm.auth.port` | The port for the auth API | `8081` | + +### Storage + +| Name | Description | Default Value | +|--------|---------|-------------| +| `storage.allowedTopologies.enabled` | Check [bevel-storageclass](../../../shared/charts/bevel-storageclass/README.md) for details | `false` | - -## Contributing --- -If you encounter any bugs, have suggestions, or would like to contribute to the [Auth Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/cenm-auth), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). +### Image +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | +| `image.pullPolicy` | Pull policy to be used for the Docker images | `IfNotPresent` | +| `image.auth.repository` | CENM auth image repository | `corda/enterprise-auth` | +| `image.auth.tag` | CENM auth image tag | `1.5.9-zulu-openjdk8u382` | +| `image.enterpriseCli.repository` | Corda enterprise-cli image repository | `corda/enterprise-cli` | +| `image.enterpriseCli.tag` | Corda enterprise-cli image tag | `1.5.9-zulu-openjdk8u382` | + +### Database Settings +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `database.driverClassName` | DB driver class name | `org.h2.Driver` | +| `database.jdbcDriver` | DB JDBC driver | `""` | +| `database.url` | DB connection URL | `jdbc:h2:file:./h2/auth-persistence;DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=10000;WRITE_DELAY=0;AUTO_SERVER_PORT=0` | +| `database.user` | DB user name | `auth-db-user` | +| `database.password` | DB password | `auth-db-password` | + +### Subjects + +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `subjects.auth` | X.509 subject for the auth service | `"CN=Test TLS Auth Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US"` | + - ## License This chart is licensed under the Apache v2.0 license.
-Copyright © 2023 Accenture +Copyright © 2024 Accenture ### Attribution diff --git a/platforms/r3-corda-ent/charts/cenm-auth/files/authservice.conf b/platforms/r3-corda-ent/charts/cenm-auth/files/authservice.conf index ea7eebdaf62..d4ae0e9a3ad 100644 --- a/platforms/r3-corda-ent/charts/cenm-auth/files/authservice.conf +++ b/platforms/r3-corda-ent/charts/cenm-auth/files/authservice.conf @@ -1,11 +1,12 @@ database { driverClassName = "{{ .Values.database.driverClassName }}" - jdbcDriver = "{{ .Values.database.jdbcDriver }}" + jdbcDriver = "" url = "{{ .Values.database.url }}" user = "{{ .Values.database.user }}" password = "{{ .Values.database.password }}" - runMigration = "{{ .Values.database.runMigration }}" + runMigration = {{ .Values.database.runMigration }} } + jwk { location = "etc/jwt-store.jks" password = "password" @@ -35,16 +36,16 @@ clientConfig = { issuer = "http://test" } server { - port = {{ .Values.service.port }} + port = {{ .Values.global.cenm.auth.port }} ssl = { keyStore = { - location = "./DATA/root/corda-ssl-auth-keys.jks" - password = "AUTH_SSL" + location = "/certs/corda-ssl-auth-keys.jks" + password = {{ .Values.global.cenm.sharedCreds.keystore }} keyAlias = "cordasslauthservice" } trustStore = { - location = "./DATA/root/corda-ssl-trust-store.jks" - password = "SSL_TRUSTSTORE" + location = "/certs/corda-ssl-trust-store.jks" + password = {{ .Values.global.cenm.sharedCreds.truststore }} } } } diff --git a/platforms/r3-corda-ent/charts/cenm-auth/files/run.sh b/platforms/r3-corda-ent/charts/cenm-auth/files/run.sh new file mode 100644 index 00000000000..bfd6b5cae06 --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm-auth/files/run.sh @@ -0,0 +1,34 @@ +#!/bin/sh + +# +# main run +# +if [ -f /opt/cenm/bin/accounts-application.jar ] +then + echo + echo "CENM: starting CENM Auth service ..." + echo + java -jar /opt/cenm/bin/accounts-application.jar \ + --config-file authservice.conf \ + --initial-user-name {{ .Values.creds.authInitUserName }} \ + --initial-user-password {{ .Values.creds.authInitUserPassword }} \ + --keep-running --verbose + EXIT_CODE=${?} +else + echo "Missing Auth service jar file." + EXIT_CODE=110 +fi + +if [ "${EXIT_CODE}" -ne "0" ] +then + HOW_LONG={{ .Values.sleepTimeAfterError }} + echo + echo "Auth service failed - exit code: ${EXIT_CODE} (error)" + echo + echo "Going to sleep for the requested {{ .Values.sleepTimeAfterError }} seconds to let you log in and investigate." + sleep {{ .Values.sleepTimeAfterError }} + echo +fi + +sleep ${HOW_LONG} +echo diff --git a/platforms/r3-corda-ent/charts/cenm-auth/requirements.yaml b/platforms/r3-corda-ent/charts/cenm-auth/requirements.yaml new file mode 100644 index 00000000000..895f0a0e1cf --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm-auth/requirements.yaml @@ -0,0 +1,7 @@ +dependencies: + - name: bevel-storageclass + alias: storage + repository: "file://../../../shared/charts/bevel-storageclass" + tags: + - storage + version: ~1.0.0 diff --git a/platforms/r3-corda-ent/charts/cenm-auth/templates/_helpers.tpl b/platforms/r3-corda-ent/charts/cenm-auth/templates/_helpers.tpl index 7f9b0dc6131..aa36fcf8b80 100644 --- a/platforms/r3-corda-ent/charts/cenm-auth/templates/_helpers.tpl +++ b/platforms/r3-corda-ent/charts/cenm-auth/templates/_helpers.tpl @@ -1,5 +1,29 @@ -{{- define "labels.custom" -}} - {{- range $key, $val := .Values.metadata.labels -}} - {{- $key -}}: {{- $val -}} - {{- end -}} +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "auth.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "auth.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "auth.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- end -}} diff --git a/platforms/r3-corda-ent/charts/cenm-auth/templates/configmap.yaml b/platforms/r3-corda-ent/charts/cenm-auth/templates/configmap.yaml index 735c4a95057..4a2a359f979 100644 --- a/platforms/r3-corda-ent/charts/cenm-auth/templates/configmap.yaml +++ b/platforms/r3-corda-ent/charts/cenm-auth/templates/configmap.yaml @@ -8,13 +8,18 @@ apiVersion: v1 kind: ConfigMap metadata: - name: {{ .Values.prefix }}-auth-conf - namespace: {{ .Values.metadata.namespace }} + name: {{ include "auth.fullname" . }}-conf + namespace: {{ .Release.Namespace }} labels: - app.kubernetes.io/name: {{ .Values.nodeName }} + app.kubernetes.io/name: {{ include "auth.fullname" . }} helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - {{- include "labels.custom" . | nindent 2 }} + app.kubernetes.io/part-of: {{ include "auth.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} data: authservice.conf: |+ {{ tpl (.Files.Get "files/authservice.conf") . | indent 4 }} + + run.sh: |+ +{{ tpl (.Files.Get "files/run.sh") . | indent 4 }} diff --git a/platforms/r3-corda-ent/charts/cenm-auth/templates/deployment.yaml b/platforms/r3-corda-ent/charts/cenm-auth/templates/deployment.yaml deleted file mode 100644 index fc7786420c4..00000000000 --- a/platforms/r3-corda-ent/charts/cenm-auth/templates/deployment.yaml +++ /dev/null @@ -1,296 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} -spec: - serviceName: {{ .Values.nodeName }} - replicas: 1 - selector: - matchLabels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - template: - metadata: - labels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - {{- with .Values.image.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . 
| nindent 8 }} - {{- end }} - serviceAccountName: {{ .Values.vault.serviceAccountName }} - securityContext: - {{- toYaml .Values.config.podSecurityContext | nindent 8 }} - initContainers: - - name: init-certificates - image: {{ .Values.image.initContainerName }} - imagePullPolicy: {{ .Values.image.pullPolicy }} - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authPath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.config.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certSecretPrefix }} - - name: MOUNT_PATH - value: "/DATA" - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code and curl_response - $curl_response" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - # setting up env to get secrets from vault - echo "Getting secrets from Vault Server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - - mkdir -p ${MOUNT_PATH}/root; - - # ssl trust-stores and auth key store from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/root/certs | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/root/certs" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - - corda_ssl_trust_store=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["corda-ssl-trust-store.jks"]') - echo "${corda_ssl_trust_store}" | base64 -d > ${MOUNT_PATH}/root/corda-ssl-trust-store.jks - - corda_ssl_auth_keys=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["corda-ssl-auth-keys.jks"]') - echo "${corda_ssl_auth_keys}" | base64 -d > ${MOUNT_PATH}/root/corda-ssl-auth-keys.jks - - mkdir -p ${MOUNT_PATH}/ssl; - OUTPUT_PATH=${MOUNT_PATH}/ssl; - # Fetching credentials for ssl certificates - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials/ssl | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials/ssl" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - AUTH_SSL=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["auth"]') - echo "${AUTH_SSL}"> ${OUTPUT_PATH}/authssl - - # Fetching credentials for truststores - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials/truststore | jq -r 'if .errors then . else . 
end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials/truststore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - SSL_TRUSTSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["ssl"]') - echo "${SSL_TRUSTSTORE}"> ${OUTPUT_PATH}/sslts - - # Fetching credentials for initial user - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials/user | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials/user" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - USERNAME=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["username"]') - echo "${USERNAME}"> ${OUTPUT_PATH}/username - USERPWD=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["userpwd"]') - echo "${USERPWD}"> ${OUTPUT_PATH}/userpwd - echo "Done" - volumeMounts: - - name: certificates - mountPath: /DATA - - name: init-jwt - securityContext: - {{- toYaml .Values.config.securityContext | nindent 12 }} - image: "{{ .Values.image.authContainerName }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: ["/bin/sh", "-c"] - args: - - |- - #[TODO] : The JWT creation can be moved to PKI Job - keytool -genkeypair -alias oauth-test-jwt -keyalg RSA -keypass password -keystore etc/jwt-store.jks -storepass password -dname "{{ .Values.authSubject }}" - resources: - requests: - memory: {{ .Values.config.pod.resources.requests }} - limits: - memory: {{ .Values.config.pod.resources.limits }} - volumeMounts: - - name: {{ .Values.prefix }}-auth-etc - mountPath: {{ .Values.config.volume.baseDir }}/etc - containers: - - name: main - securityContext: - {{- toYaml .Values.config.securityContext | nindent 12 }} - image: "{{ .Values.image.authContainerName }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: ["/bin/sh", "-c"] - args: - - |- - mkdir -p {{ .Values.config.volume.baseDir }}/config; - - install {{ .Values.config.volume.baseDir }}/authservice.conf {{ .Values.config.volume.baseDir }}/config/; - - #replacement of the variables in the authservice conf file - export AUTH_SSL=$(cat {{ .Values.config.volume.baseDir }}/DATA/ssl/authssl) - sed -i -e "s*AUTH_SSL*${AUTH_SSL}*g" {{ .Values.config.volume.baseDir }}/config/authservice.conf - - export SSL_TRUSTSTORE=$(cat {{ .Values.config.volume.baseDir }}/DATA/ssl/sslts) - sed -i -e "s*SSL_TRUSTSTORE*${SSL_TRUSTSTORE}*g" {{ .Values.config.volume.baseDir }}/config/authservice.conf - - export USERNAME=$(cat {{ .Values.config.volume.baseDir }}/DATA/ssl/username) - export USERPWD=$(cat {{ .Values.config.volume.baseDir }}/DATA/ssl/userpwd) - - #running the auth service - /bin/sh - # main run - if [ -f bin/accounts-application.jar ] - then - sha256sum bin/accounts-application.jar - echo - echo "CENM: starting CENM Auth service ..." - echo - java -Xmx{{ .Values.config.cordaJar.memorySize }}{{ .Values.config.cordaJar.unit }} -jar bin/accounts-application.jar --config-file {{ .Values.config.volume.baseDir }}/config/authservice.conf --initial-user-name $USERNAME --initial-user-password $USERPWD --keep-running --verbose - EXIT_CODE=${?} - else - echo "Missing Auth Service jar file in {{ .Values.config.jarPath }} folder:" - ls -al {{ .Values.config.jarPath }} - EXIT_CODE=110 - fi - - if [ "${EXIT_CODE}" -ne "0" ] - then - HOW_LONG={{ .Values.config.sleepTimeAfterError }} - echo - echo "exit code: ${EXIT_CODE} (error)" - echo "Going to sleep for requested ${HOW_LONG} seconds to let you login and investigate." 
- fi - sleep ${HOW_LONG} - echo - volumeMounts: - - name: auth-conf - mountPath: {{ .Values.config.volume.baseDir }}/authservice.conf - subPath: authservice.conf - - name: {{ .Values.prefix }}-auth-etc - mountPath: {{ .Values.config.volume.baseDir }}/etc - - name: certificates - mountPath: {{ .Values.config.volume.baseDir }}/DATA - - name: {{ .Values.prefix }}-auth-logs - mountPath: {{ .Values.config.volume.baseDir }}/logs - - name: {{ .Values.prefix }}-auth-h2 - mountPath: {{ .Values.config.volume.baseDir }}/h2 - {{- if .Values.livenessProbe.enabled }} - livenessProbe: - tcpSocket: - port: {{ .Values.service.port }} - initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.livenessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} - successThreshold: {{ .Values.livenessProbe.successThreshold }} - failureThreshold: {{ .Values.livenessProbe.failureThreshold }} - {{- end }} - {{- if .Values.readinessProbe.enabled }} - readinessProbe: - tcpSocket: - port: {{ .Values.service.port }} - initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.readinessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} - successThreshold: {{ .Values.readinessProbe.successThreshold }} - failureThreshold: {{ .Values.readinessProbe.failureThreshold }} - {{- end }} - resources: - requests: - memory: {{ .Values.config.pod.resources.requests }} - limits: - memory: {{ .Values.config.pod.resources.limits }} - {{- if .Values.config.logsContainersEnabled }} - - name: logs-auth - securityContext: - {{- toYaml .Values.config.securityContext | nindent 12 }} - image: "{{ .Values.image.authContainerName }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: ["/bin/bash", "-c"] - args: - - |- - cd {{ .Values.config.volume.baseDir }}/ - while true; do tail -f logs/accounts-service/*.log 2>/dev/null; sleep 5; done - # in case sth went wrong just wait indefinitely ... - tail -f /dev/null - volumeMounts: - - name: {{ .Values.prefix }}-auth-logs - mountPath: {{ .Values.config.volume.baseDir }}/logs - resources: - requests: - memory: {{ .Values.config.pod.resources.requests }} - limits: - memory: {{ .Values.config.pod.resources.limits }} - {{- end }} - volumes: - - name: auth-conf - configMap: - name: {{ .Values.prefix }}-auth-conf - defaultMode: 0777 - - name: certificates - emptyDir: - medium: Memory - {{- with .Values.config.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.config.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.config.tolerations }} - tolerations: - {{- toYaml . 
| nindent 8 }} - {{- end }} - volumeClaimTemplates: - - metadata: - name: {{ .Values.prefix }}-auth-etc - spec: - storageClassName: {{ .Values.storage.name }} - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: {{ .Values.config.pvc.volumeSizeAuthEtc }} - - metadata: - name: {{ .Values.prefix }}-auth-h2 - spec: - storageClassName: {{ .Values.storage.name }} - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: {{ .Values.config.pvc.volumeSizeAuthH2 }} - - metadata: - name: {{ .Values.prefix }}-auth-logs - spec: - storageClassName: {{ .Values.storage.name }} - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: {{ .Values.config.pvc.volumeSizeAuthLogs }} diff --git a/platforms/r3-corda-ent/charts/cenm-auth/templates/service.yaml b/platforms/r3-corda-ent/charts/cenm-auth/templates/service.yaml index cab7d65e06e..11e9515bde1 100644 --- a/platforms/r3-corda-ent/charts/cenm-auth/templates/service.yaml +++ b/platforms/r3-corda-ent/charts/cenm-auth/templates/service.yaml @@ -4,23 +4,30 @@ # SPDX-License-Identifier: Apache-2.0 ############################################################################################## +--- apiVersion: v1 kind: Service metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} + name: {{ include "auth.name" . }} + namespace: {{ .Release.Namespace }} labels: - run: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + app.kubernetes.io/name: auth-service + app.kubernetes.io/component: auth + app.kubernetes.io/part-of: {{ include "auth.fullname" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.service.port }} - targetPort: {{ .Values.service.port }} - protocol: TCP + type: ClusterIP selector: - app: {{ .Values.nodeName }} + app.kubernetes.io/name: auth-statefulset + app.kubernetes.io/component: auth + app.kubernetes.io/part-of: {{ include "auth.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + ports: + - name: auth + protocol: TCP + port: {{ .Values.global.cenm.auth.port }} + targetPort: {{ .Values.global.cenm.auth.port }} diff --git a/platforms/r3-corda-ent/charts/cenm-auth/templates/statefulset.yaml b/platforms/r3-corda-ent/charts/cenm-auth/templates/statefulset.yaml new file mode 100644 index 00000000000..7c45e358854 --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm-auth/templates/statefulset.yaml @@ -0,0 +1,121 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "auth.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ include "auth.fullname" . }} + app.kubernetes.io/name: auth-statefulset + app.kubernetes.io/component: auth + app.kubernetes.io/part-of: {{ include "auth.fullname" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} +spec: + replicas: 1 + podManagementPolicy: OrderedReady + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ include "auth.fullname" . }} + app.kubernetes.io/name: auth-statefulset + app.kubernetes.io/component: auth + app.kubernetes.io/part-of: {{ include "auth.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + serviceName: {{ include "auth.fullname" . }} + volumeClaimTemplates: + - metadata: + name: auth-h2 + spec: + storageClassName: storage-{{ .Release.Name }} + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storage.dbSize }} + - metadata: + name: auth-logs + spec: + storageClassName: storage-{{ .Release.Name }} + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storage.size }} + template: + metadata: + labels: + app: {{ include "auth.fullname" . }} + app.kubernetes.io/name: auth-statefulset + app.kubernetes.io/component: auth + app.kubernetes.io/part-of: {{ include "auth.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + hostname: {{ .Release.Name }} + imagePullSecrets: + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} + containers: + - name: auth + image: {{ .Values.image.auth.repository }}:{{ .Values.image.auth.tag }} + imagePullPolicy: IfNotPresent + command: ["sh", "-c"] + args: + - |- + # init-jwt + keytool -genkeypair -alias oauth-test-jwt -keyalg RSA -keypass password -keystore etc/jwt-store.jks -storepass password -dname "{{ .Values.subjects.auth }}" + # run the auth service + ./run.sh + volumeMounts: + - name: auth-conf + mountPath: /opt/cenm/authservice.conf + subPath: authservice.conf + - name: auth-conf + mountPath: /opt/cenm/run.sh + subPath: run.sh + - name: auth-etc + mountPath: /opt/cenm/etc + - name: auth-logs + mountPath: /opt/cenm/logs + - name: cenm-certs + mountPath: "/certs" + - name: logs + image: {{ .Values.image.auth.repository }}:{{ .Values.image.auth.tag }} + imagePullPolicy: IfNotPresent + command: ["sh", "-c"] + args: + - |- + cd /opt/cenm + while true; do tail -f logs/accounts-service/*.log 2>/dev/null; sleep 5; done + # in case sth went wrong just wait indefinitely ... + tail -f /dev/null + volumeMounts: + - name: auth-logs + mountPath: /opt/cenm/logs + volumes: + - name: cenm-certs + secret: + secretName: cenm-certs + - name: auth-conf + configMap: + name: {{ include "auth.fullname" . }}-conf + defaultMode: 0777 + - name: auth-etc + emptyDir: + medium: Memory diff --git a/platforms/r3-corda-ent/charts/cenm-auth/values.yaml b/platforms/r3-corda-ent/charts/cenm-auth/values.yaml index fde8f277b64..eabfc737c2f 100644 --- a/platforms/r3-corda-ent/charts/cenm-auth/values.yaml +++ b/platforms/r3-corda-ent/charts/cenm-auth/values.yaml @@ -4,223 +4,76 @@ # SPDX-License-Identifier: Apache-2.0 ############################################################################################## -# Default values for Auth service. +# Default values for cenm-auth chart. 
# This is a YAML-formatted file. # Declare variables to be passed into your templates. - -############################################################# -# Basic Configuration # -############################################################# -# Provide the name of the deployment -# Eg. nodeName: auth -nodeName: auth - -# This section contains the Corda Enterprise Auth metadata. -metadata: - # Provide the namespace for the Corda Enterprise Auth. - # Eg. namespace: cenm - namespace: cenm - # Provide any additional labels for the Corda Enterprise Auth. - labels: -# prefix for the deployment names e.g orgName (cenm) -prefix: - -# Provide information regarding the Docker images used. -image: - # Provide the alpine utils image, which is used for all init-containers of deployments/jobs. - # NOTE: The alpine image used is the base alpine image with CURL installed. - # Eg. initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - # Provide the image for the main Auth container. - # Eg. authContainerName: corda/enterprise-auth:1.5.1-zulu-openjdk8u242 - authContainerName: corda/enterprise-auth:1.5.1-zulu-openjdk8u242 - # Provide the docker-registry secret created and stored in kubernetes cluster as a secret. - # Eg. imagePullSecrets: regcred, can add multiple creds - imagePullSecrets: - - name: "" - # Pull policy to be used for the Docker image - # Eg. pullPolicy: Always - pullPolicy: IfNotPresent - -# This section contains the storage information. -storage: - # Provide the name of the storageclass. - # NOTE: Make sure that the storageclass exist prior to this deployment as - # this chart doesn't create the storageclass. - # Eg. name: cenm - name: cenm - -# Required parameter to start any .jar files -# e.g. acceptLicense: YES -acceptLicense: YES - -############################################################# -# HashiCorp Vault Configuration # -############################################################# -# NOTE: Make sure that the vault is already unsealed, intialized and configured to -# use Kubernetes service account token based authentication. -# For more info, see https://www.vaultproject.io/docs/auth/kubernetes - -vault: - # Provide the vault address - # Eg. address: http://vault.example.com:8200 - address: - # Provide the vault role used. - # Eg. role: vault-role - role: vault-role - # Provide the authpath configured to be used. - # Eg. authpath: entcordacenm - authPath: entcordacenm - # Provide the service account name autheticated to vault. - # NOTE: Make sure that the service account is already created and autheticated to use the vault. - # Eg. serviceAccountName: vault-auth +global: serviceAccountName: vault-auth - # Provide the vault path where the certificates are stored - # Eg. certsecretprefix: secret/cenm-org-name/signer/certs - certSecretPrefix: secret/cenm-org-name/signer/certs - # The amount of times to retry fetching from/writing to Vault before giving up. - # Eg. retries: 10 - retries: 10 - # The amount of time in seconds to wait after an error occurs when fetching from/writing to Vault. - # Eg. 
sleepTimeAfterError: 15 - sleepTimeAfterError: 15 + cluster: + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # set to true to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) + vault: + type: hashicorp + role: vault-role + address: + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + proxy: + #This will be the proxy/ingress provider. Can have values "ambassador" or "none" + #Eg. provider: "ambassador" + provider: "ambassador" + #This field contains the external URL of the node + #Eg. externalUrlSuffix: test.blockchaincloudpoc.com + externalUrlSuffix: test.blockchaincloudpoc.com + cenm: + sharedCreds: + truststore: password + keystore: password + auth: + port: 8081 -############################################################# -# Database Options and Configuration # -############################################################# -# Database configuration +storage: + size: 1Gi + dbSize: 5Gi + allowedTopologies: + enabled: false + +image: + #Provide the docker secret name in the namespace + #Eg. pullSecret: regcred + pullSecret: + #Pull policy to be used for the Docker image + #Eg. pullPolicy: IfNotPresent + pullPolicy: IfNotPresent + #Provide a valid image and version for auth service + auth: + repository: corda/enterprise-auth + tag: 1.5.9-zulu-openjdk8u382 + #Provide a valid image and version for enterprise-cli service + enterpriseCli: + repository: corda/enterprise-cli + tag: 1.5.9-zulu-openjdk8u382 + +# subjects +subjects: + auth: "CN=Test TLS Auth Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" + +# db related configuration database: - # Java class name to use for the database - # Eg. driverClassName: "org.h2.Driver" - driverClassName: org.h2.Driver - # The DB connection URL - # Eg. url: "jdbc:h2:file:./h2/identity-manager-persistence;DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=10000;WRITE_DELAY=0;AUTO_SERVER_PORT=0" + driverClassName: "org.h2.Driver" jdbcDriver: "" - url: - # DB user - # Eg. user: "example-db-user" - user: example-db-user - # DB password - # Eg. password: "example-db-password" - password: example-db-password - # Migrations of database can be run as part of the startup of Idman, if set to true. - # If set to false, it will be run prior to setting up the Idman. - # Eg. runMigration: "true" + url: "jdbc:h2:file:./h2/auth-persistence;DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=10000;WRITE_DELAY=0;AUTO_SERVER_PORT=0" + user: "auth-db-user" + password: "auth-db-password" runMigration: true -############################################################# -# Settings # -############################################################# -config: - # Provide volume related specifications - volume: - # Eg. baseDir: /opt/corda - baseDir: /opt/corda - - # Provide the path where the CENM Idman .jar-file is stored - # Eg. jarPath: bin - jarPath: bin - - # Provide the path where the CENM Service configuration files are stored - # Eg. 
configPath: etc - configPath: etc - - # Provide any extra annotations for the PVCs - pvc: - # annotations: - # key: "value" - annotations: {} - # Volume size for etc/ directory - volumeSizeAuthEtc: - # Volume size for h2/ directory - volumeSizeAuthH2: - # Volume size for logs/ directory - volumeSizeAuthLogs: - - # Provide any extra annotations for the deployment - deployment: - # annotations: - # key: "value" - annotations: {} - - # Set memory limits of pod - pod: - resources: - limits: - requests: - - nodeSelector: {} - - tolerations: [] - - affinity: {} - - podSecurityContext: - runAsUser: 1000 - runAsGroup: 1000 - fsGroup: 1000 - - securityContext: {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - # runAsUser: 1000 - # Provide the number of replicas for your pods - # Eg. replicas: 1 - replicas: 1 - # Enable container displaying live logs - logsContainersEnabled: true - - # Specify the maximum size of the memory allocation pool - cordaJar: - # Provide the memory size. - # Eg. memorySize: 4096 (if using kilobytes) - # Eg. memorySize: 512 (if using megabytes) - # Eg. memorySize: 1 (if using gigabytes) - memorySize: 512 - # Provide the unit of greatness for the size, one of three options: - # - k or K for kilobytes - # - m or M for megabytes - # - g or G for gigabytes - # Eg. unit: M - unit: M - - # Sleep time in seconds, occurs after any error is encountered in start-up - # Eg. 120 - sleepTimeAfterError: 120 - -## Liveness and readiness probe values -## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes -## -livenessProbe: - enabled: false - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 -readinessProbe: - enabled: false - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - -######################################## -### Auth Configuration ### -######################################## - -service: - # e.g type: ClusterIP - type: ClusterIP - # e.g port: 8081 - port: 8081 +# auth specific settings +creds: + authInitUserName: admin + authInitUserPassword: p4ssWord -nodeSelector: {} -tolerations: [] -affinity: {} -# e.g authSubject: CN=abc1, OU=abc2, O=abc3, L=abc4, ST=abc5, C=abc6 -authSubject: +# Sleep time (in seconds) after an error occured +sleepTimeAfterError: 300 +# base dir path +baseDir: /opt/cenm diff --git a/platforms/r3-corda-ent/charts/cenm-gateway/Chart.yaml b/platforms/r3-corda-ent/charts/cenm-gateway/Chart.yaml index d09762763b4..cb2feadcaa4 100644 --- a/platforms/r3-corda-ent/charts/cenm-gateway/Chart.yaml +++ b/platforms/r3-corda-ent/charts/cenm-gateway/Chart.yaml @@ -5,7 +5,21 @@ ############################################################################################## apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-ent: Deploys the CENM gateway service." 
name: cenm-gateway +description: "R3 Corda Enterprise Network Manager Gateway Service" version: 1.0.0 +appVersion: "latest" +keywords: + - bevel + - corda + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/r3-corda-ent/charts/cenm-gateway/README.md b/platforms/r3-corda-ent/charts/cenm-gateway/README.md index 41834b0f9bd..cc868964fb4 100644 --- a/platforms/r3-corda-ent/charts/cenm-gateway/README.md +++ b/platforms/r3-corda-ent/charts/cenm-gateway/README.md @@ -3,186 +3,92 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) - -# Gateway Deployment - -- [Gateway Deployment Helm Chart](#Gateway-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Contributing](#contributing) -- [License](#license) - - -## Gateway Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/cenm-gateway) deploys the CENM Gateway Service. - - -## Prerequisites ---- -Before deploying the chart please ensure you have the following prerequisites: - -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm is installed. - - -## Chart Structure ---- -This chart has following structue: -``` - ├── cenm-gateway - │ ├── Chart.yaml - │ ├── templates - │ │ ├── deployment.yaml - │ │ ├── _helpers.tpl - │ │ ├── pvc.yaml - | | |__ job.yaml - | | |__ configmap.yaml - │ │ └── service.yaml - │ └── values.yaml +# cenm gateway-service + +This chart is a component of Hyperledger Bevel. The cenm-gateway chart deploys a R3 Corda Enterprise gateway. If enabled, the keys are then stored on the configured vault and stored as Kubernetes secrets. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details. + +## TL;DR + +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install gateway bevel/cenm-gateway ``` -Type of files used: - -- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed. -- `deployment.yaml` : Deploy and manage an application in a Kubernetes cluster using a Deployment resource.It includes an init container responsible for fetching secrets from a Vault server and creating directories.Optional liveness and readiness probes can be used to monitor the health and readiness of the application. -- `_helpers.tpl` : A template file used for defining custom labels in the Helm chart. -- `pvc.yaml` : A PersistentVolumeClaim (PVC) is a request for storage by a user. -- `service.yaml` : This file defines a Kubernetes Service with multiple ports for protocols and targets, and supports Ambassador proxy annotations for specific configurations when using the "ambassador" proxy provider. -- `configmap.yaml` : ConfigMap resource in Kubernetes with a specific name and namespace, along with labels for identification. 
-- `job.yaml` : The job has two init containers for pre-task checks and a main container that runs a shell script, And main container is mounted with necessary files from a ConfigMap. -- `chart.yaml` : Provides metadata about the chart, such as its name, version, and description. -- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the metadata, image, storage, Vault, etc. - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/cenm-gateway/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: -## Parameters ---- +## Prerequisites -### Name +- Kubernetes 1.19+ +- Helm 3.2.0+ -| Name | Description | Default Value | -| -----------| -------------------------------------------------- | ------------- | -| nodeName | Provide the name of the node | gateway | +If Hashicorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ -### Metadata +> **Important**: Ensure the `enterprise-init` chart has been installed before installing this. Also check the dependent charts. Installing this chart seperately is not required as it is a dependent chart for cenm, and is installed with cenm chart. -| Name | Description | Default Value | -| ----------------| ----------------------------------------------------------------| ------------- | -| namespace | Provide the namespace for the Corda Enterprise Auth | cenm-ent | -| labels | Provide any additional labels for the Corda Enterprise Auth | "" | +## Installing the Chart -### Image +To install the chart with the release name `gateway`: -| Name | Description | Default Value | -| ------------------------ | ---------------------------------------------------------------------------------- | ---------------------------------------------- | -| initContainerName | Information about the Docker container used for the init-containers | ghcr.io/hyperledger | -| gatewayContainerName | Provide the image for the main Signer container |corda/enterprise-gateway:1.5.1-zulu-openjdk8u242| -| ImagePullSecret | Provide the K8s secret that has rights for pulling the image off the registry | "" | -| pullPolicy | Provide the pull policy for Docker images, either Always or IfNotPresent | IfNotPresent | - -### CenmServices - -| Name | Description | Default Value | -| ---------------| ------------------------------------------| ------------- | -| idmanName | Provide the name of the idman | idman | -| zoneName | Name of the zone service | zone | -| zonePort | Zone Service port | 12345 | -| gatewayPort | Gateway Service port | 8080 | -| authName | Name of the auth service | auth | -| authPort | Auth Service port | 8081 | - -### storage - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ------------- | -| name | Provide the name of the storage class | cordaentsc | - -### Vault - -| Name | Description | Default Value | -| ------------------------- | --------------------------------------------------------------------------| ------------- | -| address | Address/URL of the Vault server | "" | -| role | Role used for authentication with Vault | vault-role | -| authpath | Authentication path for Vault | entcordacenm | -| serviceAccountName | Provide the already created service account name autheticated to vault | vault-auth | -| certSecretPrefix | Provide the vault path where the certificates 
are stored | "" | -| retries | Amount of times to retry fetching from/writing to Vault before giving up | 10 | - -### Config - -| Name | Description | Default Value | -| ------------------------ | -------------------------------------------------------------------------------------- | --------------- | -| baseDir | Provide volume related specifications | "" | -| jarPath | Provide the path where the CENM Idman .jar-file is stored | "bin" | -| configPath | Provide the path where the CENM Service configuration files are stored | "etc" | -| pvc | Provide any extra annotations for the PVCs | "" | -| deployment | Provide any extra annotations for the deployment | "value" | -| pod | Set memory limits of pod | "" | -| podSecurityContext | Allows you to set security-related settings at the Pod level | "" | -| securityContext | Securitycontext at pod level | "" | -| replicas | Provide the number of replicas for your pods | "1" | -| logsContainersEnabled | Enable container displaying live logs | "true" | -| cordaJar | Specify the maximum size of the memory allocation pool | "512" | -| sleepTimeAfterError | Sleep time in seconds, occurs after any error is encountered in start-up | "120" | - -### Service - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ------------- | -| type | Provide the type of service | ClusterIP | -| port | provide the port for service | 8080 | - - - -## Deployment ---- - -To deploy the Gateway Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/cenm-gateway/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install, upgrade, verify delete the chart: - -To install the chart: ```bash -helm repo add bevel https://hyperledger.github.io/bevel/ -helm install ./cenm-gateway +helm repo add bevel https://hyperledger.github.io/bevel +helm install gateway bevel/cenm-gateway ``` -To upgrade the chart: -```bash -helm upgrade ./cenm-gateway -``` +The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. -To verify the deployment: -```bash -kubectl get jobs -n -``` -Note : Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `gateway` deployment: -To delete the chart: ```bash -helm uninstall +helm uninstall gateway ``` -Note : Replace `` with the desired name for the release. - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Gateway Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/cenm-gateway), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). +The command removes all the Kubernetes components associated with the chart and deletes the release. 
+ +## Parameters + +### Global parameters +These parameters are referred to in the same way in each parent and child chart +| Name | Description | Default Value | +|--------|---------|-------------| +|`global.serviceAccountName` | The service account name that will be used for Vault auth management| `vault-auth` | +| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. Currently only `aws` and `minikube` are tested | `aws` | +| `global.cluster.cloudNativeServices` | Only `false` is implemented; `true` (Cloud Native Services: SecretsManager and IAM for AWS, KeyVault & Managed Identities for Azure) is reserved for future use | `false` | +| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` is supported. | `hashicorp` | +| `global.vault.role` | Role used for authentication with Vault | `vault-role` | +| `global.vault.address`| URL of the Vault server. | `""` | +| `global.vault.authPath` | Authentication path for Vault | `supplychain` | +| `global.vault.secretEngine` | The Vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | The Vault secret prefix; must start with `data/` | `data/supplychain` | +| `global.proxy.provider` | The proxy or Ingress provider. Can be `none` or `ambassador` | `ambassador` | +| `global.proxy.externalUrlSuffix` | The external URL suffix at which the gateway service will be exposed | `test.blockchaincloudpoc.com` | +| `global.cenm.sharedCreds.truststore` | The password for the PKI-generated truststores | `password` | +| `global.cenm.sharedCreds.keystore` | The password for the PKI-generated keystores | `password` | +| `global.cenm.gateway.port` | The port for the gateway API | `8080` | +| `global.cenm.zone.adminPort` | The admin port of the zone service | `12345` | + +### Storage + +| Name | Description | Default Value | +|--------|---------|-------------| +| `storage.allowedTopologies.enabled` | Check [bevel-storageclass](../../../shared/charts/bevel-storageclass/README.md) for details | `false` | + + +### Image +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | +| `image.pullPolicy` | Pull policy to be used for the Docker images | `IfNotPresent` | +| `image.gateway.repository` | CENM gateway image repository | `corda/enterprise-gateway`| +| `image.gateway.tag` | CENM gateway image tag (per CENM release version) | `1.5.9-zulu-openjdk8u382`| - ## License This chart is licensed under the Apache v2.0 license.
-Copyright © 2023 Accenture +Copyright © 2024 Accenture ### Attribution diff --git a/platforms/r3-corda-ent/charts/cenm-gateway/files/gateway.conf b/platforms/r3-corda-ent/charts/cenm-gateway/files/gateway.conf index 5188eecf93a..5532df2bd89 100644 --- a/platforms/r3-corda-ent/charts/cenm-gateway/files/gateway.conf +++ b/platforms/r3-corda-ent/charts/cenm-gateway/files/gateway.conf @@ -1,8 +1,8 @@ auth { - serverUrl = "https://{{ .Values.cenmServices.authName }}.{{ .Values.metadata.namespace }}:{{ .Values.cenmServices.authPort }}" + serverUrl = "https://auth.{{ .Release.Namespace }}:{{ .Values.global.cenm.auth.port }}" sslConfig = { - trustStore = "./DATA/root/corda-ssl-trust-store.jks" - trustStorePassword = "password" + trustStore = "/certs/corda-ssl-trust-store.jks" + trustStorePassword = {{ .Values.global.cenm.sharedCreds.truststore }} } clientCredentials = { clientId = "gateway1" @@ -11,20 +11,20 @@ auth { } cenm { - zoneHost: "{{ .Values.cenmServices.zoneName }}.{{ .Values.metadata.namespace }}" - zonePort: {{ .Values.cenmServices.zonePort }} + zoneHost: zone.{{ .Release.Namespace }} + zonePort: {{ .Values.global.cenm.zone.adminPort }} ssl = { keyStore = { - location = ./DATA/root/corda-ssl-identity-manager-keys.jks - password = IDMAN_SSL + location = /certs/corda-ssl-identity-manager-keys.jks + password = {{ .Values.global.cenm.sharedCreds.keystore }} } trustStore = { - location = ./DATA/root/corda-ssl-trust-store.jks - password = SSL_TRUSTSTORE + location = /certs/corda-ssl-trust-store.jks + password = {{ .Values.global.cenm.sharedCreds.truststore }} } } } server { - port = {{ .Values.service.port }} + port = {{ .Values.global.cenm.gateway.port }} } diff --git a/platforms/r3-corda-ent/charts/cenm-gateway/files/run.sh b/platforms/r3-corda-ent/charts/cenm-gateway/files/run.sh new file mode 100644 index 00000000000..131bdbfec40 --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm-gateway/files/run.sh @@ -0,0 +1,29 @@ +#!/bin/sh + +# +# main run +# +if [ -f /opt/cenm/bin/gateway.jar ] +then + echo + echo "CENM: starting CENM Gateway service ..." + echo + java -jar /opt/cenm/bin/gateway.jar --config-file gateway.conf + EXIT_CODE=${?} +else + echo "Missing gateway service jar file." + EXIT_CODE=110 +fi + +if [ "${EXIT_CODE}" -ne "0" ] +then + HOW_LONG={{ .Values.sleepTimeAfterError }} + echo + echo "Gateway service failed - exit code: ${EXIT_CODE} (error)" + echo + echo "Going to sleep for requested ${HOW_LONG} seconds to let you login and investigate." + echo +fi + +sleep ${HOW_LONG} +echo diff --git a/platforms/r3-corda-ent/charts/cenm-gateway/requirements.yaml b/platforms/r3-corda-ent/charts/cenm-gateway/requirements.yaml new file mode 100644 index 00000000000..895f0a0e1cf --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm-gateway/requirements.yaml @@ -0,0 +1,7 @@ +dependencies: + - name: bevel-storageclass + alias: storage + repository: "file://../../../shared/charts/bevel-storageclass" + tags: + - storage + version: ~1.0.0 diff --git a/platforms/r3-corda-ent/charts/cenm-gateway/templates/_helpers.tpl b/platforms/r3-corda-ent/charts/cenm-gateway/templates/_helpers.tpl index 7f9b0dc6131..27f2a7f4e02 100644 --- a/platforms/r3-corda-ent/charts/cenm-gateway/templates/_helpers.tpl +++ b/platforms/r3-corda-ent/charts/cenm-gateway/templates/_helpers.tpl @@ -1,5 +1,29 @@ -{{- define "labels.custom" -}} - {{- range $key, $val := .Values.metadata.labels -}} - {{- $key -}}: {{- $val -}} - {{- end -}} +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "gateway.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "gateway.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "gateway.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- end -}} diff --git a/platforms/r3-corda-ent/charts/cenm-gateway/templates/configmap.yaml b/platforms/r3-corda-ent/charts/cenm-gateway/templates/configmap.yaml index a7cea88b55f..d01029d0e0c 100644 --- a/platforms/r3-corda-ent/charts/cenm-gateway/templates/configmap.yaml +++ b/platforms/r3-corda-ent/charts/cenm-gateway/templates/configmap.yaml @@ -8,14 +8,19 @@ apiVersion: v1 kind: ConfigMap metadata: - name: {{ .Values.prefix }}-gateway-conf - namespace: {{ .Values.metadata.namespace }} + name: {{ include "gateway.fullname" . }}-conf + namespace: {{ .Release.Namespace }} labels: - app.kubernetes.io/name: {{ .Values.nodeName }} + app.kubernetes.io/name: {{ include "gateway.fullname" . }} helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - {{- include "labels.custom" . | nindent 2 }} + app.kubernetes.io/part-of: {{ include "gateway.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} data: + run.sh: |+ +{{ tpl (.Files.Get "files/run.sh") . | indent 4 }} + gateway.conf: |+ {{ tpl (.Files.Get "files/gateway.conf") . | indent 4 }} diff --git a/platforms/r3-corda-ent/charts/cenm-gateway/templates/deployment.yaml b/platforms/r3-corda-ent/charts/cenm-gateway/templates/deployment.yaml deleted file mode 100644 index 67e43af4cb1..00000000000 --- a/platforms/r3-corda-ent/charts/cenm-gateway/templates/deployment.yaml +++ /dev/null @@ -1,283 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . 
| nindent 2 }} - annotations: - helm.sh/hook-weight: "0" -spec: - serviceName: {{ .Values.nodeName }} - replicas: 1 - selector: - matchLabels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - template: - metadata: - labels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - {{- with .Values.image.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - serviceAccountName: {{ .Values.vault.serviceAccountName }} - securityContext: - {{- toYaml .Values.config.podSecurityContext | nindent 8 }} - initContainers: - - name: init-certificates - image: {{ .Values.image.initContainerName }} - imagePullPolicy: {{ .Values.image.pullPolicy }} - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authPath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.config.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certSecretPrefix }} - - name: IDMAN_NODENAME - value: {{ $.Values.cenmServices.idmanName }} - - name: MOUNT_PATH - value: "/DATA" - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code and curl_response - $curl_response" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - # setting up env to get secrets from vault - echo "Getting secrets from Vault Server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - - # Creating dirs for storing certificates and credentials - mkdir -p ${MOUNT_PATH}/root; - mkdir -p ${MOUNT_PATH}/ssl; - mkdir -p ${MOUNT_PATH}/truststore; - - - # ssl trust-stores and auth key store from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/root/certs | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/root/certs" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - - corda_ssl_trust_store=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["corda-ssl-trust-store.jks"]') - echo "${corda_ssl_trust_store}" | base64 -d > ${MOUNT_PATH}/root/corda-ssl-trust-store.jks - - # ssl identity manager keys from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/${IDMAN_NODENAME}/certs | jq -r 'if .errors then . else . 
end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/${IDMAN_NODENAME}/certs" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - - corda_ssl_identity_manager_keys=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["corda-ssl-identity-manager-keys.jks"]') - echo "${corda_ssl_identity_manager_keys}" | base64 -d > ${MOUNT_PATH}/root/corda-ssl-identity-manager-keys.jks - OUTPUT_PATH=${MOUNT_PATH}/ssl; - - # Fetching the idman ssl credentials from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials/ssl | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials/ssl" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - IDMAN_SSL=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["idman"]') - echo "${IDMAN_SSL}"> ${OUTPUT_PATH}/idmanssl - - #Fetching ssl truststore from vault - OUTPUT_PATH=${MOUNT_PATH}/truststore; - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials/truststore | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials/truststore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - SSL_TRUSTSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["ssl"]') - echo "${SSL_TRUSTSTORE}"> ${OUTPUT_PATH}/sslts - - echo "Done" - volumeMounts: - - name: certificates - mountPath: /DATA - - name: init-check-auth - env: - - name: ACCEPT_LICENSE - value: "{{required "You must accept the license agreement to use this software." .Values.acceptLicense }}" - securityContext: - {{- toYaml .Values.config.securityContext | nindent 12 }} - image: "{{ .Values.image.gatewayContainerName }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: ["/bin/bash", "-c"] - args: - - |- - echo Probing {{ .Values.cenmServices.authName }}.{{ .Values.metadata.namespace }} port {{ .Values.cenmServices.authPort }} - until nc -w 5 -vz {{ .Values.cenmServices.authName }}.{{ .Values.metadata.namespace }} {{ .Values.cenmServices.authPort }} > /dev/null 2>&1 - do - echo "Connect to {{ .Values.cenmServices.authName }}.{{ .Values.metadata.namespace }} port {{ .Values.cenmServices.authPort }} (tcp) failed: Connection refused" - done - echo "Connection {{ .Values.cenmServices.authName }}.{{ .Values.metadata.namespace }} port {{ .Values.cenmServices.authPort }} successful" - resources: - {{- toYaml .Values.config.pod.resources | nindent 12 }} - containers: - - name: main - env: - - name: "CENM_ZONE_PORT" - value: "{{ .Values.cenmServices.zonePort }}" - securityContext: - {{- toYaml .Values.config.securityContext | nindent 12 }} - image: "{{ .Values.image.gatewayContainerName }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: ["/bin/sh", "-c"] - args: - - |- - mkdir -p {{ .Values.config.volume.baseDir }}/config; - install {{ .Values.config.volume.baseDir }}/etc/gateway.conf {{ .Values.config.volume.baseDir }}/config/; - - #replacing the variables in idman.conf with actual values - export IDMAN_SSL=$(cat {{ .Values.config.volume.baseDir }}/DATA/ssl/idmanssl) - sed -i -e "s*IDMAN_SSL*${IDMAN_SSL}*g" {{ .Values.config.volume.baseDir }}/config/gateway.conf - export SSL_TRUSTSTORE=$(cat {{ .Values.config.volume.baseDir }}/DATA/truststore/sslts) - sed -i -e "s*SSL_TRUSTSTORE*${SSL_TRUSTSTORE}*g" {{ .Values.config.volume.baseDir }}/config/gateway.conf - - #running the gateway service - /bin/sh - # main run - if [ -f bin/gateway.jar ] - then - sha256sum bin/gateway.jar 
- echo - echo "CENM: starting CENM gateway service ..." - echo - java -Xmx{{ .Values.config.cordaJar.memorySize }}{{ .Values.config.cordaJar.unit }} -jar bin/gateway.jar --config-file {{ .Values.config.volume.baseDir }}/config/gateway.conf -v --logging-level=ALL - EXIT_CODE=${?} - else - echo "Missing Gateway Service jar file in {{ .Values.config.jarPath }} folder:" - ls -al {{ .Values.config.jarPath }} - EXIT_CODE=110 - fi - if [ "${EXIT_CODE}" -ne "0" ] - then - HOW_LONG={{ .Values.config.sleepTimeAfterError }} - echo - echo "exit code: ${EXIT_CODE} (error)" - echo "Going to sleep for requested ${HOW_LONG} seconds to let you login and investigate." - fi - sleep ${HOW_LONG} - echo - volumeMounts: - - name: gateway-conf - mountPath: {{ .Values.config.volume.baseDir }}/etc/gateway.conf - subPath: gateway.conf - - name: {{ .Values.prefix }}-gateway-etc - mountPath: {{ .Values.config.volume.baseDir }}/etc - - name: certificates - mountPath: {{ .Values.config.volume.baseDir }}/DATA - - name: {{ .Values.prefix }}-gateway-logs - mountPath: {{ .Values.config.volume.baseDir }}/logs - {{- if .Values.livenessProbe.enabled }} - livenessProbe: - tcpSocket: - port: {{ .Values.service.port }} - initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.livenessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} - successThreshold: {{ .Values.livenessProbe.successThreshold }} - failureThreshold: {{ .Values.livenessProbe.failureThreshold }} - {{- end }} - {{- if .Values.readinessProbe.enabled }} - readinessProbe: - tcpSocket: - port: {{ .Values.service.port }} - initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.readinessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} - successThreshold: {{ .Values.readinessProbe.successThreshold }} - failureThreshold: {{ .Values.readinessProbe.failureThreshold }} - {{- end }} - resources: - {{- toYaml .Values.config.pod.resources | nindent 12 }} - {{- if .Values.config.logsContainersEnabled }} - - name: logs-gateway - securityContext: - {{- toYaml .Values.config.securityContext | nindent 12 }} - image: "{{ .Values.image.gatewayContainerName }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: ["/bin/bash", "-c"] - args: - - |- - cd {{ .Values.config.volume.baseDir }}/ - while true; do tail -f logs/gateway-service/*.log 2>/dev/null; sleep 5; done - # in case sth went wrong just wait indefinitely ... - tail -f /dev/null - volumeMounts: - - name: {{ .Values.prefix }}-gateway-logs - mountPath: {{ .Values.config.volume.baseDir }}/logs - resources: - {{- toYaml .Values.config.pod.resources | nindent 12 }} - {{- end }} - volumes: - - name: gateway-conf - configMap: - name: {{ .Values.prefix }}-gateway-conf - defaultMode: 0777 - - name: certificates - emptyDir: - medium: Memory - {{- with .Values.config.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.config.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.config.tolerations }} - tolerations: - {{- toYaml . 
| nindent 8 }} - {{- end }} - volumeClaimTemplates: - - metadata: - name: {{ .Values.prefix }}-gateway-etc - spec: - storageClassName: {{ .Values.storage.name }} - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: {{ .Values.config.pvc.volumeSizeGatewayEtc }} - - metadata: - name: {{ .Values.prefix }}-gateway-logs - spec: - storageClassName: {{ .Values.storage.name }} - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: {{ .Values.config.pvc.volumeSizeGatewayLogs }} diff --git a/platforms/r3-corda-ent/charts/cenm-gateway/templates/job.yaml b/platforms/r3-corda-ent/charts/cenm-gateway/templates/job.yaml index 10200421389..348b6c0247e 100644 --- a/platforms/r3-corda-ent/charts/cenm-gateway/templates/job.yaml +++ b/platforms/r3-corda-ent/charts/cenm-gateway/templates/job.yaml @@ -8,128 +8,108 @@ apiVersion: batch/v1 kind: Job metadata: - name: {{ .Values.nodeName }}-job - namespace: {{ .Values.metadata.namespace }} + name: {{ template "gateway.fullname" . }}-job + namespace: {{ .Release.Namespace }} labels: - app: {{ .Values.nodeName }}-job - app.kubernetes.io/name: {{ .Values.nodeName }}-job - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + app: {{ include "gateway.fullname" . }}-job + app.kubernetes.io/name: gateway-job + app.kubernetes.io/component: gateway + app.kubernetes.io/part-of: {{ include "gateway.fullname" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} annotations: helm.sh/hook-weight: "5" spec: backoffLimit: {{ .Values.backoffLimit }} template: spec: - {{- with .Values.image.imagePullSecrets }} + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 imagePullSecrets: - {{- toYaml . 
| nindent 8 }} - {{- end }} + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} initContainers: - - name: {{ .Chart.Name }}-check-gateway - securityContext: - {{- toYaml .Values.config.securityContext | nindent 12 }} - image: "{{ .Values.image.gatewayContainerName }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} + - name: init-check-gateway + image: {{ .Values.image.gateway.repository }}:{{ .Values.image.gateway.tag }} + imagePullPolicy: IfNotPresent command: ["/bin/bash", "-c"] args: - |- - echo Probing gateway port {{ .Values.cenmServices.gatewayPort }} - until nc -w 5 -vz {{ .Values.nodeName }}.{{ .Values.metadata.namespace }} {{ .Values.cenmServices.gatewayPort }} > /dev/null 2>&1 + echo Probing gateway.{{ .Release.Namespace }} port {{ .Values.global.cenm.gateway.port }} + until nc -w 5 -vz gateway.{{ .Release.Namespace }} {{ .Values.global.cenm.gateway.port }} > /dev/null 2>&1 do - echo "Connect to {{ .Values.nodeName }}.{{ .Values.metadata.namespace }} port {{ .Values.cenmServices.gatewayPort }} (tcp) failed: Connection refused" + echo "Connect to gateway.{{ .Release.Namespace }} port {{ .Values.global.cenm.gateway.port }} (tcp) failed: Connection refused" done - echo "Connection {{ .Values.nodeName }}.{{ .Values.metadata.namespace }} port {{ .Values.cenmServices.gatewayPort }} successful" - resources: - {{- toYaml .Values.config.pod.resources | nindent 12 }} - - name: {{ .Chart.Name }}-check-auth - securityContext: - {{- toYaml .Values.config.securityContext | nindent 12 }} - image: "{{ .Values.image.gatewayContainerName }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} + echo "Connection gateway.{{ .Release.Namespace }} port {{ .Values.global.cenm.gateway.port }} successful" + - name: init-check-auth + image: {{ .Values.image.gateway.repository }}:{{ .Values.image.gateway.tag }} + imagePullPolicy: IfNotPresent command: ["/bin/bash", "-c"] args: - |- - echo Probing {{ .Values.cenmServices.authName }}.{{ .Values.metadata.namespace }} port {{ .Values.cenmServices.authPort }} - until nc -w 5 -vz {{ .Values.cenmServices.authName }}.{{ .Values.metadata.namespace }} {{ .Values.cenmServices.authPort }} > /dev/null 2>&1 + echo Probing auth.{{ .Release.Namespace }} port {{ .Values.global.cenm.auth.port }} + until nc -w 5 -vz auth.{{ .Release.Namespace }} {{ .Values.global.cenm.auth.port }} > /dev/null 2>&1 do - echo "Connect to {{ .Values.cenmServices.authName }}.{{ .Values.metadata.namespace }} port {{ .Values.cenmServices.authPort }} (tcp) failed: Connection refused" + echo "Connect to auth.{{ .Release.Namespace }} port {{ .Values.global.cenm.auth.port }} (tcp) failed: Connection refused" done - echo "Connection {{ .Values.cenmServices.authName }}.{{ .Values.metadata.namespace }} port {{ .Values.cenmServices.authPort }} successful" - resources: - {{- toYaml .Values.config.pod.resources | nindent 12 }} + echo "Connection auth.{{ .Release.Namespace }} port {{ .Values.global.cenm.auth.port }} successful" containers: - name: main - securityContext: - {{- toYaml .Values.config.securityContext | nindent 12 }} - image: "{{ .Values.image.gatewayContainerName }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} + image: {{ .Values.image.gateway.repository }}:{{ .Values.image.gateway.tag }} + imagePullPolicy: IfNotPresent command: ["/bin/bash", "-c"] args: - |- - pwd - cd {{ .Values.config.volume.baseDir }}/CM-FILES/ - ls -alR - ./setupAuth.sh {{ .Values.nodeName }}.{{ .Values.metadata.namespace }} {{ .Values.cenmServices.gatewayPort }} - resources: - 
{{- toYaml .Values.config.pod.resources | nindent 12 }} + # run the auth users setup + cd /opt/cenm/CM-FILES + ./setupAuth.sh gateway.{{ .Release.Namespace }} {{ .Values.global.cenm.gateway.port }} volumeMounts: - name: gateway-conf - mountPath: {{ .Values.config.volume.baseDir }}/CM-FILES/setupAuth.sh + mountPath: /opt/cenm/CM-FILES/setupAuth.sh subPath: setupAuth.sh - name: gateway-conf - mountPath: {{ .Values.config.volume.baseDir }}/CM-FILES/g/configuration-maintainers.json + mountPath: /opt/cenm/CM-FILES/g/configuration-maintainers.json subPath: configuration-maintainers.json - name: gateway-conf - mountPath: {{ .Values.config.volume.baseDir }}/CM-FILES/g/network-maintainers.json + mountPath: /opt/cenm/CM-FILES/g/network-maintainers.json subPath: network-maintainers.json - name: gateway-conf - mountPath: {{ .Values.config.volume.baseDir }}/CM-FILES/g/network-operation-readers.json + mountPath: /opt/cenm/CM-FILES/g/network-operation-readers.json subPath: network-operation-readers.json - name: gateway-conf - mountPath: {{ .Values.config.volume.baseDir }}/CM-FILES/g/network-operators.json + mountPath: /opt/cenm/CM-FILES/g/network-operators.json subPath: network-operators.json - name: gateway-conf - mountPath: {{ .Values.config.volume.baseDir }}/CM-FILES/r/ConfigurationMaintainer.json + mountPath: /opt/cenm/CM-FILES/r/ConfigurationMaintainer.json subPath: ConfigurationMaintainer.json - name: gateway-conf - mountPath: {{ .Values.config.volume.baseDir }}/CM-FILES/r/NetworkMaintainer.json + mountPath: /opt/cenm/CM-FILES/r/NetworkMaintainer.json subPath: NetworkMaintainer.json - name: gateway-conf - mountPath: {{ .Values.config.volume.baseDir }}/CM-FILES/r/NetworkOperationsReader.json + mountPath: /opt/cenm/CM-FILES/r/NetworkOperationsReader.json subPath: NetworkOperationsReader.json - name: gateway-conf - mountPath: {{ .Values.config.volume.baseDir }}/CM-FILES/r/NetworkOperator.json + mountPath: /opt/cenm/CM-FILES/r/NetworkOperator.json subPath: NetworkOperator.json - name: gateway-conf - mountPath: {{ .Values.config.volume.baseDir }}/CM-FILES/u/business-reader.json + mountPath: /opt/cenm/CM-FILES/u/business-reader.json subPath: business-reader.json - name: gateway-conf - mountPath: {{ .Values.config.volume.baseDir }}/CM-FILES/u/config-maintainer.json + mountPath: /opt/cenm/CM-FILES/u/config-maintainer.json subPath: config-maintainer.json - name: gateway-conf - mountPath: {{ .Values.config.volume.baseDir }}/CM-FILES/u/network-maintainer.json + mountPath: /opt/cenm/CM-FILES/u/network-maintainer.json subPath: network-maintainer.json - name: gateway-conf - mountPath: {{ .Values.config.volume.baseDir }}/CM-FILES/u/network-operator.json + mountPath: /opt/cenm/CM-FILES/u/network-operator.json subPath: network-operator.json restartPolicy: OnFailure volumes: - name: gateway-conf configMap: - name: {{ .Values.prefix }}-gateway-conf + name: {{ include "gateway.fullname" . }}-conf defaultMode: 0777 - {{- with .Values.config.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.config.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.config.tolerations }} - tolerations: - {{- toYaml . 
| nindent 8 }} - {{- end }} diff --git a/platforms/r3-corda-ent/charts/cenm-gateway/templates/service.yaml b/platforms/r3-corda-ent/charts/cenm-gateway/templates/service.yaml index 38644502172..afa42d248bf 100644 --- a/platforms/r3-corda-ent/charts/cenm-gateway/templates/service.yaml +++ b/platforms/r3-corda-ent/charts/cenm-gateway/templates/service.yaml @@ -8,61 +8,26 @@ apiVersion: v1 kind: Service metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} + name: {{ include "gateway.name" . }} + namespace: {{ .Release.Namespace }} labels: - run: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + app.kubernetes.io/name: gateway-service + app.kubernetes.io/component: gateway + app.kubernetes.io/part-of: {{ include "gateway.fullname" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.service.port }} - targetPort: {{ .Values.service.port }} - protocol: TCP + type: ClusterIP selector: - app: {{ .Values.nodeName }} -{{ if $.Values.ambassador }} ---- -apiVersion: getambassador.io/v3alpha1 -kind: Host -metadata: - name: {{ .Values.nodeName }}-host -spec: - hostname: {{ .Values.nodeName }}.{{ .Values.ambassador.external_url_suffix }} - acmeProvider: - authority: none - requestPolicy: - insecure: - action: Reject - tlsSecret: - name: {{ .Values.nodeName }}-ambassador-certs - namespace: {{ .Values.metadata.namespace }} - tls: - min_tls_version: v1.2 ---- -apiVersion: getambassador.io/v3alpha1 -kind: TCPMapping -metadata: - name: {{ .Values.nodeName }}-https - namespace: {{ .Values.metadata.namespace }} -spec: - host: {{ .Values.nodeName }}.{{ .Values.ambassador.external_url_suffix }} - port: {{ .Values.ambassador.port }} - service: {{ .Values.nodeName }}.{{ .Values.metadata.namespace }}:{{ .Values.service.port }} ---- -apiVersion: getambassador.io/v3alpha1 -kind: TLSContext -metadata: - name: {{ .Values.nodeName }}-tlscontext - namespace: {{ .Values.metadata.namespace }} -spec: - hosts: - - {{ .Values.nodeName }}.{{ .Values.ambassador.external_url_suffix }} - secret: {{ .Values.nodeName }}-ambassador-certs.{{ .Values.metadata.namespace }} - secret_namespacing: true - min_tls_version: v1.2 -{{- end }} + app.kubernetes.io/name: gateway-statefulset + app.kubernetes.io/component: gateway + app.kubernetes.io/part-of: {{ include "gateway.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + ports: + - name: gateway + protocol: TCP + port: {{ .Values.global.cenm.gateway.port }} + targetPort: {{ .Values.global.cenm.gateway.port }} diff --git a/platforms/r3-corda-ent/charts/cenm-gateway/templates/statefulset.yaml b/platforms/r3-corda-ent/charts/cenm-gateway/templates/statefulset.yaml new file mode 100644 index 00000000000..3dbb87538b0 --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm-gateway/templates/statefulset.yaml @@ -0,0 +1,130 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "gateway.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ include "gateway.fullname" . }} + app.kubernetes.io/name: gateway-statefulset + app.kubernetes.io/component: gateway + app.kubernetes.io/part-of: {{ include "gateway.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} +spec: + replicas: 1 + podManagementPolicy: OrderedReady + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ include "gateway.fullname" . }} + app.kubernetes.io/name: gateway-statefulset + app.kubernetes.io/component: gateway + app.kubernetes.io/part-of: {{ include "gateway.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + serviceName: {{ include "gateway.fullname" . }} + volumeClaimTemplates: + - metadata: + name: gateway-logs + spec: + storageClassName: storage-{{ .Release.Name }} + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storage.size }} + template: + metadata: + labels: + app: {{ include "gateway.fullname" . }} + app.kubernetes.io/name: gateway-statefulset + app.kubernetes.io/component: gateway + app.kubernetes.io/part-of: {{ include "gateway.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + hostname: {{ .Release.Name }} + imagePullSecrets: + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} + initContainers: + - name: init-check-auth + env: + - name: ACCEPT_LICENSE + value: "YES" + image: {{ .Values.image.gateway.repository }}:{{ .Values.image.gateway.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: ["/bin/bash", "-c"] + args: + - |- + echo Probing auth.{{ .Release.Namespace }} port {{ .Values.global.cenm.auth.port }} + until nc -w 5 -vz auth.{{ .Release.Namespace }} {{ .Values.global.cenm.auth.port}} > /dev/null 2>&1 + do + echo "Connect to auth.{{ .Release.Namespace }} port {{ .Values.global.cenm.auth.port }} (tcp) failed: Connection refused" + done + echo "Connection auth.{{ .Release.Namespace }} port {{ .Values.global.cenm.auth.port }} successful" + containers: + - name: gateway + env: + - name: "CENM_ZONE_PORT" + value: "{{ .Values.global.cenm.zone.adminPort }}" + image: {{ .Values.image.gateway.repository }}:{{ .Values.image.gateway.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: ["/bin/sh", "-c"] + args: + - |- + # run the gateway service + ./run.sh + volumeMounts: + - name: gateway-conf + mountPath: /opt/cenm/gateway.conf + subPath: gateway.conf + - name: gateway-conf + mountPath: /opt/cenm/run.sh + subPath: run.sh + - name: gateway-etc + mountPath: /opt/cenm/etc + - name: cenm-certs + mountPath: "/certs" + - name: gateway-logs + mountPath: /opt/cenm/logs + - name: logs + image: {{ .Values.image.gateway.repository }}:{{ .Values.image.gateway.tag }} + imagePullPolicy: IfNotPresent + command: ["sh", "-c"] + args: + - |- + cd 
/opt/cenm + while true; do tail -f logs/gateway-service/*.log 2>/dev/null; sleep 5; done + # in case sth went wrong just wait indefinitely ... + tail -f /dev/null + volumeMounts: + - name: gateway-logs + mountPath: /opt/cenm/logs + volumes: + - name: gateway-conf + configMap: + name: {{ include "gateway.fullname" . }}-conf + defaultMode: 0777 + - name: cenm-certs + secret: + secretName: cenm-certs + - name: gateway-etc + emptyDir: + medium: Memory diff --git a/platforms/r3-corda-ent/charts/cenm-gateway/values.yaml b/platforms/r3-corda-ent/charts/cenm-gateway/values.yaml index 4bf47dcda31..8de558317a0 100644 --- a/platforms/r3-corda-ent/charts/cenm-gateway/values.yaml +++ b/platforms/r3-corda-ent/charts/cenm-gateway/values.yaml @@ -4,217 +4,54 @@ # SPDX-License-Identifier: Apache-2.0 ############################################################################################## -# Default values for Gateway service. +# Default values for cenm-gateway chart. # This is a YAML-formatted file. # Declare variables to be passed into your templates. - -############################################################# -# Basic Configuration # -############################################################# -# Provide the name of the deployment -# Eg. nodeName: gateway -nodeName: gateway - -# This section contains the Corda Enterprise Auth metadata. -metadata: - # Provide the namespace for the Corda Enterprise Auth. - # Eg. namespace: cenm-ent - namespace: cenm-ent - # Provide any additional labels for the Corda Enterprise Auth. - labels: -# prefix for the deployment names e.g orgName (cenm) -prefix: - -# Provide information regarding the Docker images used. -image: - # Provide the alpine utils image, which is used for all init-containers of deployments/jobs. - # NOTE: The alpine image used is the base alpine image with CURL installed. - # Eg. initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - # Provide the image for the main Signer container. - # Eg. gatewayContainerName: corda/enterprise-gateway:1.5.1-zulu-openjdk8u242 - gatewayContainerName: corda/enterprise-gateway:1.5.1-zulu-openjdk8u242 - # Provide the docker-registry secret created and stored in kubernetes cluster as a secret. - # Eg. imagePullSecrets: regcred, can add multiple creds - imagePullSecrets: - - name: "" - # Pull policy to be used for the Docker image - # Eg. pullPolicy: Always - pullPolicy: IfNotPresent -############################################################# -# CENM Service Details # -############################################################# -# This section details the CENM service names as per the configuration file -# It also contains the passwords for keystores and truststores -cenmServices: - # Provide the name of the idman - # Eg. idmanName: idman - idmanName: idman - # Eg. zoneName: zone - # Name of the zone service - zoneName: zone - # Zone Service port - # e.g. zonePort: 12345 - zonePort: 12345 - # Gateway Service port - # e.g. gatewayPort: 8080 - gatewayPort: 8080 - # Eg. authName: auth - # Name of the auth service - authName: auth - # Auth Service port - # Eg. authPort: 8081 - authPort: 8081 -# This section contains the storage information. -storage: - # Provide the name of the storageclass. - # NOTE: Make sure that the storageclass exist prior to this deployment as - # this chart doesn't create the storageclass. - # Eg. name: cordaentsc - name: cordaentsc - -# Required parameter to start any .jar files -# e.g. 
acceptLicense: YES -acceptLicense: YES - -############################################################# -# HashiCorp Vault Configuration # -############################################################# -# NOTE: Make sure that the vault is already unsealed, intialized and configured to -# use Kubernetes service account token based authentication. -# For more info, see https://www.vaultproject.io/docs/auth/kubernetes - -vault: - # Provide the vault address - # Eg. address: http://vault.example.com:8200 - address: - # Provide the vault role used. - # Eg. role: vault-role - role: vault-role - # Provide the authpath configured to be used. - # Eg. authpath: entcordacenm - authPath: entcordacenm - # Provide the service account name autheticated to vault. - # NOTE: Make sure that the service account is already created and autheticated to use the vault. - # Eg. serviceAccountName: vault-auth +global: serviceAccountName: vault-auth - # Provide the vault path where the certificates are stored - # Eg. certsecretprefix: secret/cenm-org-name/signer/certs - certSecretPrefix: - # The amount of times to retry fetching from/writing to Vault before giving up. - # Eg. retries: 10 - retries: 10 - # Specify the maximum size of the memory allocation pool - -############################################################# -# Settings # -############################################################# -config: - # Provide volume related specifications - volume: - baseDir: - - # Provide the path where the CENM Idman .jar-file is stored - # Eg. jarPath: bin - jarPath: bin - - # Provide the path where the CENM Service configuration files are stored - # Eg. configPath: etc - configPath: - - # Provide any extra annotations for the PVCs - pvc: - # annotations: - # key: "value" - annotations: {} - # Volume size for etc/ directory - volumeSizeGatewayEtc: 1Gi - # Volume size for logs/ directory - volumeSizeGatewayLogs: 5Gi + cluster: + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # set to true to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) + vault: + type: hashicorp + role: vault-role + address: + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + proxy: + #This will be the proxy/ingress provider. Can have values "ambassador" or "none" + #Eg. provider: "ambassador" + provider: "ambassador" + #This field contains the external URL of the node + #Eg. externalUrlSuffix: test.blockchaincloudpoc.com + externalUrlSuffix: test.blockchaincloudpoc.com + cenm: + sharedCreds: + truststore: password + keystore: password + gateway: + port: 8080 + zone: + adminPort: 12345 - # Provide any extra annotations for the deployment - deployment: - # annotations: - # key: "value" - annotations: {} - - # Set memory limits of pod - pod: - resources: - limits: - # cpu: 2Gi - memory: - requests: - # cpu: 2Gi - memory: - - nodeSelector: {} - - tolerations: [] - - affinity: {} - - podSecurityContext: - runAsUser: 1000 - runAsGroup: 1000 - fsGroup: 1000 - - securityContext: {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - # runAsUser: 1000 - # Provide the number of replicas for your pods - # Eg. replicas: 1 - replicas: 1 - # Enable container displaying live logs - logsContainersEnabled: true - - cordaJar: - # Provide the memory size. - # Eg. memorySize: 4096 (if using kilobytes) - # Eg. memorySize: 512 (if using megabytes) - # Eg. 
memorySize: 1 (if using gigabytes) - memorySize: 512 - # Provide the unit of greatness for the size, one of three options: - # - k or K for kilobytes - # - m or M for megabytes - # - g or G for gigabytes - # Eg. unit: M - unit: M - - - # Sleep time in seconds, occurs after any error is encountered in start-up - # Eg. 120 - sleepTimeAfterError: 120 - -## Liveness and readiness probe values -## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes -## -livenessProbe: - enabled: false - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 -readinessProbe: - enabled: false - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 -backoffLimit: 6 - -######################################## -### Gateway Configuration ### -######################################## - -service: - # e.g type: ClusterIP - type: ClusterIP - # e.g port: 8080 - port: 8080 +storage: + size: 1Gi + allowedTopologies: + enabled: false +image: + #Provide the docker secret name in the namespace + #Eg. pullSecret: regcred + pullSecret: + #Pull policy to be used for the Docker image + #Eg. pullPolicy: IfNotPresent + pullPolicy: IfNotPresent + #Provide a valid image and version for enterprise-gateway service + gateway: + repository: corda/enterprise-gateway + tag: 1.5.9-zulu-openjdk8u382 + +# Sleep time (in seconds) after an error occured +sleepTimeAfterError: 300 +# path to base dir +baseDir: /opt/cenm diff --git a/platforms/r3-corda-ent/charts/cenm-idman/Chart.yaml b/platforms/r3-corda-ent/charts/cenm-idman/Chart.yaml index 2e0fbdd2890..249e74bb33d 100644 --- a/platforms/r3-corda-ent/charts/cenm-idman/Chart.yaml +++ b/platforms/r3-corda-ent/charts/cenm-idman/Chart.yaml @@ -5,7 +5,21 @@ ############################################################################################## apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-ent: Deploys the idman service." name: cenm-idman +description: "R3 Corda Enterprise Network Manager Identity Manager Service" version: 1.0.0 +appVersion: "latest" +keywords: + - bevel + - corda + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/r3-corda-ent/charts/cenm-idman/README.md b/platforms/r3-corda-ent/charts/cenm-idman/README.md index fc9c8865ef5..f932cd3908c 100644 --- a/platforms/r3-corda-ent/charts/cenm-idman/README.md +++ b/platforms/r3-corda-ent/charts/cenm-idman/README.md @@ -3,207 +3,105 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) - -# Idman Deployment - -- [Idman Deployment Helm Chart](#Idman-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Contributing](#contributing) -- [License](#license) - - -## Idman Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/cenm-idman) deploys the identity manager Service. - - -## Prerequisites ---- -Before deploying the chart please ensure you have the following prerequisites: - -- Kubernetes cluster up and running. 
-- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm is installed. - - -## Chart Structure ---- -This chart has following structue: -``` - ├── cenm-idman - │ ├── Chart.yaml - │ ├── templates - │ │ ├── deployment.yaml - │ │ ├── _helpers.tpl - │ │ ├── pvc.yaml - | | |__ configmap.yaml - │ │ └── service.yaml - │ └── values.yaml -``` +# cenm idman-service -Type of files used: +This chart is a component of Hyperledger Bevel. The cenm-idman chart deploys a R3 Corda Enterprise identity manager. If enabled, the keys are then stored on the configured vault and stored as Kubernetes secrets. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details. -- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed. -- `deployment.yaml` : Deploying the "Identity Manager" service as containers in a Kubernetes cluster. The init container is defined to perform certain setup tasks before the main containers start. The main container is the primary application container responsible for running the "Identity Manager" service. Both the main container and log container use volume mounts to access shared storage within the pod. -- `_helpers.tpl` : A template file used for defining custom labels in the Helm chart. -- `pvc.yaml` : A PersistentVolumeClaim (PVC) is a request for storage by a user. -- `service.yaml` : This file defines a Kubernetes Service with multiple ports for protocols and targets, and supports Ambassador proxy annotations for specific configurations when using the "ambassador" proxy provider. -- `configmap.yaml` : ConfigMap resource in Kubernetes with a specific name and namespace, along with labels for identification. -- `chart.yaml` : Provides metadata about the chart, such as its name, version, and description. -- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the metadata, image, storage, Vault, etc. +## TL;DR +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install idman bevel/cenm-idman +``` - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/cenm-idman/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: - -## Parameters ---- +## Prerequisites -### Name +- Kubernetes 1.19+ +- Helm 3.2.0+ -| Name | Description | Default Value | -| -----------| -----------------------------------------| ------------- | -| nodeName | Provide the name of the node | idman | +If Hashicorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ -### Metadata +> **Important**: Ensure the `enterprise-init` chart has been installed before installing this. Also check the dependent charts. Installing this chart seperately is not required as it is a dependent chart for cenm, and is installed with cenm chart. 
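If the chart is ever installed on its own (for testing, for example), the database block can be overridden at install time. The snippet below is only a sketch under assumed defaults: the override file name and the `cenm` namespace are placeholders, and the keys come from the Database Settings table further below.

```bash
# Hypothetical override file; keys come from the Database Settings table below
cat > idman-overrides.yaml <<EOF
database:
  driverClassName: org.h2.Driver
  jdbcDriver: ""
  user: idman-db-user
  password: idman-db-password
EOF

helm install idman bevel/cenm-idman --namespace cenm --create-namespace -f idman-overrides.yaml
```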
-| Name | Description | Default Value | -| ----------------| ----------------------------------------------------------------| ------------- | -| namespace | Provide the namespace for the Corda Enterprise Idman | cenm | -| labels | Provide any additional labels for the Corda Enterprise Idman | "" | +## Installing the Chart -### Image +To install the chart with the release name `idman`: -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------------------------------------------ | ---------------------------------------------| -| initContainerName | Information about the Docker container used for the init-containers | ghcr.io/hyperledger | -| idmanContainer | Provide the image for the main Idman container | identitymanager:1.2-zulu-openjdk8u242 | -| enterpriseCliContainer | Provide the docker-registry secret created and stored in kubernetes cluster as a secret | corda/enterprise-cli:1.5.1-zulu-openjdk8u242 | -| ImagePullSecret | Provide the K8s secret that has rights for pulling the image off the registry | "" | -| pullPolicy | Provide the pull policy for Docker images, either Always or IfNotPresent | IfNotPresent | - -### storage - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------ | ------------- | -| name | Provide the name of the storage class | cenm | -| memory | Provide the memory size for the storage class| 64Mi | - -### Vault - -| Name | Description | Default Value | -| ------------------------- | --------------------------------------------------------------------------| ------------- | -| address | Address/URL of the Vault server | "" | -| role | Role used for authentication with Vault | vault-role | -| authpath | Authentication path for Vault | entcordacenm | -| serviceAccountName | Provide the already created service account name autheticated to vault | vault-auth | -| certSecretPrefix | Provide the vault path where the certificates are stored | "" | -| retries | Amount of times to retry fetching from/writing to Vault before giving up | 10 | -| sleepTimeAfterError | Amount of time in seconds wait after an error occurs | 15 | - -### Service - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ------------- | -| external port | Idman 'main' service | 100000 | -| internal port | Internal service, inside the K8s cluster | 5052 | -| revocation port | revocation service | 5053 | -| adminListener port | Provide the admin listener port | "" | - -### Database - -| Name | Description | Default Value | -| ------------------------ | --------------------------------------------------------------------------------| --------------- | -| driverClassName | Java class name to use for the database | /opt/cenm | -| jdbcDriver | JDBC Driver name | "org.h2.Driver" | -| url | The DB connection URL | jdbc:h2:file | -| user | DB user name | "example-db-user" | -| password | DB password | "example-db-password" | -| runMigration | Option to run database migrations as part of startup | "true" | - -### Config - -| Name | Description | Default Value | -| ------------------------ | -------------------------------------------------------------------------------------------| --------------- | -| baseDir | Provide volume related specifications | /opt/corda | -| jarPath | Provide the path where the CENM Idman .jar-file is stored | "bin" | -| configPath | Provide the path where the CENM Service configuration files are stored | 
"etc" | -| pvc | Provide any extra annotations for the PVCs | "value" | -| cordaJar | Specify the maximum size of the memory allocation pool | "value" | -| deployment | Provide any extra annotations for the deployment | "value" | -| pod | Set memory limits of pod | "" | -| replicas | Provide the number of replicas for your pods | "1" | -| sleepTimeAfterError | Sleep time in seconds, occurs after any error is encountered in start-up | 120 | - - -### CenmServices - -| Name | Description | Default Value | -| ---------------| ------------------------------------------| ------------- | -| gatewayName | Gateway service name | "" | -| gatewayPort | Gateway service api endpoint port | "" | -| zoneName | Zone service name | "" | -| zoneEnmPort | Zone service enm port | "" | -| authName | Name of the auth service | "" | -| authPort | Auth Service port | "" | - -### Healthcheck - -| Name | Description | Default Value | -| ----------------------------| -----------------------------------------------------------| ------------- | -| nodePort | Health Check node port set to get rid of logs pollution | 0 | - - - -## Deployment ---- -To deploy the idman Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/cenm-idman/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install, upgrade, verify delete the chart: - -To install the chart: ```bash -helm repo add bevel https://hyperledger.github.io/bevel/ -helm install ./cenm-idman +helm repo add bevel https://hyperledger.github.io/bevel +helm install idman bevel/cenm-idman ``` -To upgrade the chart: -```bash -helm upgrade ./cenm-idman -``` +The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. -To verify the deployment: -```bash -kubectl get jobs -n -``` -Note : Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `idman` deployment: -To delete the chart: ```bash -helm uninstall +helm uninstall idman ``` -Note : Replace `` with the desired name for the release. +The command removes all the Kubernetes components associated with the chart and deletes the release. - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [idman Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/cenm-idman), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). +## Parameters + +### Global parameters +These parameters are refered to as same in each parent or child chart +| Name | Description | Default Value | +|--------|---------|-------------| +|`global.serviceAccountName` | The serviceaccount name that will be used for Vault Auth management| `vault-auth` | +| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. 
Currently only `aws` and `minikube` are tested | `aws` | +| `global.cluster.cloudNativeServices` | only `false` is implemented, `true` to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) is for future | `false` | +| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` is supported. | `hashicorp` | +| `global.vault.role` | Role used for authentication with Vault | `vault-role` | +| `global.vault.address`| URL of the Vault server. | `""` | +| `global.vault.authPath` | Authentication path for Vault | `supplychain` | +| `global.vault.secretEngine` | The value for vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | The value for vault secret prefix which must start with `data/` | `data/supplychain` | +| `global.proxy.provider` | The proxy or Ingress provider. Can be `none` or `ambassador` | `ambassador` | +| `global.proxy.externalUrlSuffix` | The external URL suffix at which the CENM services will be available | `test.blockchaincloudpoc.com` | +| `global.cenm.sharedCreds.truststore` | The truststore password for the pki-created truststores | `password` | +| `global.cenm.sharedCreds.keystore` | The keystore password for the pki-created keystores | `password` | +| `global.cenm.identityManager.port` | The port for identity manager issuance | `10000` | +| `global.cenm.identityManager.revocation.port` | The port for identity manager revocation | `5053` | +| `global.cenm.identityManager.internal.port` | The port for identity manager internal listener | `5052` | +| `global.cenm.auth.port` | The port for auth api | `8081` | +| `global.cenm.gateway.port` | The port for gateway api | `8080` | +| `global.cenm.zone.enmPort` | The port for zone ENM | `25000` | + +### Storage + +| Name | Description | Default Value | +|--------|---------|-------------| +| `storage.allowedTopologies.enabled` | Check [bevel-storageclass](../../../shared/charts/bevel-storageclass/README.md) for details | `false` | - +### Image +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | +| `image.pullPolicy` | Pull policy to be used for the Docker images | `IfNotPresent` | +| `image.idman.repository` | CENM idman image repository | `corda/enterprise-identitymanager`| +| `image.idman.tag` | CENM idman image tag as per version | `1.5.9-zulu-openjdk8u382`| + +### Database Settings +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `database.driverClassName` | DB driver class name | `org.h2.Driver` | +| `database.jdbcDriver` | DB jdbc driver | `""` | +| `database.url` | DB connection URL | `jdbc:h2:file:./h2/idman-persistence;DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=10000;WRITE_DELAY=0;AUTO_SERVER_PORT=0` | +| `database.user` | DB user name | `idman-db-user` | +| `database.password` | DB password | `idman-db-password` | + ## License This chart is licensed under the Apache v2.0 license. -Copyright © 2023 Accenture +Copyright © 2024 Accenture ### Attribution diff --git a/platforms/r3-corda-ent/charts/cenm-idman/files/getZoneToken.sh b/platforms/r3-corda-ent/charts/cenm-idman/files/getZoneToken.sh index 4b12a940dbb..5b8b2ebb7dd 100644 --- a/platforms/r3-corda-ent/charts/cenm-idman/files/getZoneToken.sh +++ b/platforms/r3-corda-ent/charts/cenm-idman/files/getZoneToken.sh @@ -1,13 +1,12 @@ #!/bin/sh -set -x -if [ ! -f etc/token ] +if [ !
-f /opt/cenm/etc/token ] then EXIT_CODE=1 until [ "${EXIT_CODE}" -eq "0" ] do - echo "CENM: Attempting to login to gateway:8080 ..." - java -jar bin/cenm-tool.jar context login -s http://{{ .Values.cenmServices.gatewayName }}.{{ .Values.metadata.namespace }}:{{ .Values.cenmServices.gatewayPort }} -u config-maintainer -p p4ssWord + echo "Trying to login to gateway.{{ .Release.Namespace }}:{{ .Values.global.cenm.gateway.port }} ..." + java -jar bin/cenm-tool.jar context login -s http://gateway.{{ .Release.Namespace }}:{{ .Values.global.cenm.gateway.port }} -u config-maintainer -p p4ssWord EXIT_CODE=${?} if [ "${EXIT_CODE}" -ne "0" ] then @@ -18,12 +17,9 @@ then fi done EXIT_CODE=1 - {{ if eq .Values.bashDebug true }} - cat etc/idman.conf - {{ end }} until [ "${EXIT_CODE}" -eq "0" ] do - ZONE_TOKEN=$(java -jar bin/cenm-tool.jar identity-manager config set -f=etc/idman.conf --zone-token) + ZONE_TOKEN=$(java -jar /opt/cenm/bin/cenm-tool.jar identity-manager config set -f=/opt/cenm/etc/idman.conf --zone-token) EXIT_CODE=${?} if [ "${EXIT_CODE}" -ne "0" ] then @@ -33,10 +29,5 @@ then break fi done - echo ${ZONE_TOKEN} - echo ${ZONE_TOKEN} > etc/token - {{ if eq .Values.bashDebug true }} - cat etc/token - {{ end }} - java -jar bin/cenm-tool.jar identity-manager config set-admin-address -a={{ .Values.nodeName }}.{{ .Values.metadata.namespace }}:{{ .Values.service.adminListener.port }} + java -jar bin/cenm-tool.jar identity-manager config set-admin-address -a=idman.{{ .Release.Namespace }}:{{ .Values.adminListener.port }} fi diff --git a/platforms/r3-corda-ent/charts/cenm-idman/files/idman.conf b/platforms/r3-corda-ent/charts/cenm-idman/files/idman.conf new file mode 100644 index 00000000000..342cd0b6e80 --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm-idman/files/idman.conf @@ -0,0 +1,77 @@ +address = "0.0.0.0:{{ .Values.global.cenm.identityManager.port }}" +database { + driverClassName = "{{ .Values.database.driverClassName }}" + url = "{{ .Values.database.url }}" + user = "{{ .Values.database.user }}" + password = {{ .Values.database.password }} + runMigration = {{ .Values.database.runMigration }} +} +workflows = { + "identity-manager" = { + type = ISSUANCE + updateInterval = 10000 + enmListener = { + port = {{ .Values.global.cenm.identityManager.internal.port }} + reconnect = true + ssl = { + keyStore = { + location = /certs/corda-ssl-identity-manager-keys.jks + password = {{ .Values.global.cenm.sharedCreds.keystore }} + } + trustStore = { + location = /certs/corda-ssl-trust-store.jks + password = {{ .Values.global.cenm.sharedCreds.truststore }} + } + } + } + plugin = { + pluginClass = "com.r3.enmplugins.approveall.ApproveAll" + } + }, + "revocation" = { + type = REVOCATION + crlCacheTimeout = 900000 # every 15 minutes + crlFiles = ["/certs/tls.crl", "/certs/root.crl", "/certs/subordinate.crl"] + enmListener = { + port = {{ .Values.global.cenm.identityManager.revocation.port }} + reconnect = true + ssl = { + keyStore = { + location = /certs/corda-ssl-identity-manager-keys.jks + password = {{ .Values.global.cenm.sharedCreds.keystore }} + } + trustStore = { + location = /certs/corda-ssl-trust-store.jks + password = {{ .Values.global.cenm.sharedCreds.truststore }} + } + } + } + plugin = { + pluginClass = "com.r3.enmplugins.approveall.ApproveAll" + } + } +} +authServiceConfig = { + disableAuthentication=false + host="auth.{{ .Release.Namespace }}" + port={{ .Values.global.cenm.auth.port }} + trustStore = { + location = "/certs/corda-ssl-trust-store.jks" + password = {{ 
.Values.global.cenm.sharedCreds.truststore }} + } + issuer="http://test" + leeway=5s +} +adminListener = { + port = {{ .Values.adminListener.port }} + ssl = { + keyStore = { + location = /certs/corda-ssl-identity-manager-keys.jks + password = {{ .Values.global.cenm.sharedCreds.keystore }} + } + trustStore = { + location = /certs/corda-ssl-trust-store.jks + password = {{ .Values.global.cenm.sharedCreds.truststore }} + } + } +} diff --git a/platforms/r3-corda-ent/charts/cenm-idman/requirements.yaml b/platforms/r3-corda-ent/charts/cenm-idman/requirements.yaml new file mode 100644 index 00000000000..895f0a0e1cf --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm-idman/requirements.yaml @@ -0,0 +1,7 @@ +dependencies: + - name: bevel-storageclass + alias: storage + repository: "file://../../../shared/charts/bevel-storageclass" + tags: + - storage + version: ~1.0.0 diff --git a/platforms/r3-corda-ent/charts/cenm-idman/templates/_helpers.tpl b/platforms/r3-corda-ent/charts/cenm-idman/templates/_helpers.tpl index 7f9b0dc6131..4b66534616d 100644 --- a/platforms/r3-corda-ent/charts/cenm-idman/templates/_helpers.tpl +++ b/platforms/r3-corda-ent/charts/cenm-idman/templates/_helpers.tpl @@ -1,5 +1,28 @@ -{{- define "labels.custom" -}} - {{- range $key, $val := .Values.metadata.labels -}} - {{- $key -}}: {{- $val -}} - {{- end -}} +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "idman.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "idman.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "idman.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- end -}} diff --git a/platforms/r3-corda-ent/charts/cenm-idman/templates/configmap.yaml b/platforms/r3-corda-ent/charts/cenm-idman/templates/configmap.yaml index 6f2e8f6b030..252bb810356 100644 --- a/platforms/r3-corda-ent/charts/cenm-idman/templates/configmap.yaml +++ b/platforms/r3-corda-ent/charts/cenm-idman/templates/configmap.yaml @@ -1,14 +1,25 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + --- apiVersion: v1 kind: ConfigMap metadata: - name: {{ .Values.prefix }}-idman-conf - namespace: {{ .Values.metadata.namespace }} + name: {{ include "idman.fullname" . }}-conf + namespace: {{ .Release.Namespace }} labels: - app.kubernetes.io/name: {{ .Values.nodeName }} + app.kubernetes.io/name: {{ include "idman.fullname" . }} helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - {{- include "labels.custom" . | nindent 2 }} + app.kubernetes.io/part-of: {{ include "idman.fullname" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} data: + idman.conf: |+ +{{ tpl (.Files.Get "files/idman.conf") . | indent 4 }} + getZoneToken.sh: |+ - {{ tpl (.Files.Get "files/getZoneToken.sh") . | nindent 4 }} +{{ tpl (.Files.Get "files/getZoneToken.sh") . | nindent 4 }} diff --git a/platforms/r3-corda-ent/charts/cenm-idman/templates/deployment.yaml b/platforms/r3-corda-ent/charts/cenm-idman/templates/deployment.yaml deleted file mode 100644 index 74a4babf065..00000000000 --- a/platforms/r3-corda-ent/charts/cenm-idman/templates/deployment.yaml +++ /dev/null @@ -1,444 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} - {{- if .Values.config.deployment.annotations }} - annotations: -{{ toYaml .Values.config.deployment.annotations | indent 8 }} - {{- end }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - serviceName: {{ .Values.nodeName }} - replicas: {{ .Values.config.replicas }} - selector: - matchLabels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - template: - metadata: - labels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - serviceAccountName: {{ $.Values.vault.serviceAccountName }} - securityContext: - runAsUser: 1000 - runAsGroup: 1000 - fsGroup: 1000 - initContainers: - - name: init-certificates - image: "{{ .Values.image.initContainer }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authPath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.config.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certSecretPrefix }} - - name: MOUNT_PATH - value: "/DATA" - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? 
- if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code and curl_response - $curl_response" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - # Setting up the environment to get secrets/certificates from Vault - echo "Getting certificates/secrets from Vault Server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - echo "Logged into Vault" - # Creating dirs for storing certificates and credentials - mkdir -p ${MOUNT_PATH}/idman; - mkdir -p ${MOUNT_PATH}/root; - mkdir -p ${MOUNT_PATH}/crl-files; - mkdir -p ${MOUNT_PATH}/ssl; - mkdir -p ${MOUNT_PATH}/truststore; - - # Fetching ssl-idman certificates from vault - COUNTER=1 - while [ "$COUNTER" -lt {{ $.Values.vault.retries }} ] - do - # Get keystores from Vault, to see if certificates are created and have been put in Vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ $.Values.nodeName }}/certs | jq -r 'if .errors then . else . end') - data_info="$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data["data"]')" - - if [ "$data_info" == "null" ] - then - sleep {{ $.Values.vault.sleepTimeAfterError }} - else - validateVaultResponse "${CERTS_SECRET_PREFIX}/{{ $.Values.nodeName }}/certs" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - idm_ssl=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["corda-ssl-identity-manager-keys.jks"]') - echo "${idm_ssl}" | base64 -d > ${MOUNT_PATH}/idman/corda-ssl-identity-manager-keys.jks - echo "Successfully got SSL Idman certifcates" - break - fi - COUNTER=`expr "$COUNTER" + 1` - done - - if [ "$COUNTER" -ge {{ $.Values.vault.retries }} ] - then - echo "Idman SSL Certificates might not have been put in Vault. Giving up after $COUNTER tries!" - exit 1 - fi - echo "Done" - - # Fetching corda-ssl-trust-store certificates from vault - COUNTER=1 - while [ "$COUNTER" -lt {{ $.Values.vault.retries }} ] - do - # Get keystores from Vault, to see if certificates are created and have been put in Vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/root/certs | jq -r 'if .errors then . else . end') - data_info="$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data["data"]')" - - if [ "$data_info" == "null" ] - then - sleep {{ $.Values.vault.sleepTimeAfterError }} - else - validateVaultResponse "${CERTS_SECRET_PREFIX}/root/certs" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - root_ssl=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["corda-ssl-trust-store.jks"]') - echo "${root_ssl}" | base64 -d > ${MOUNT_PATH}/root/corda-ssl-trust-store.jks - echo "Successfully got Root SSL certifcates" - break - fi - COUNTER=`expr "$COUNTER" + 1` - done - - if [ "$COUNTER" -ge {{ $.Values.vault.retries }} ] - then - echo "Root SSL Certificates might not have been put in Vault. Giving up after $COUNTER tries!" 
- exit 1 - fi - echo "Done" - - # Fetching CRL certificates from vault - COUNTER=1 - while [ "$COUNTER" -lt {{ $.Values.vault.retries }} ] - do - # Get CRLs from vault to see if certificates are created, and have been put in Vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ .Values.nodeName }}/crls | jq -r 'if .errors then . else . end') - data_info="$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data["data"]')" - - if [ "$data_info" == "null" ] - then - sleep {{ $.Values.vault.sleepTimeAfterError }} - else - validateVaultResponse "${CERTS_SECRET_PREFIX}/{{ .Values.nodeName }}/crls" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - tls_crl=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["tls.crl"]') - echo "${tls_crl}" | base64 -d > ${MOUNT_PATH}/crl-files/tls.crl - - root_crl=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["root.crl"]') - echo "${root_crl}" | base64 -d > ${MOUNT_PATH}/crl-files/root.crl - - subordinate_crl=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["subordinate.crl"]') - echo "${subordinate_crl}" | base64 -d > ${MOUNT_PATH}/crl-files/subordinate.crl - - echo "Successfully got CRL Certifcates" - break - fi - COUNTER=`expr "$COUNTER" + 1` - done - - if [ "$COUNTER" -ge {{ $.Values.vault.retries }} ] - then - echo "CRL Certificates might not have been put in Vault. Giving up after $COUNTER tries!" - exit 1 - fi - - # Fetching the idman ssl credentials from vault - OUTPUT_PATH=${MOUNT_PATH}/ssl; - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials/ssl | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials/ssl" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - IDMAN_SSL=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["idman"]') - echo "${IDMAN_SSL}"> ${OUTPUT_PATH}/idmanssl - - #Fetching ssl truststore from vault - OUTPUT_PATH=${MOUNT_PATH}/truststore; - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials/truststore | jq -r 'if .errors then . else . 
end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials/truststore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - SSL_TRUSTSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["ssl"]') - echo "${SSL_TRUSTSTORE}"> ${OUTPUT_PATH}/sslts - - echo "Done" - volumeMounts: - - name: certificates - mountPath: /DATA - - name: init-token - image: "{{ .Values.image.enterpriseCliContainer }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - env: - - name: ACCEPT_LICENSE - value: "{{ .Values.acceptLicense }}" - command: ["/bin/bash", "-c"] - args: - - |- - pwd - cp CM/*.sh bin/ - chmod +x bin/* - ls -alR - echo 'address = "0.0.0.0:{{ .Values.service.external.port }}" - database { - driverClassName = "{{ .Values.database.driverClassName }}" - url = "{{ .Values.database.url }}" - user = "{{ .Values.database.user }}" - password = "{{ .Values.database.password }}" - runMigration = "{{ .Values.database.runMigration }}" - } - workflows = { - "identity-manager" = { - type = ISSUANCE - updateInterval = 10000 - enmListener = { - port = {{ .Values.service.internal.port }} - reconnect = true - ssl = { - keyStore = { - location = ./DATA/idman/corda-ssl-identity-manager-keys.jks - password = IDMAN_SSL - } - trustStore = { - location = ./DATA/root/corda-ssl-trust-store.jks - password = SSL_TRUSTSTORE - } - } - } - plugin = { - pluginClass = "com.r3.enmplugins.approveall.ApproveAll" - } - }, - "revocation" = { - type = REVOCATION - crlCacheTimeout = 900000 # every 15 minutes - crlFiles = ["./DATA/crl-files/tls.crl", "./DATA/crl-files/root.crl", "./DATA/crl-files/subordinate.crl"] - enmListener = { - port = {{ .Values.service.revocation.port }} - reconnect = true - ssl = { - keyStore = { - location = ./DATA/idman/corda-ssl-identity-manager-keys.jks - password = IDMAN_SSL - } - trustStore = { - location = ./DATA/root/corda-ssl-trust-store.jks - password = SSL_TRUSTSTORE - } - } - } - plugin = { - pluginClass = "com.r3.enmplugins.approveall.ApproveAll" - } - } - } - authServiceConfig = { - disableAuthentication=false - host="{{ .Values.cenmServices.authName }}.{{ .Values.metadata.namespace }}" - port={{ .Values.cenmServices.authPort }} - trustStore = { - location = "./DATA/root/corda-ssl-trust-store.jks" - password = SSL_TRUSTSTORE - } - issuer="http://test" - leeway=5s - } - adminListener = { - port = {{ .Values.service.adminListener.port }} - ssl = { - keyStore = { - location = ./DATA/idman/corda-ssl-identity-manager-keys.jks - password = IDMAN_SSL - } - trustStore = { - location = ./DATA/root/corda-ssl-trust-store.jks - password = SSL_TRUSTSTORE - } - } - }' >> etc/idman.conf - - #replacing the variables in idman.conf with actual values - export IDMAN_SSL=$(cat {{ .Values.config.volume.baseDir }}/DATA/ssl/idmanssl) - sed -i -e "s*IDMAN_SSL*${IDMAN_SSL}*g" etc/idman.conf - export SSL_TRUSTSTORE=$(cat {{ .Values.config.volume.baseDir }}/DATA/truststore/sslts) - sed -i -e "s*SSL_TRUSTSTORE*${SSL_TRUSTSTORE}*g" etc/idman.conf - bin/getZoneToken.sh - resources: - {{- toYaml .Values.resources | nindent 12 }} - volumeMounts: - - name: idman-etc - mountPath: {{ .Values.config.volume.baseDir }}/etc - - name: idman-conf - mountPath: {{ .Values.config.volume.baseDir }}/CM/getZoneToken.sh - subPath: getZoneToken.sh - - name: certificates - mountPath: {{ .Values.config.volume.baseDir }}/DATA - containers: - - name: idman - image: "{{ .Values.image.idmanContainer }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - env: - - name: ACCEPT_LICENSE - value: "{{required "You must accept the license agreement 
to run this software" .Values.acceptLicense }}" - command: ["/bin/bash", "-c"] - args: - - |- - #running the idman service - /bin/sh - #main run - export IDMAN_SSL=$(cat {{ .Values.config.volume.baseDir }}/DATA/ssl/idmanssl) - export SSL_TRUSTSTORE=$(cat {{ .Values.config.volume.baseDir }}/DATA/truststore/sslts) - if [ -f {{ .Values.config.jarPath }}/identitymanager.jar ] - then - sha256sum {{ .Values.config.jarPath }}/identitymanager.jar - sha256sum {{ .Values.config.jarPath }}/angel.jar - cp {{ .Values.config.configPath }}/idman.conf {{ .Values.config.configPath }}/identitymanager.conf - cat {{ .Values.config.configPath }}/identitymanager.conf - echo - echo "CENM: starting Identity Manager process ..." - echo - java -jar {{ .Values.config.jarPath }}/angel.jar \ - --jar-name={{ .Values.config.jarPath }}/identitymanager.jar \ - --zone-host={{ .Values.cenmServices.zoneName }}.{{ .Values.metadata.namespace }} \ - --zone-port={{ .Values.cenmServices.zoneEnmPort }} \ - --token=${TOKEN} \ - --service=IDENTITY_MANAGER \ - --working-dir=etc/ \ - --polling-interval=10 \ - --tls=true \ - --tls-keystore={{ .Values.config.volume.baseDir }}/DATA/idman/corda-ssl-identity-manager-keys.jks \ - --tls-keystore-password=${IDMAN_SSL} \ - --tls-truststore={{ .Values.config.volume.baseDir }}/DATA/root/corda-ssl-trust-store.jks \ - --tls-truststore-password=${SSL_TRUSTSTORE} \ - --verbose - EXIT_CODE=${?} - else - echo "Missing Identity Manager jar file in {{ .Values.config.jarPath }} folder:" - ls -al {{ .Values.config.jarPath }} - EXIT_CODE=110 - fi - - if [ "${EXIT_CODE}" -ne "0" ] - then - HOW_LONG={{ .Values.config.sleepTimeAfterError }} - echo - echo "exit code: ${EXIT_CODE} (error)" - echo "Going to sleep for requested ${HOW_LONG} seconds to let you login and investigate." - fi - sleep ${HOW_LONG} - echo - volumeMounts: - - name: idman-etc - mountPath: {{ .Values.config.volume.baseDir }}/etc - - name: {{ .Values.nodeName }}-pvc-logs - mountPath: {{ .Values.config.volume.baseDir }}/logs - - name: {{ .Values.nodeName }}-pvc-h2 - mountPath: {{ .Values.config.volume.baseDir }}/h2 - - name: certificates - mountPath: {{ .Values.config.volume.baseDir }}/DATA - resources: - requests: - memory: {{ .Values.config.pod.resources.requests}} - limits: - memory: {{ .Values.config.pod.resources.limits}} - - name: logs - image: "{{ .Values.image.idmanContainer }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - env: - - name: ACCEPT_LICENSE - value: "{{required "You must accept the license agreement to run this software" .Values.acceptLicense }}" - command: ["/bin/bash", "-c"] - args: - - |- - cd {{ .Values.config.volume.baseDir }} - while true; do tail -f logs/angel-service/*.log 2>/dev/null; sleep 5; done - # in case sth went wrong just wait indefinitely ... - tail -f /dev/null - volumeMounts: - - name: {{ .Values.nodeName }}-pvc-logs - mountPath: {{ .Values.config.volume.baseDir }}/logs - {{- with .Values.image.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . 
| nindent 8 }} - {{- end }} - volumes: - - name: idman-conf - configMap: - name: {{ .Values.prefix }}-idman-conf - - name: idman-etc - emptyDir: - medium: Memory - - name: certificates - emptyDir: - medium: Memory - volumeClaimTemplates: - - metadata: - name: {{ .Values.nodeName }}-pvc-logs -{{- if .Values.config.pvc.annotations }} - annotations: -{{ toYaml .Values.config.pvc.annotations | indent 8 }} -{{- end }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }}-pvc-logs - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - storageClassName: {{ .Values.storage.name }} - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: {{ .Values.storage.memory }} - - metadata: - name: {{ .Values.nodeName }}-pvc-h2 -{{- if .Values.config.pvc.annotations }} - annotations: -{{ toYaml .Values.config.pvc.annotations | indent 8 }} -{{- end }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }}-pvc-h2 - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - storageClassName: {{ .Values.storage.name }} - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: {{ .Values.storage.memory }} diff --git a/platforms/r3-corda-ent/charts/cenm-idman/templates/service.yaml b/platforms/r3-corda-ent/charts/cenm-idman/templates/service.yaml index 89d317db57b..516287fecce 100644 --- a/platforms/r3-corda-ent/charts/cenm-idman/templates/service.yaml +++ b/platforms/r3-corda-ent/charts/cenm-idman/templates/service.yaml @@ -4,79 +4,71 @@ # SPDX-License-Identifier: Apache-2.0 ############################################################################################## +--- apiVersion: v1 kind: Service metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} + name: {{ include "idman.name" . }} + namespace: {{ .Release.Namespace }} labels: - run: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + app.kubernetes.io/name: idman-service + app.kubernetes.io/component: idman + app.kubernetes.io/part-of: {{ include "idman.fullname" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} spec: + type: ClusterIP selector: - app: {{ .Values.nodeName }} -# we need Health Check node port set to get rid of logs pollution -{{- if (.Values.healthCheck.nodePort) }} - healthCheckNodePort: {{ .Values.healthCheck.nodePort }} -{{- end }} + app.kubernetes.io/name: idman-statefulset + app.kubernetes.io/component: idman + app.kubernetes.io/part-of: {{ include "idman.fullname" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} ports: - - port: {{ .Values.service.internal.port }} - targetPort: {{ .Values.service.internal.port }} - protocol: TCP - name: issuance - - port: {{ .Values.service.revocation.port }} - targetPort: {{ .Values.service.revocation.port }} - protocol: TCP - name: revocation - - port: {{ .Values.service.external.port }} - targetPort: {{ .Values.service.external.port }} - protocol: TCP - name: main - - port: {{ .Values.service.adminListener.port }} - targetPort: {{ .Values.service.adminListener.port }} - protocol: TCP - name: adminlistener -{{ if $.Values.ambassador }} + - port: {{ .Values.global.cenm.identityManager.internal.port }} + targetPort: {{ .Values.global.cenm.identityManager.internal.port }} + protocol: TCP + name: issuance + - port: {{ .Values.global.cenm.identityManager.revocation.port }} + targetPort: {{ .Values.global.cenm.identityManager.revocation.port }} + protocol: TCP + name: revocation + - port: {{ .Values.global.cenm.identityManager.port }} + targetPort: {{ .Values.global.cenm.identityManager.port }} + protocol: TCP + name: main + - port: {{ .Values.adminListener.port }} + targetPort: {{ .Values.adminListener.port }} + protocol: TCP + name: adminlistener +{{- if eq .Values.global.proxy.provider "ambassador" }} --- +## Host for doorman apiVersion: getambassador.io/v3alpha1 kind: Host metadata: - name: {{ .Values.nodeName }}-host + name: {{ .Release.Name }}-doorman spec: - hostname: {{ .Values.nodeName }}.{{ .Values.ambassador.external_url_suffix }} + hostname: {{ .Release.Name }}-doorman.{{ .Values.global.proxy.externalUrlSuffix }} acmeProvider: authority: none requestPolicy: insecure: action: Reject tlsSecret: - name: {{ .Values.nodeName }}-ambassador-certs - namespace: {{ .Values.metadata.namespace }} - tls: - min_tls_version: v1.2 + name: doorman-tls-certs + namespace: {{ .Release.Namespace }} --- +## Mapping for doorman port apiVersion: getambassador.io/v3alpha1 kind: Mapping metadata: - name: {{ .Values.nodeName }}-https - namespace: {{ .Values.metadata.namespace }} + name: {{ .Release.Name }}-mapping + namespace: {{ .Release.Namespace }} spec: - host: {{ .Values.nodeName }}.{{ .Values.ambassador.external_url_suffix }} + host: {{ .Release.Name }}-doorman.{{ .Values.global.proxy.externalUrlSuffix }} prefix: / - service: {{ .Values.nodeName }}.{{ .Values.metadata.namespace }}:{{ .Values.service.external.port }} ---- -apiVersion: getambassador.io/v3alpha1 -kind: TLSContext -metadata: - name: {{ .Values.nodeName }}-tlscontext - namespace: {{ .Values.metadata.namespace }} -spec: - hosts: - - {{ .Values.nodeName }}.{{ .Values.ambassador.external_url_suffix }} - secret: {{ .Values.nodeName }}-ambassador-certs.{{ .Values.metadata.namespace }} - secret_namespacing: true - min_tls_version: v1.2 + service: {{ include "idman.name" . }}.{{ .Release.Namespace }}:{{ .Values.global.cenm.identityManager.port }} {{- end }} diff --git a/platforms/r3-corda-ent/charts/cenm-idman/templates/statefulset.yaml b/platforms/r3-corda-ent/charts/cenm-idman/templates/statefulset.yaml new file mode 100644 index 00000000000..9abd8d714f0 --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm-idman/templates/statefulset.yaml @@ -0,0 +1,181 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "idman.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ include "idman.fullname" . }} + app.kubernetes.io/name: idman-statefulset + app.kubernetes.io/component: idman + app.kubernetes.io/part-of: {{ include "idman.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} +spec: + replicas: 1 + podManagementPolicy: OrderedReady + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ include "idman.fullname" . }} + app.kubernetes.io/name: idman-statefulset + app.kubernetes.io/component: idman + app.kubernetes.io/part-of: {{ include "idman.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + serviceName: {{ include "idman.fullname" . }} + volumeClaimTemplates: + - metadata: + name: idman-h2 + spec: + storageClassName: storage-{{ .Release.Name }} + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storage.dbSize }} + - metadata: + name: idman-etc + spec: + storageClassName: storage-{{ .Release.Name }} + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storage.size }} + - metadata: + name: idman-logs + spec: + storageClassName: storage-{{ .Release.Name }} + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storage.size }} + template: + metadata: + labels: + app: {{ include "idman.fullname" . }} + app.kubernetes.io/name: idman-statefulset + app.kubernetes.io/component: idman + app.kubernetes.io/part-of: {{ include "idman.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + hostname: {{ .Release.Name }} + imagePullSecrets: + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} + initContainers: + - name: init-token + image: {{ .Values.image.enterpriseCli.repository }}:{{ .Values.image.enterpriseCli.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: ["/bin/bash", "-c"] + args: + - |- + bin/getZoneToken.sh + volumeMounts: + - name: idman-etc + mountPath: /opt/cenm/etc + - name: cenm-certs + mountPath: /certs + - name: idman-conf + mountPath: /opt/cenm/bin/getZoneToken.sh + subPath: getZoneToken.sh + - name: idman-conf + mountPath: /opt/cenm/etc/idman.conf + subPath: idman.conf + containers: + - name: idman + image: {{ .Values.image.idman.repository }}:{{ .Values.image.idman.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: ["/bin/bash", "-c"] + args: + - |- + # running the idman service + /bin/sh + # main run + if [ -f bin/identitymanager.jar ] + then + sha256sum bin/identitymanager.jar + sha256sum bin/angel.jar + cp etc/idman.conf etc/identitymanager.conf + cat etc/identitymanager.conf + echo + echo "CENM: starting Identity Manager process ..." 
+ echo + java -jar bin/angel.jar \ + --jar-name=bin/identitymanager.jar \ + --zone-host=zone.{{ .Release.Namespace }} \ + --zone-port={{ .Values.global.cenm.zone.enmPort }} \ + --token=${TOKEN} \ + --service=IDENTITY_MANAGER \ + --working-dir=etc/ \ + --polling-interval=10 \ + --tls=true \ + --tls-keystore=/certs/corda-ssl-identity-manager-keys.jks \ + --tls-keystore-password={{ .Values.global.cenm.sharedCreds.keystore }} \ + --tls-truststore=/certs/corda-ssl-trust-store.jks \ + --tls-truststore-password={{ .Values.global.cenm.sharedCreds.truststore }} \ + --verbose + EXIT_CODE=${?} + else + echo "Missing Identity Manager jar file in bin folder:" + ls -al bin + EXIT_CODE=110 + fi + + if [ "${EXIT_CODE}" -ne "0" ] + then + HOW_LONG={{ .Values.sleepTimeAfterError }} + echo + echo "exit code: ${EXIT_CODE} (error)" + echo "Going to sleep for requested ${HOW_LONG} seconds to let you login and investigate." + fi + sleep ${HOW_LONG} + echo + volumeMounts: + - name: idman-etc + mountPath: /opt/cenm/etc + - name: cenm-certs + mountPath: /certs + - name: idman-conf + mountPath: /opt/cenm/etc/idman.conf + subPath: idman.conf + - name: idman-logs + mountPath: /opt/cenm/logs + - name: idman-h2 + mountPath: /opt/cenm/h2 + - name: logs + image: {{ .Values.image.idman.repository }}:{{ .Values.image.idman.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: ["/bin/bash", "-c"] + args: + - |- + cd /opt/cenm + while true; do tail -f logs/angel-service/*.log 2>/dev/null; sleep 5; done + # in case sth went wrong just wait indefinitely ... + tail -f /dev/null + volumeMounts: + - name: idman-logs + mountPath: /opt/cenm/logs + volumes: + - name: idman-conf + configMap: + name: {{ include "idman.fullname" . }}-conf + defaultMode: 0777 + - name: cenm-certs + secret: + secretName: cenm-certs diff --git a/platforms/r3-corda-ent/charts/cenm-idman/values.yaml b/platforms/r3-corda-ent/charts/cenm-idman/values.yaml index fc49b6144c8..a871be53d87 100644 --- a/platforms/r3-corda-ent/charts/cenm-idman/values.yaml +++ b/platforms/r3-corda-ent/charts/cenm-idman/values.yaml @@ -4,214 +4,80 @@ # SPDX-License-Identifier: Apache-2.0 ############################################################################################## -# Default values for Identity Manager (Idman) service. +# Default values for cenm-idman chart. # This is a YAML-formatted file. # Declare variables to be passed into your templates. - -############################################################# -# Basic Configuration # -############################################################# -# Provide the name of the node -# Eg. nodeName: idman -nodeName: idman -# Debug mode -bashDebug: -prefix: - -# This section contains the Corda Enterprise Idman metadata. -metadata: - # Provide the namespace for the Corda Enterprise Idman. - # Eg. namespace: cenm - namespace: cenm - # Provide any additional labels for the Corda Enterprise Idman. - labels: - -# Provide information regarding the Docker images used. -image: - # Provide the alpine utils image, which is used for all init-containers of deployments/jobs. - # NOTE: The alpine image used is the base alpine image with CURL installed. - # Eg. initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - initContainer: ghcr.io/hyperledger/bevel-alpine:latest - # Provide the image for the main Idman container. - # Eg. 
idmanContainerName: corda/enterprise-identitymanager:1.2-zulu-openjdk8u242 - idmanContainer: corda/enterprise-identitymanager:1.2-zulu-openjdk8u242 - # Provide the image for the main Idman container. - # Eg. enterpriseCli: corda/enterprise-cli:1.5.1-zulu-openjdk8u242 - enterpriseCliContainer: corda/enterprise-cli:1.5.1-zulu-openjdk8u242 - # Provide the docker-registry secret created and stored in kubernetes cluster as a secret. - # Eg. imagePullSecrets: regcred, can add multiple creds - imagePullSecrets: - - name: - # Pull policy to be used for the Docker image - # Eg. pullPolicy: Always - pullPolicy: IfNotPresent - -# This section contains the storage information, used for the Persistent Volume Claims (PVC). -storage: - # Provide the name of the storageclass. - # NOTE: Make sure that the storageclass exist prior to this deployment as - # this chart doesn't create the storageclass. - # Eg. name: cenm - name: cenm - # Provide the memory size for the storage class. - # Eg. memory: 64Mi - memory: 64Mi - -# Required parameter to start any .jar files -# Eg. acceptLicense: YES -acceptLicense: YES - -############################################################# -# HashiCorp Vault Configuration # -############################################################# -# NOTE: Make sure that the vault is already unsealed, intialized and configured to -# use Kubernetes service account token based authentication. -# For more info, see https://www.vaultproject.io/docs/auth/kubernetes - -vault: - # Provide the vault address - # Eg. address: http://vault.example.com:8200 - address: - # Provide the vault role used. - # Eg. role: vault-role - role: vault-role - # Provide the authpath configured to be used. - # Eg. authpath: entcordacenm - authPath: entcordacenm - # Provide the service account name autheticated to vault. - # NOTE: Make sure that the service account is already created and autheticated to use the vault. - # Eg. serviceaccountname: vault-auth +global: serviceAccountName: vault-auth - # Provide the vault path where the certificates are stored - # Eg. certsecretprefix: secret/cenm-org-name - certSecretPrefix: - # The amount of times to retry fetching from/writing to Vault before giving up. - # Eg. retries: 10 - retries: 10 - # The amount of time in seconds to wait after an error occurs when fetching from/writing to Vault. - # Eg. sleepTimeAfterError: 15 - sleepTimeAfterError: 15 - -############################################################# -# Idman Configuration # -############################################################# - -service: - # Idman 'main' service - external: - # Eg. port: 10000 - port: 10000 - # Internal service, inside the K8s cluster - internal: - # Eg. port: 5052 - port: 5052 - revocation: - # Eg. port: 5053 - port: 5053 - # Provide the admin listener port - adminListener: - port: - -############################################################# -# Database Options and Configuration # -############################################################# -# Database configuration + cluster: + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # set to true to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) + vault: + type: hashicorp + role: vault-role + address: + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + proxy: + #This will be the proxy/ingress provider. Can have values "ambassador" or "none" + #Eg. 
provider: "ambassador" + provider: "ambassador" + #This field contains the external URL of the node + #Eg. externalUrlSuffix: test.blockchaincloudpoc.com + externalUrlSuffix: test.blockchaincloudpoc.com + cenm: + sharedCreds: + truststore: password + keystore: password + identityManager: + port: 10000 + revocation: + port: 5053 + internal: + port: 5052 + auth: + port: 8081 + gateway: + port: 8080 + zone: + enmPort: 25000 + +# db related configuration database: - # Java class name to use for the database - # Eg. driverClassName: "org.h2.Driver" - driverClassName: - # The DB connection URL - # Eg. url: "jdbc:h2:file:./h2/identity-manager-persistence;DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=10000;WRITE_DELAY=0;AUTO_SERVER_PORT=0" - url: - # DB user - # Eg. user: "example-db-user" - user: "example-db-user" - # DB password - # Eg. password: "example-db-password" - password: "example-db-password" - # Migrations of database can be run as part of the startup of Idman, if set to true. - # If set to false, it will be run prior to setting up the Idman. - # Eg. runMigration: "true" - runMigration: "true" + driverClassName: "org.h2.Driver" + jdbcDriver: "" + url: "jdbc:h2:file:./h2/identity-manager-persistence;DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=10000;WRITE_DELAY=0;AUTO_SERVER_PORT=0" + user: "idman-db-user" + password: "idman-db-password" + runMigration: true -############################################################# -# Settings # -############################################################# -config: - # Provide volume related specifications - volume: - # Eg. baseDir: /opt/corda - baseDir: /opt/corda - - # Provide the path where the CENM Idman .jar-file is stored - # Eg. jarPath: bin - jarPath: - - # Provide the path where the CENM Service configuration files are stored - # Eg. configPath: etc - configPath: - - # Provide any extra annotations for the PVCs - pvc: - # annotations: - # key: "value" - annotations: {} - - # Provide any extra annotations for the deployment - deployment: - # annotations: - # key: "value" - annotations: {} - - # Specify the maximum size of the memory allocation pool - cordaJar: - # Provide the memory size. - # Eg. memorySize: 4096 (if using kilobytes) - # Eg. memorySize: 512 (if using megabytes) - # Eg. memorySize: 1 (if using gigabytes) - memorySize: - # Provide the unit of greatness for the size, one of three options: - # - k or K for kilobytes - # - m or M for megabytes - # - g or G for gigabytes - # Eg. unit: M - unit: - - # Set memory limits of pod - pod: - resources: - # Provide the limit memory for node - # Eg. limits: 512M - limits: 512M - # Provide the requests memory for node - # Eg. requests: 550M - requests: 550M - - # Provide the number of replicas for your pods - # Eg. replicas: 1 - replicas: 1 - - # Sleep time in seconds, occurs after any error is encountered in start-up - # Eg. 120 - sleepTimeAfterError: 120 -############################################################# -# CENM SERVICES DETAILS # -############################################################# -cenmServices: - # Gateway service name - gatewayName: - # Gateway service api endpoint port - gatewayPort: - # Zone service name - zoneName: - # Zone service enm port - zoneEnmPort: - # Auth service name - authName: - # Auth service port - authPort: - -healthCheck: - # Health Check node port set to get rid of logs pollution - # Eg. nodePort: 0 - nodePort: +storage: + size: 1Gi + dbSize: 5Gi + allowedTopologies: + enabled: false +image: + #Provide the docker secret name in the namespace + #Eg. 
pullSecret: regcred + pullSecret: +#Pull policy to be used for the Docker image +#Eg. pullPolicy: IfNotPresent + pullPolicy: IfNotPresent +#Provide a valid image and version for the enterprise-cli service + enterpriseCli: + repository: corda/enterprise-cli + tag: 1.5.9-zulu-openjdk8u382 +#Provide a valid image and version for the idman service + idman: + repository: corda/enterprise-identitymanager + tag: 1.5.9-zulu-openjdk8u382 + +# Sleep time (in seconds) after an error occurred +sleepTimeAfterError: 300 +# path to base dir +baseDir: /opt/cenm + +# idman internal adminListener port +adminListener: + port: 6000 diff --git a/platforms/r3-corda-ent/charts/cenm-networkmap/Chart.yaml b/platforms/r3-corda-ent/charts/cenm-networkmap/Chart.yaml index e94c3ead03a..52a44420edc 100644 --- a/platforms/r3-corda-ent/charts/cenm-networkmap/Chart.yaml +++ b/platforms/r3-corda-ent/charts/cenm-networkmap/Chart.yaml @@ -5,7 +5,21 @@ ############################################################################################## apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-ent: Deploys the networkmap service." name: cenm-networkmap +description: "R3 Corda Enterprise Network Manager Networkmap Service" version: 1.0.0 +appVersion: "latest" +keywords: + - bevel + - corda + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/r3-corda-ent/charts/cenm-networkmap/README.md b/platforms/r3-corda-ent/charts/cenm-networkmap/README.md index f8774de21b3..8d05f44f0c5 100644 --- a/platforms/r3-corda-ent/charts/cenm-networkmap/README.md +++ b/platforms/r3-corda-ent/charts/cenm-networkmap/README.md @@ -3,225 +3,106 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) - -# Nmap Deployment - -- [Nmap Deployment Helm Chart](#Nmap-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Contributing](#contributing) -- [License](#license) - - -## Nmap Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/cenm-networkmap) deploys the networkmap Service. - - -## Prerequisites ---- -Before deploying the chart please ensure you have the following prerequisites: - -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm is installed. - - -## Chart Structure ---- -This chart has following structue: -``` - ├── cenm-networkmap - │ ├── Chart.yaml - │ ├── templates - │ │ ├── deployment.yaml - │ │ ├── _helpers.tpl - │ │ ├── pvc.yaml - | | |__ configmap.yaml - │ │ └── service.yaml - │ └── values.yaml -``` +# cenm networkmap-service -Type of files used: +This chart is a component of Hyperledger Bevel. The cenm-networkmap chart deploys the R3 Corda Enterprise Network Map service. If Vault is enabled, the keys are stored in the configured Vault as well as in Kubernetes secrets. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details.
-- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed. -- `deployment.yaml` : Deploying the "networkmap" service as containers in a Kubernetes cluster. The init container is defined to perform certain setup tasks before the main containers start. The main container is the primary application container responsible for running the "networkmap" service. Both the main container and log container use volume mounts to access shared storage within the pod. -- `_helpers.tpl` : A template file used for defining custom labels in the Helm chart. -- `pvc.yaml` : A PersistentVolumeClaim (PVC) is a request for storage by a user. -- `service.yaml` : This file defines a Kubernetes Service with multiple ports for protocols and targets, and supports Ambassador proxy annotations for specific configurations when using the "ambassador" proxy provider. -- `configmap.yaml` : ConfigMap resource in Kubernetes with a specific name and namespace, along with labels for identification. -- `chart.yaml` : Provides metadata about the chart, such as its name, version, and description. -- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the metadata, image, storage, Vault, etc. +## TL;DR +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install networkmap bevel/cenm-networkmap +``` - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/cenm-networkmap/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: +## Prerequisites -## Parameters ---- +- Kubernetes 1.19+ +- Helm 3.2.0+ -### Name +If Hashicorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ -| Name | Description | Default Value | -| -----------| -------------------------------------------------- | ------------- | -| nodeName | Provide the name of the node | networkmap | +> **Important**: Ensure the `enterprise-init` `enterprise-cenm` and `enterprise-notary` charts have been installed before installing this. Also check the dependent charts. +## Installing the Chart -### Metadata +To install the chart with the release name `networkmap`: -| Name | Description | Default Value | -| ----------------| ----------------------------------------------------------------| ------------- | -| namespace | Provide the namespace for the Corda Enterprise nmap | cenm | -| labels | Provide any additional labels for the Corda Enterprise nmap | "" | +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install networkmap bevel/cenm-networkmap +``` -### storage +The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. 
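For example, the defaults can be overridden with a small values file. A minimal sketch, assuming a hypothetical `networkmap-values.yaml`; the URL suffix and Vault address are placeholders, and the remaining keys come from the tables below:

```yaml
# networkmap-values.yaml (hypothetical override file)
global:
  proxy:
    provider: ambassador
    externalUrlSuffix: example.blockchaincloudpoc.com   # placeholder suffix
  vault:
    address: http://vault.internal.example.com:8200     # placeholder Vault URL
database:
  user: networkmap-db-user
  password: networkmap-db-password
```

It can then be applied with `helm install networkmap bevel/cenm-networkmap -f networkmap-values.yaml`.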
-| Name | Description | Default Value | -| --------------------- | ------------------------------------------ | ------------- | -| name | Provide the name of the storage class | cenm | -| memory | Provide the memory size for the storage class| 64Mi | +> **Tip**: List all releases using `helm list` -### Image +## Uninstalling the Chart -| Name | Description | Default Value | -| ------------------------ | ---------------------------------------------------------------------------------------| -------------------------------------------------| -| initContainerName | Information about the Docker container used for the init-containers | ghcr.io/hyperledger | -| nmapContainer | Provide the image for the main nmap container | corda/enterprise-networkmap:1.2-zulu-openjdk8u24 | -| enterpriseCliContainer | Provide the docker-registry secret created and stored in kubernetes cluster as a secret| corda/enterprise-cli:1.5.1-zulu-openjdk8u242 | -| ImagePullSecret | Provide the K8s secret that has rights for pulling the image off the registry | "" | -| pullPolicy | Provide the pull policy for Docker images, either Always or IfNotPresent | IfNotPresent | - -### Vault - -| Name | Description | Default Value | -| ------------------------- | --------------------------------------------------------------------------| ------------- | -| address | Address/URL of the Vault server | "" | -| role | Role used for authentication with Vault | vault-role | -| authpath | Authentication path for Vault | entcordacenm | -| serviceAccountName | Provide the already created service account name autheticated to vault | vault-auth | -| certSecretPrefix | Provide the vault path where the certificates are stored | cenm/certs | -| retries | Amount of times to retry fetching from/writing to Vault before giving up | 10 | -| sleepTimeAfterError | Amount of time in seconds wait after an error occurs | 15 | - -### Service - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ------------- | -| external port | Idman 'main' service | 1000 | -| internal port | Internal service, inside the K8s cluster | 5050 | -| revocation port | revocation service | 5053 | -| adminListener port | Provide the admin listener port | "" | - -### ServiceLocations - -| Name | Description | Default Value | -| ------------------------ | -------------------------------------------------| --------------- | -| identityManager | Provide the idman service address | "" | -| name | Name of idman service | idman | -| domain | External domain name of idman service | "" | -| port | Port of idman | 5052 | -| host | Host of idman | 443 | -| notary | Values for notary service | "" | - -### Database - -| Name | Description | Default Value | -| ------------------------ | --------------------------------------------------------------------------------| --------------- | -| driverClassName | Java class name to use for the database | /opt/cenm | -| jdbcDriver | JDBC Driver name | "org.h2.Driver" | -| url | The DB connection URL | jdbc:h2:file | -| user | DB user name | "example-db-user" | -| password | DB password | "example-db-password" | -| runMigration | Option to run database migrations as part of startup | "true" | - -### Config - -| Name | Description | Default Value | -| ------------------------ | -------------------------------------------------------------------------------------- | --------------- | -| baseDir | Provide volume related specifications | /opt/corda | -| jarPath | Provide the path where the CENM 
nmap .jar-file is stored | "bin" | -| configPath | Provide the path where the CENM Service configuration files are stored | "etc" | -| pvc | Provide any extra annotations for the PVCs | "value" | -| cordaJar | Specify the maximum size of the memory allocation pool | "value" | -| deployment | Provide any extra annotations for the deployment | "value" | -| pod | Set memory limits of pod | "" | -| replicas | Provide the number of replicas for your pods | "1" | -| checkRevocation | Whether the NMS will check the certificate revocation list | true | - -### CenmServices - -| Name | Description | Default Value | -| ---------------| ------------------------------------------| ------------- | -| gatewayName | Gateway service name | "" | -| gatewayPort | Gateway service api endpoint port | "" | -| zoneName | Zone service name | "" | -| zoneEnmPort | Zone service enm port | "" | -| authName | Name of the auth service | "" | -| authPort | Auth Service port | "" | - -### Healthcheck - -| Name | Description | Default Value | -| ----------------------------| -------------------------------------------------------------| ------------- | -| nodePort | Health Check node port set to get rid of logs pollution | 0 | - -### nmapUpdate - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ----------------------------------| -| addNotaries | Add additional notary information | "" | -| nodeinfoFileName | Notary nodeinfo file name | nodeInfo-056A07FF98F9872C4F4F9 | -| nodeinfoFile | Notary nodeinfoFile in base64 format | Y29yZGEBAAAAgMViAAAAAAAB0AAAFqEAA | -| validating | set notary validating true or false | "true" | - - - -## Deployment --- -To deploy the nmap Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/cenm-networkmap/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install, upgrade, verify delete the chart: - -To install the chart: -```bash -helm repo add bevel https://hyperledger.github.io/bevel/ -helm install ./cenm-networkmap -``` +To uninstall/delete the `networkmap` deployment: -To upgrade the chart: ```bash -helm upgrade ./cenm-networkmap +helm uninstall networkmap ``` -To verify the deployment: -```bash -kubectl get jobs -n -``` -Note : Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. +The command removes all the Kubernetes components associated with the chart and deletes the release. -To delete the chart: -```bash -helm uninstall -``` -Note : Replace `` with the desired name for the release. +## Parameters +### Global parameters +These parameters are referred to identically in each parent or child chart +| Name | Description | Default Value | +|--------|---------|-------------| +|`global.serviceAccountName` | The service account name that will be used for Vault auth management| `vault-auth` | +| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. Currently only `aws` and `minikube` are tested | `aws` | +| `global.cluster.cloudNativeServices` | Only `false` is implemented; `true`, to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure), is reserved for future use | `false` | +| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` is supported.
| `hashicorp` | +| `global.vault.role` | Role used for authentication with Vault | `vault-role` | +| `global.vault.address`| URL of the Vault server. | `""` | +| `global.vault.authPath` | Authentication path for Vault | `supplychain` | +| `global.vault.secretEngine` | The Vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | The Vault secret prefix, which must start with `data/` | `data/supplychain` | +| `global.proxy.provider` | The proxy or Ingress provider. Can be `none` or `ambassador` | `ambassador` | +| `global.proxy.externalUrlSuffix` | The external URL suffix at which the CENM services will be available | `test.blockchaincloudpoc.com` | +| `global.cenm.sharedCreds.truststore` | The truststore password for the PKI-created truststores | `password` | +| `global.cenm.sharedCreds.keystore` | The keystore password for the PKI-created keystores | `password` | +| `global.cenm.identityManager.port` | The port for identity manager issuance | `10000` | +| `global.cenm.identityManager.revocation.port` | The port for identity manager revocation | `5053` | +| `global.cenm.identityManager.internal.port` | The port for the identity manager internal listener | `5052` | +| `global.cenm.auth.port` | The port for the auth API | `8081` | +| `global.cenm.gateway.port` | The port for the gateway API | `8080` | +| `global.cenm.zone.enmPort` | The ENM port for the zone service | `25000` | + +### Storage + +| Name | Description | Default Value | +|--------|---------|-------------| +| `storage.allowedTopologies.enabled` | Check [bevel-storageclass](../../../shared/charts/bevel-storageclass/README.md) for details | `false` | - -## Contributing --- -If you encounter any bugs, have suggestions, or would like to contribute to the [nmap Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/cenm-networkmap), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - +### Image +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | +| `image.pullPolicy` | Pull policy to be used for the Docker images | `IfNotPresent` | +| `image.networkmap.repository` | CENM networkmap image repository | `corda/enterprise-networkmap`| +| `image.networkmap.tag` | CENM networkmap image tag as per version | `1.5.9-zulu-openjdk8u382`| +| `image.enterpriseCli.repository` | CENM enterprise CLI image repository | `corda/enterprise-cli`| +| `image.enterpriseCli.tag` | CENM enterprise CLI image tag as per version | `1.5.9-zulu-openjdk8u382`| + +### Database Settings +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `database.driverClassName` | DB driver class name | `org.h2.Driver` | +| `database.jdbcDriver` | DB JDBC driver | `""` | +| `database.url` | DB connection URL | `jdbc:h2:file:./h2/networkmap-manager-persistence;DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=10000;WRITE_DELAY=0;AUTO_SERVER_PORT=0` | +| `database.user` | DB user name | `networkmap-db-user` | +| `database.password` | DB password | `networkmap-db-password` | + ## License This chart is licensed under the Apache v2.0 license.
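+As a usage note for the parameters documented above, an existing release can pick up new values with `helm upgrade`; this is only a sketch, and the credential values shown are placeholders, not recommendations:
+```bash
+# Override database credentials and the image pull policy on an existing release
+helm upgrade networkmap bevel/cenm-networkmap \
+  --set database.user=my-db-user \
+  --set database.password=my-db-password \
+  --set image.pullPolicy=Always
+```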
-Copyright © 2023 Accenture +Copyright © 2024 Accenture ### Attribution diff --git a/platforms/r3-corda-ent/charts/cenm-networkmap/files/getZoneToken.sh b/platforms/r3-corda-ent/charts/cenm-networkmap/files/getZoneToken.sh index cbcfe1ba2cb..010cc506673 100644 --- a/platforms/r3-corda-ent/charts/cenm-networkmap/files/getZoneToken.sh +++ b/platforms/r3-corda-ent/charts/cenm-networkmap/files/getZoneToken.sh @@ -1,21 +1,12 @@ #!/bin/sh -set -x - -echo "Waiting for notary-nodeinfo/network-parameters-initial.conf ..." -until [ -f notary-nodeinfo/network-parameters-initial.conf ] -do - sleep 10 -done -echo "Waiting for notary-nodeinfo/network-parameters-initial.conf ... done." - -if [ ! -f {{ .Values.config.configPath }}/token ] +if [ ! -f /opt/cenm/etc/token ] then EXIT_CODE=1 until [ "${EXIT_CODE}" -eq "0" ] do - echo "Trying to login to gateway:8080 ..." - java -jar bin/cenm-tool.jar context login -s http://{{ .Values.cenmServices.gatewayName }}.{{ .Values.metadata.namespace }}:{{ .Values.cenmServices.gatewayPort }} -u network-maintainer -p p4ssWord + echo "Trying to login to gateway.{{ .Release.Namespace }}:{{ .Values.global.cenm.gateway.port }} ..." + java -jar bin/cenm-tool.jar context login -s http://gateway.{{ .Release.Namespace }}:{{ .Values.global.cenm.gateway.port }} -u network-maintainer -p p4ssWord EXIT_CODE=${?} if [ "${EXIT_CODE}" -ne "0" ] then @@ -25,13 +16,8 @@ then break fi done - cat ./notary-nodeinfo/network-parameters-initial.conf ZONE_TOKEN=$(java -jar bin/cenm-tool.jar zone create-subzone \ - --config-file={{ .Values.config.configPath }}/nmap.conf --network-map-address={{ .Values.nodeName }}.{{ .Values.metadata.namespace }}:{{ .Values.service.adminListener.port }} \ + --config-file=etc/networkmap.conf --network-map-address=cenm-networkmap.{{ .Release.Namespace }}:{{ .Values.adminListener.port }} \ --network-parameters=./notary-nodeinfo/network-parameters-initial.conf --label=Main --label-color='#941213' --zone-token) - echo ${ZONE_TOKEN} - echo ${ZONE_TOKEN} > {{ .Values.config.configPath }}/token - {{ if eq .Values.bashDebug true }} - cat {{ .Values.config.configPath }}/token - {{ end }} + echo ${ZONE_TOKEN} > etc/token fi diff --git a/platforms/r3-corda-ent/charts/cenm-networkmap/files/networkmap.conf b/platforms/r3-corda-ent/charts/cenm-networkmap/files/networkmap.conf new file mode 100644 index 00000000000..3ba33e798cb --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm-networkmap/files/networkmap.conf @@ -0,0 +1,82 @@ +address = "0.0.0.0:{{ .Values.global.cenm.networkmap.port }}" +pollingInterval = 10000 +checkRevocation = "true" + +database { + driverClassName = "{{ .Values.database.driverClassName }}" + url = "{{ .Values.database.url }}" + user = "{{ .Values.database.user }}" + password = {{ .Values.database.password }} + runMigration = {{ .Values.database.runMigration }} +} + +enmListener = { + port = {{ .Values.global.cenm.networkmap.internal.port }} + ssl = { + keyStore = { + location = "/certs/corda-ssl-network-map-keys.jks" + password = {{ .Values.global.cenm.sharedCreds.keystore }} + } + trustStore = { + location = "/certs/corda-ssl-trust-store.jks" + password = {{ .Values.global.cenm.sharedCreds.truststore }} + } + } +} + +identityManager = { + host = idman.{{ .Release.Namespace }} + port = {{ .Values.global.cenm.identityManager.internal.port }} + ssl = { + keyStore = { + location = "/certs/corda-ssl-network-map-keys.jks" + password = {{ .Values.global.cenm.sharedCreds.keystore }} + } + trustStore = { + location = "/certs/corda-ssl-trust-store.jks" + 
password = {{ .Values.global.cenm.sharedCreds.truststore }} + } + } +} + +revocation = { + host = idman.{{ .Release.Namespace }} + port = {{ .Values.global.cenm.identityManager.revocation.port }} + ssl = { + keyStore = { + location = "/certs/corda-ssl-network-map-keys.jks" + password = {{ .Values.global.cenm.sharedCreds.keystore }} + } + trustStore = { + location = "/certs/corda-ssl-trust-store.jks" + password = {{ .Values.global.cenm.sharedCreds.truststore }} + } + } +} + +authObjectId=1 +authServiceConfig = { + disableAuthentication=false + host="auth.{{ .Release.Namespace }}" + port={{ .Values.global.cenm.auth.port }} + trustStore = { + location = "/certs/corda-ssl-trust-store.jks" + password = {{ .Values.global.cenm.sharedCreds.truststore }} + } + issuer="http://test" + leeway=5s +} + +adminListener = { + port = {{ .Values.adminListener.port }} + ssl = { + keyStore = { + location = /certs/corda-ssl-network-map-keys.jks + password = {{ .Values.global.cenm.sharedCreds.keystore }} + } + trustStore = { + location = /certs/corda-ssl-trust-store.jks + password = {{ .Values.global.cenm.sharedCreds.truststore }} + } + } +} diff --git a/platforms/r3-corda-ent/charts/cenm-networkmap/files/run.sh b/platforms/r3-corda-ent/charts/cenm-networkmap/files/run.sh new file mode 100644 index 00000000000..abc82894bcf --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm-networkmap/files/run.sh @@ -0,0 +1,47 @@ +#!/bin/sh + +# +# main run +# +if [ -f bin/networkmap.jar ] +then + echo + echo "CENM: starting Network Map process ..." + echo + java -jar bin/angel.jar \ + --jar-name=bin/networkmap.jar \ + --zone-host=zone.{{ .Release.Namespace }} \ + --zone-port={{ .Values.global.cenm.zone.enmPort }} \ + --token=${TOKEN} \ + --service=NETWORK_MAP \ + --polling-interval=10 \ + --working-dir=/opt/cenm/etc/ \ + --network-truststore=/certs/network-root-truststore.jks \ + --truststore-password={{ .Values.global.cenm.sharedCreds.truststore }} \ + --root-alias=cordarootca \ + --network-parameters-file=/opt/cenm/notary-nodeinfo/network-parameters-initial.conf \ + --tls=true \ + --tls-keystore=/certs/corda-ssl-network-map-keys.jks \ + --tls-keystore-password={{ .Values.global.cenm.sharedCreds.keystore }} \ + --tls-truststore=/certs/corda-ssl-trust-store.jks \ + --tls-truststore-password={{ .Values.global.cenm.sharedCreds.truststore }} \ + --verbose + EXIT_CODE=${?} +else + echo "Missing Network Map jar file in bin/ directory:" + ls -al bin + EXIT_CODE=110 +fi + +if [ "${EXIT_CODE}" -ne "0" ] +then + HOW_LONG={{ .Values.sleepTimeAfterError }} + echo + echo "Network Map failed - exit code: ${EXIT_CODE} (error)" + echo + echo "Going to sleep for requested 120 seconds to let you login and investigate." 
+ echo +fi + +sleep ${HOW_LONG} +echo diff --git a/platforms/r3-corda-ent/charts/cenm-networkmap/requirements.yaml b/platforms/r3-corda-ent/charts/cenm-networkmap/requirements.yaml new file mode 100644 index 00000000000..895f0a0e1cf --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm-networkmap/requirements.yaml @@ -0,0 +1,7 @@ +dependencies: + - name: bevel-storageclass + alias: storage + repository: "file://../../../shared/charts/bevel-storageclass" + tags: + - storage + version: ~1.0.0 diff --git a/platforms/r3-corda-ent/charts/cenm-networkmap/templates/_helpers.tpl b/platforms/r3-corda-ent/charts/cenm-networkmap/templates/_helpers.tpl index 7f9b0dc6131..4a9f69dba35 100644 --- a/platforms/r3-corda-ent/charts/cenm-networkmap/templates/_helpers.tpl +++ b/platforms/r3-corda-ent/charts/cenm-networkmap/templates/_helpers.tpl @@ -1,5 +1,29 @@ -{{- define "labels.custom" -}} - {{- range $key, $val := .Values.metadata.labels -}} - {{- $key -}}: {{- $val -}} - {{- end -}} +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "networkmap.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "networkmap.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "networkmap.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- end -}} diff --git a/platforms/r3-corda-ent/charts/cenm-networkmap/templates/configmap.yaml b/platforms/r3-corda-ent/charts/cenm-networkmap/templates/configmap.yaml index c0d8c65e409..c0599a0a598 100644 --- a/platforms/r3-corda-ent/charts/cenm-networkmap/templates/configmap.yaml +++ b/platforms/r3-corda-ent/charts/cenm-networkmap/templates/configmap.yaml @@ -1,14 +1,28 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + --- apiVersion: v1 kind: ConfigMap metadata: - name: {{ .Values.prefix }}-nmap-conf - namespace: {{ .Values.metadata.namespace }} + name: {{ include "networkmap.fullname" . }}-conf + namespace: {{ .Release.Namespace }} labels: - app.kubernetes.io/name: {{ .Values.nodeName }} + app.kubernetes.io/name: {{ include "networkmap.fullname" . }} helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - {{- include "labels.custom" . | nindent 2 }} + app.kubernetes.io/part-of: {{ include "networkmap.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} data: + networkmap.conf: |+ +{{ tpl (.Files.Get "files/networkmap.conf") . | indent 4 }} + getZoneToken.sh: |+ - {{ tpl (.Files.Get "files/getZoneToken.sh") . | nindent 4 }} +{{ tpl (.Files.Get "files/getZoneToken.sh") . 
| nindent 4 }} + + run.sh: |+ +{{ tpl (.Files.Get "files/run.sh") . | nindent 4 }} diff --git a/platforms/r3-corda-ent/charts/cenm-networkmap/templates/deployment.yaml b/platforms/r3-corda-ent/charts/cenm-networkmap/templates/deployment.yaml deleted file mode 100644 index 49b4f98ca0f..00000000000 --- a/platforms/r3-corda-ent/charts/cenm-networkmap/templates/deployment.yaml +++ /dev/null @@ -1,632 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} - {{- if .Values.config.deployment.annotations }} - annotations: -{{ toYaml .Values.config.deployment.annotations | indent 8 }} - {{- end }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - serviceName: {{ .Values.nodeName }} - replicas: 1 - selector: - matchLabels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - updateStrategy: - type: RollingUpdate - template: - metadata: - labels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - serviceAccountName: {{ $.Values.vault.serviceAccountName }} - securityContext: - fsGroup: 1000 - initContainers: - - name: init-certificates - image: {{ .Values.image.initContainer }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authPath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.config.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certSecretPrefix }} - - name: MOUNT_PATH - value: "/DATA" - - name: NODEINFO_MOUNT_PATH - value: "/notary-nodeinfo" - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code and curl_response - $curl_response" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - # Setting up the environment to get secrets from Vault - echo "Getting secrets from Vault Server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . 
else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - echo "logged into vault" - - # Creating dirs for storing certificates - mkdir -p ${MOUNT_PATH}/key-stores; - mkdir -p ${MOUNT_PATH}/trust-stores; - mkdir -p ${MOUNT_PATH}/crl-files; - - # Getting node Info File from Vault - - notaries=$(echo {{ $.Values.serviceLocations.notary }} | tr -d '[]') - file="${NODEINFO_MOUNT_PATH}/network-parameters-initial.conf" - echo "notaries : [" > $file - for notary in $notaries - do - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/${notary}/nodeInfo | jq -r 'if .errors then . else . end') - data_info="$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data["data"]')" - - if [ "$data_info" == "null" ] - then - sleep {{ $.Values.vault.sleepTimeAfterError }} - else - validateVaultResponse "${CERTS_SECRET_PREFIX}/${notary}/nodeInfo" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - notary_nodeinfo=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["nodeInfoFile"]') - notary_nodeinfo_name=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["nodeInfoName"]') - validating=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["validating"]') - echo "${notary_nodeinfo}" | base64 -d > ${NODEINFO_MOUNT_PATH}/${notary_nodeinfo_name} - echo "Successfully got node info file" - echo " {" >> $file - echo " notaryNodeInfoFile: \"notary-nodeinfo/${notary_nodeinfo_name}\"" >> $file - echo " validating = \"${validating}\"" >> $file - echo " }" >> $file - fi - done - echo "]" >> $file - echo "minimumPlatformVersion = 4" >> $file - echo "maxMessageSize = 10485760" >> $file - echo "maxTransactionSize = 10485760" >> $file - echo "eventHorizonDays = 1" >> $file - - # Fetching ssl-idman certificates from vault - COUNTER=1 - while [ "$COUNTER" -lt {{ $.Values.vault.retries }} ] - do - # get keystores from vault to see if certificates are created and put in vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ $.Values.nodeName }}/certs | jq -r 'if .errors then . else . end') - data_info="$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data["data"]')" - - if [ "$data_info" == "null" ] - then - sleep {{ $.Values.vault.sleepTimeAfterError }} - else - validateVaultResponse "${CERTS_SECRET_PREFIX}/{{ $.Values.nodeName }}/certs" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - idm_ssl=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["corda-ssl-network-map-keys.jks"]') - echo "${idm_ssl}" | base64 -d > ${MOUNT_PATH}/key-stores/corda-ssl-network-map-keys.jks - echo "Successfully got ssl idman certifcates" - break - fi - COUNTER=`expr "$COUNTER" + 1` - done - - if [ "$COUNTER" -ge {{ $.Values.vault.retries }} ] - then - # printing number of trial done before giving up - echo "$COUNTER" - echo "ssl idman certificates might not have been put in vault. Giving up!!!" - exit 1 - fi - echo "Done" - - # Fetching corda-ssl-trust-store certificates from vault - COUNTER=1 - while [ "$COUNTER" -lt {{ $.Values.vault.retries }} ] - do - # get keystores from vault to see if certificates are created and put in vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/root/certs | jq -r 'if .errors then . else . 
end') - data_info="$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data["data"]')" - - if [ "$data_info" == "null" ] - then - sleep {{ $.Values.vault.sleepTimeAfterError }} - else - validateVaultResponse "${CERTS_SECRET_PREFIX}/root/certs" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - root_ssl=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["corda-ssl-trust-store.jks"]') - echo "${root_ssl}" | base64 -d > ${MOUNT_PATH}/trust-stores/corda-ssl-trust-store.jks - - root_trust=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["network-root-truststore.jks"]') - echo "${root_trust}" | base64 -d > ${MOUNT_PATH}/trust-stores/network-root-truststore.jks - - echo "Successfully got root ssl and trust_store certifcates" - break - fi - COUNTER=`expr "$COUNTER" + 1` - done - - if [ "$COUNTER" -ge {{ $.Values.vault.retries }} ] - then - # printing number of trial done before giving up - echo "$COUNTER" - echo "root ssl certificates might not have been put in vault. Giving up!!!" - exit 1 - fi - echo "Done" - - # Fetching crl certificates from vault - # TODO: Check if CRL certificates are required for NMS - COUNTER=1 - while [ "$COUNTER" -lt {{ $.Values.vault.retries }} ] - do - # get crls from vault to see if certificates are created and put in vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ $.Values.serviceLocations.identityManager.name }}/crls | jq -r 'if .errors then . else . end') - data_info="$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data["data"]')" - - if [ "$data_info" == "null" ] - then - sleep {{ $.Values.vault.sleepTimeAfterError }} - else - validateVaultResponse "${CERTS_SECRET_PREFIX}/{{ $.Values.serviceLocations.identityManager.name }}/crls" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - tls_crl=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["tls.crl"]') - echo "${tls_crl}" | base64 -d > ${MOUNT_PATH}/crl-files/tls.crl - - root_crl=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["root.crl"]') - echo "${root_crl}" | base64 -d > ${MOUNT_PATH}/crl-files/root.crl - - subordinate_crl=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["subordinate.crl"]') - echo "${subordinate_crl}" | base64 -d > ${MOUNT_PATH}/crl-files/subordinate.crl - - echo "Successfully got crl certifcates" - break - fi - COUNTER=`expr "$COUNTER" + 1` - done - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ .Values.serviceLocations.identityManager.name }}/tlscerts | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/{{ .Values.serviceLocations.identityManager.name }}/tlscerts" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - IDMAN_CERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["tlscacerts"]') - echo "${IDMAN_CERT}" | base64 -d > ${MOUNT_PATH}/idman.crt - - #fetching networkmap ssl credentials from vault - mkdir -p ${MOUNT_PATH}/ssl - OUTPUT_PATH=${MOUNT_PATH}/ssl; - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials/ssl | jq -r 'if .errors then . else . 
end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials/ssl" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - NETWORKMAP_SSL=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["networkmap"]') - echo "${NETWORKMAP_SSL}"> ${OUTPUT_PATH}/networkmapssl - - #fetching truststore ssl credentials from vault - mkdir -p ${MOUNT_PATH}/truststore - OUTPUT_PATH=${MOUNT_PATH}/truststore; - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials/truststore | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials/truststore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - ROOTCA_TRUSTSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["rootca"]') - echo "${ROOTCA_TRUSTSTORE}"> ${OUTPUT_PATH}/rootcats - SSL_TRUSTSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["ssl"]') - echo "${SSL_TRUSTSTORE}"> ${OUTPUT_PATH}/sslts - - if [ "$COUNTER" -ge {{ $.Values.vault.retries }} ] - then - # printing number of trial done before giving up - echo "$COUNTER" - echo "crl certificates might not have been put in vault. Giving up!!!" - exit 1 - fi - echo "Done all certificates" - volumeMounts: - - name: certificates - mountPath: /DATA - - name: notary-nodeinfo - mountPath: /notary-nodeinfo - - name: setnparam - image: "{{ .Values.image.nmapContainer }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - env: - - name: ACCEPT_LICENSE - value: "{{required "You must accept the license agreement to run this software" .Values.acceptLicense }}" - command: ["/bin/bash", "-c"] - args: - - |- - echo 'address = "0.0.0.0:{{ .Values.service.external.port }}" - pollingInterval = 10000 - checkRevocation = "{{ .Values.config.checkRevocation }}" - database { - driverClassName = "{{ .Values.database.driverClassName }}" - url = "{{ .Values.database.url }}" - user = "{{ .Values.database.user }}" - password = "{{ .Values.database.password }}" - runMigration = "{{ .Values.database.runMigration }}" - } - - enmListener = { - port = {{ .Values.service.internal.port }} - ssl = { - keyStore = { - location = "{{ .Values.config.volume.baseDir }}/DATA/key-stores/corda-ssl-network-map-keys.jks" - password = NETWORKMAP_SSL - keyPassword = NETWORKMAP_SSL - } - trustStore = { - location = "{{ .Values.config.volume.baseDir }}/DATA/trust-stores/corda-ssl-trust-store.jks" - password = SSL_TRUSTSTORE - } - } - } - - identityManager = { - host = {{ .Values.serviceLocations.identityManager.host }} - port = {{ .Values.serviceLocations.identityManager.port }} - ssl = { - keyStore = { - location = "{{ .Values.config.volume.baseDir }}/DATA/key-stores/corda-ssl-network-map-keys.jks" - password = NETWORKMAP_SSL - keyPassword = NETWORKMAP_SSL - } - trustStore = { - location = "{{ .Values.config.volume.baseDir }}/DATA/trust-stores/corda-ssl-trust-store.jks" - password = SSL_TRUSTSTORE - } - } - } - - revocation = { - host = {{ .Values.serviceLocations.identityManager.host }} - port = {{ .Values.service.revocation.port }} - ssl = { - keyStore = { - location = "{{ .Values.config.volume.baseDir }}/DATA/key-stores/corda-ssl-network-map-keys.jks" - password = NETWORKMAP_SSL - keyPassword = NETWORKMAP_SSL - } - trustStore = { - location = "{{ .Values.config.volume.baseDir }}/DATA/trust-stores/corda-ssl-trust-store.jks" - password = SSL_TRUSTSTORE - } - } - } - - authObjectId=1 - authServiceConfig = { - disableAuthentication=false - host="{{ .Values.cenmServices.authName }}.{{ .Values.metadata.namespace }}" - port={{ 
.Values.cenmServices.authPort }} - trustStore = { - location = "{{ .Values.config.volume.baseDir }}/DATA/trust-stores/corda-ssl-trust-store.jks" - password = SSL_TRUSTSTORE - } - issuer="http://test" - leeway=5s - } - - adminListener = { - port = {{ .Values.service.adminListener.port }} - ssl = { - keyStore = { - location = {{ .Values.config.volume.baseDir }}/DATA/key-stores/corda-ssl-network-map-keys.jks - password = NETWORKMAP_SSL - } - trustStore = { - location = {{ .Values.config.volume.baseDir }}/DATA/trust-stores/corda-ssl-trust-store.jks - password = SSL_TRUSTSTORE - } - } - }' > etc/nmap.conf - - export NETWORKMAP_SSL=$(cat {{ .Values.config.volume.baseDir }}/DATA/ssl/networkmapssl) - sed -i -e "s*NETWORKMAP_SSL*${NETWORKMAP_SSL}*g" etc/nmap.conf - export SSL_TRUSTSTORE=$(cat {{ .Values.config.volume.baseDir }}/DATA/truststore/sslts) - sed -i -e "s*SSL_TRUSTSTORE*${SSL_TRUSTSTORE}*g" etc/nmap.conf - - if [ ! -f {{ .Values.config.configPath }}/network-parameters-initial-set-succesfully ] - then - echo "Setting network parameters for deployment..." - echo "Waiting for notary-nodeinfo/network-parameters-initial.conf ..." - - if [ ! -f {{ .Values.config.configPath }}/network-parameters-initial-set-succesfully ] - then - until [ -f notary-nodeinfo/network-parameters-initial.conf ] - do - sleep 1 - done - fi - ROOTCA_TRUSTSTORE=$(cat {{ .Values.config.volume.baseDir }}/DATA/truststore/rootcats) - echo "Waiting for notary-nodeinfo/network-parameters-initial.conf ... done." - ls -al notary-nodeinfo/network-parameters-initial.conf - cp notary-nodeinfo/network-parameters-initial.conf {{ .Values.config.configPath }}/ - - echo "Setting initial network parameters ..." - java -jar {{ .Values.config.jarPath }}/networkmap.jar \ - -f {{ .Values.config.configPath }}/nmap.conf \ - --set-network-parameters {{ .Values.config.configPath }}/network-parameters-initial.conf \ - --network-truststore DATA/trust-stores/network-root-truststore.jks \ - --truststore-password ${ROOTCA_TRUSTSTORE} \ - --root-alias cordarootca - - EXIT_CODE=${?} - - if [ "${EXIT_CODE}" -ne "0" ] - then - HOW_LONG=120 - echo - echo "Network Map: setting network parameters failed - exit code: ${EXIT_CODE} (error)" - echo - echo "Going to sleep for requested ${HOW_LONG} seconds to let you login and investigate." - echo - else - HOW_LONG=0 - echo - echo "Network Map: initial network parameters have been set." - echo "No errors." - echo - touch {{ .Values.config.configPath }}/network-parameters-initial-set-succesfully - fi - - sleep ${HOW_LONG} - exit ${EXIT_CODE} - else - echo "Already set, no need to set network parameters" - fi - {{- if .Values.nmapUpdate }} - echo "Starting networkmap update..." 
- rm {{ .Values.config.configPath }}/network-parameters-initial-set-succesfully - echo "# This is a file for updating network parameters" > {{ .Values.config.configPath }}/network-parameters-update.conf - cat {{ .Values.config.configPath }}/network-parameters-initial.conf >> {{ .Values.config.configPath }}/network-parameters-update.conf - export updateDeadline=$(date -u +'%Y-%m-%dT%H:%M:%S.%3NZ' -d +10minutes) - echo $updateDeadline - echo 'parametersUpdate { - description = "Update network parameters settings" - updateDeadline = "UPDATEDEADLINE" - }' >> {{ .Values.config.configPath }}/network-parameters-update.conf - sed -i -e "s*UPDATEDEADLINE*${updateDeadline}*g" {{ .Values.config.configPath }}/network-parameters-update.conf - # Adding notaries - {{- $confPath := .Values.config.configPath -}} - {{- range .Values.addNotaries }} - echo {{ .notary.nodeinfoFile }} | base64 -d > notary-nodeinfo/{{ .notary.nodeinfoFileName }} - sed -i '3i {' {{ $confPath }}/network-parameters-update.conf - sed -i '4i notaryNodeInfoFile: "notary-nodeinfo/{{ .notary.nodeinfoFileName }}"' {{ $confPath }}/network-parameters-update.conf - sed -i '5i validating = "{{ .notary.validating }}"' {{ $confPath }}/network-parameters-update.conf - sed -i '6i },' {{ $confPath }}/network-parameters-update.conf - {{- end }} - echo "CENM: Updating Networkmap service network params ..." - ROOTCA_TRUSTSTORE=$(cat {{ .Values.config.volume.baseDir }}/DATA/truststore/rootcats) - java -jar {{ .Values.config.jarPath }}/networkmap.jar \ - -f {{ .Values.config.configPath }}/nmap.conf \ - --set-network-parameters {{ .Values.config.configPath }}/network-parameters-update.conf \ - --network-truststore DATA/trust-stores/network-root-truststore.jks \ - --truststore-password ${ROOTCA_TRUSTSTORE} \ - --root-alias cordarootca - - touch {{ .Values.config.configPath }}/network-parameters-initial-set-succesfully - {{- end }} - volumeMounts: - - name: notary-nodeinfo - mountPath: {{ .Values.config.volume.baseDir }}/notary-nodeinfo - - name: {{ .Values.nodeName }}-etc - mountPath: {{ .Values.config.volume.baseDir }}/etc - - name: certificates - mountPath: {{ .Values.config.volume.baseDir }}/DATA - - name: {{ .Values.nodeName }}-pvc-logs - mountPath: {{ .Values.config.volume.baseDir }}/logs - - name: {{ .Values.nodeName }}-pvc-h2 - mountPath: {{ .Values.config.volume.baseDir }}/h2 - resources: - requests: - memory: {{ .Values.config.pod.resources.requests }} - limits: - memory: {{ .Values.config.pod.resources.limits }} - - name: init-token - env: - - name: ACCEPT_LICENSE - value: "{{ .Values.acceptLicense }}" - image: "{{ .Values.image.enterpriseCliContainer }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: ["/bin/bash", "-c"] - args: - - |- - pwd - cp CM/*.sh bin/ - chmod +x bin/* - ls -alR - bin/getZoneToken.sh - resources: - {{- toYaml .Values.resources | nindent 12 }} - volumeMounts: - - name: {{ .Values.nodeName }}-etc - mountPath: {{ .Values.config.volume.baseDir }}/etc - - name: nmap-conf - mountPath: {{ .Values.config.volume.baseDir }}/CM/getZoneToken.sh - subPath: getZoneToken.sh - - name: certificates - mountPath: {{ .Values.config.volume.baseDir }}/DATA - - name: notary-nodeinfo - mountPath: {{ .Values.config.volume.baseDir }}/notary-nodeinfo - containers: - - name: main - image: "{{ .Values.image.nmapContainer }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - env: - - name: ACCEPT_LICENSE - value: "{{required "You must accept the license agreement to run this software" .Values.acceptLicense }}" - command: 
["/bin/bash", "-c"] - args: - - |- - until [ -f {{ .Values.config.configPath }}/network-parameters-initial-set-succesfully ] - do - sleep 5 - echo "Waiting for network parameters to be set..." - done - echo "Network parameters have been set!" - yes | keytool -importcert -file {{ .Values.config.volume.baseDir }}/DATA/idman.crt -storepass changeit -alias {{ .Values.serviceLocations.identityManager.domain }} -keystore /usr/lib/jvm/zulu-8-amd64/jre/lib/security/cacerts - - # Variable for keys/trust store passwords - ROOTCA_TRUSTSTORE=$(cat {{ .Values.config.volume.baseDir }}/DATA/truststore/rootcats) - NETWORKMAP_SSL=$(cat {{ .Values.config.volume.baseDir }}/DATA/ssl/networkmapssl) - SSL_TRUSTSTORE=$(cat {{ .Values.config.volume.baseDir }}/DATA/truststore/sslts) - - if [ -f {{ .Values.config.jarPath }}/networkmap.jar ] - then - echo - echo "CENM: starting Networkmap service ..." - echo - TOKEN=$(cat {{ .Values.config.configPath }}/token) - ls -alR - set -x - java -jar {{ .Values.config.jarPath }}/angel.jar \ - --jar-name={{ .Values.config.jarPath }}/networkmap.jar \ - --zone-host={{ .Values.cenmServices.zoneName }}.{{ .Values.metadata.namespace }} \ - --zone-port={{ .Values.cenmServices.zoneEnmPort }} \ - --token=${TOKEN} \ - --service=NETWORK_MAP \ - --polling-interval=10 \ - --working-dir=etc/ \ - --network-truststore={{ .Values.config.volume.baseDir }}/DATA/trust-stores/network-root-truststore.jks \ - --truststore-password=${ROOTCA_TRUSTSTORE} \ - --root-alias=cordarootca \ - --network-parameters-file={{ .Values.config.configPath }}/nmap.conf \ - --tls=true \ - --tls-keystore={{ .Values.config.volume.baseDir }}/DATA/key-stores/corda-ssl-network-map-keys.jks \ - --tls-keystore-password=${NETWORKMAP_SSL} \ - --tls-truststore={{ .Values.config.volume.baseDir }}/DATA/trust-stores/corda-ssl-trust-store.jks \ - --tls-truststore-password=${SSL_TRUSTSTORE} \ - --verbose - EXIT_CODE=${?} - else - echo "Missing networkmap jar file in {{ .Values.config.jarPath }} folder:" - ls -al {{ .Values.config.jarPath }} - EXIT_CODE=110 - fi - volumeMounts: - - name: notary-nodeinfo - mountPath: {{ .Values.config.volume.baseDir }}/notary-nodeinfo - - name: {{ .Values.nodeName }}-pvc-h2 - mountPath: {{ .Values.config.volume.baseDir }}/h2 - - name: {{ .Values.nodeName }}-etc - mountPath: {{ .Values.config.volume.baseDir }}/etc - - name: {{ .Values.nodeName }}-pvc-logs - mountPath: {{ .Values.config.volume.baseDir }}/logs - - name: certificates - mountPath: {{ .Values.config.volume.baseDir }}/DATA - resources: - requests: - memory: {{ .Values.config.pod.resources.requests }} - limits: - memory: {{ .Values.config.pod.resources.limits }} - - name: logs - image: "{{ .Values.image.nmapContainer }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - env: - - name: ACCEPT_LICENSE - value: "{{required "You must accept the license agreement to run this software" .Values.acceptLicense }}" - command: ["/bin/bash", "-c"] - args: - - |- - cd {{ .Values.config.volume.baseDir }}/ - while true; do tail -f logs/network-map/*.log 2>/dev/null; sleep 5; done - # in case sth went wrong just wait indefinitely ... - tail -f /dev/null - volumeMounts: - - name: {{ .Values.nodeName }}-pvc-logs - mountPath: {{ .Values.config.volume.baseDir }}/logs - {{- with .Values.image.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . 
| nindent 8 }} - {{- end }} - volumes: - - name: nmap-conf - configMap: - name: {{ .Values.prefix }}-nmap-conf - - name: certificates - emptyDir: - medium: Memory - - name: notary-nodeinfo - emptyDir: - medium: Memory - volumeClaimTemplates: - - metadata: - name: {{ .Values.nodeName }}-pvc-logs -{{- if .Values.config.pvc.annotations }} - annotations: -{{ toYaml .Values.config.pvc.annotations | indent 8 }} -{{- end }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }}-pvc-logs - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - storageClassName: {{ .Values.storage.name }} - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 64Mi - - metadata: - name: {{ .Values.nodeName }}-pvc-h2 -{{- if .Values.config.pvc.annotations }} - annotations: -{{ toYaml .Values.config.pvc.annotations | indent 8 }} -{{- end }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }}-pvc-h2 - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - storageClassName: {{ .Values.storage.name }} - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 64Mi - - metadata: - name: {{ .Values.nodeName }}-etc -{{- if .Values.config.pvc.annotations }} - annotations: -{{ toYaml .Values.config.pvc.annotations | indent 8 }} -{{- end }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }}-etc - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - storageClassName: {{ .Values.storage.name }} - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 64Mi diff --git a/platforms/r3-corda-ent/charts/cenm-networkmap/templates/service.yaml b/platforms/r3-corda-ent/charts/cenm-networkmap/templates/service.yaml index a60b86dad7d..bb492e1dfa0 100644 --- a/platforms/r3-corda-ent/charts/cenm-networkmap/templates/service.yaml +++ b/platforms/r3-corda-ent/charts/cenm-networkmap/templates/service.yaml @@ -8,72 +8,63 @@ apiVersion: v1 kind: Service metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} + name: {{ include "networkmap.name" . }} + namespace: {{ .Release.Namespace }} labels: - run: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + app.kubernetes.io/name: networkmap-service + app.kubernetes.io/component: networkmap + app.kubernetes.io/part-of: {{ include "networkmap.fullname" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} spec: + type: ClusterIP selector: - app: {{ .Values.nodeName }} -# W e need healthCheckNodePort set to get rid of logs pollution -{{- if (.Values.healthCheck.nodePort) }} - healthCheckNodePort: {{ .Values.healthCheck.nodePort }} -{{- end }} + app.kubernetes.io/name: networkmap-statefulset + app.kubernetes.io/component: networkmap + app.kubernetes.io/part-of: {{ include "networkmap.fullname" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} ports: - - port: {{ .Values.service.internal.port }} - targetPort: {{ .Values.service.internal.port }} - protocol: TCP - name: http-internal - - port: {{ .Values.service.external.port }} - targetPort: {{ .Values.service.external.port }} - protocol: TCP - name: http - - port: {{ .Values.service.adminListener.port }} - targetPort: {{ .Values.service.adminListener.port }} - protocol: TCP - name: adminlistener -{{ if $.Values.ambassador }} + - port: {{ .Values.global.cenm.networkmap.internal.port }} + targetPort: {{ .Values.global.cenm.networkmap.internal.port }} + protocol: TCP + name: http + - port: {{ .Values.global.cenm.networkmap.port }} + targetPort: {{ .Values.global.cenm.networkmap.port }} + protocol: TCP + name: http-external + - port: {{ .Values.adminListener.port }} + targetPort: {{ .Values.adminListener.port }} + protocol: TCP + name: adminlistener +{{- if eq .Values.global.proxy.provider "ambassador" }} --- +## Host for doorman apiVersion: getambassador.io/v3alpha1 kind: Host metadata: - name: {{ .Values.nodeName }}-host + name: {{ .Release.Name }}-nms spec: - hostname: {{ .Values.nodeName }}.{{ .Values.ambassador.external_url_suffix }} + hostname: {{ .Values.cenm.prefix }}-nms.{{ .Values.global.proxy.externalUrlSuffix }} acmeProvider: authority: none requestPolicy: insecure: action: Reject tlsSecret: - name: {{ .Values.nodeName }}-ambassador-certs - namespace: {{ .Values.metadata.namespace }} - tls: - min_tls_version: v1.2 + name: nms-tls-certs + namespace: {{ .Release.Namespace }} --- +## Mapping for nms port apiVersion: getambassador.io/v3alpha1 kind: Mapping metadata: - name: {{ .Values.nodeName }}-https - namespace: {{ .Values.metadata.namespace }} + name: {{ .Release.Name }}-mapping + namespace: {{ .Release.Namespace }} spec: - host: {{ .Values.nodeName }}.{{ .Values.ambassador.external_url_suffix }} + host: {{ .Values.cenm.prefix }}-nms.{{ .Values.global.proxy.externalUrlSuffix }} prefix: / - service: {{ .Values.nodeName }}.{{ .Values.metadata.namespace }}:{{ .Values.service.external.port }} ---- -apiVersion: getambassador.io/v3alpha1 -kind: TLSContext -metadata: - name: {{ .Values.nodeName }}-tlscontext - namespace: {{ .Values.metadata.namespace }} -spec: - hosts: - - {{ .Values.nodeName }}.{{ .Values.ambassador.external_url_suffix }} - secret: {{ .Values.nodeName }}-ambassador-certs.{{ .Values.metadata.namespace }} - secret_namespacing: true - min_tls_version: v1.2 + service: {{ include "networkmap.name" . }}.{{ .Release.Namespace }}:{{ .Values.global.cenm.networkmap.port }} {{- end }} diff --git a/platforms/r3-corda-ent/charts/cenm-networkmap/templates/statefulset.yaml b/platforms/r3-corda-ent/charts/cenm-networkmap/templates/statefulset.yaml new file mode 100644 index 00000000000..c1b28a8de85 --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm-networkmap/templates/statefulset.yaml @@ -0,0 +1,209 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "networkmap.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ include "networkmap.fullname" . 
}} + app.kubernetes.io/name: networkmap-statefulset + app.kubernetes.io/component: networkmap + app.kubernetes.io/part-of: {{ include "networkmap.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} +spec: + replicas: 1 + podManagementPolicy: OrderedReady + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ include "networkmap.fullname" . }} + app.kubernetes.io/name: networkmap-statefulset + app.kubernetes.io/component: networkmap + app.kubernetes.io/part-of: {{ include "networkmap.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + serviceName: {{ include "networkmap.fullname" . }} + volumeClaimTemplates: + - metadata: + name: networkmap-h2 + spec: + storageClassName: storage-{{ .Release.Name }} + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storage.dbSize }} + - metadata: + name: networkmap-logs + spec: + storageClassName: storage-{{ .Release.Name }} + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storage.size }} + - metadata: + name: networkmap-etc + spec: + storageClassName: storage-{{ .Release.Name }} + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storage.size }} + - metadata: + name: notary-nodeinfo + spec: + storageClassName: storage-{{ .Release.Name }} + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storage.size }} + template: + metadata: + labels: + app: {{ include "networkmap.fullname" . }} + app.kubernetes.io/name: networkmap-statefulset + app.kubernetes.io/component: networkmap + app.kubernetes.io/part-of: {{ include "networkmap.fullname" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + hostname: {{ .Release.Name }} + imagePullSecrets: + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} + containers: + - name: init-networkmap + image: {{ .Values.image.enterpriseCli.repository }}:{{ .Values.image.enterpriseCli.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: ["/bin/bash", "-c"] + args: + - |- + # Remove the old network-paramenters files + rm -f /opt/cenm/etc/network-parameters-initial.conf + # Create the network-paramenters file + file="/opt/cenm/etc/network-parameters-initial.conf" + # Move to the mount folder containing the notary details + cd /opt/cenm/notary-details + echo "notaries : [" > $file + for notary in nodeInfo* + do + echo " {" >> $file + echo " notaryNodeInfoFile: \"notary-nodeinfo/$notary\"" >> $file + echo " validating = $(cat isValidating_$notary)" >> $file + echo " }" >> $file + done + echo "]" >> $file + echo "minimumPlatformVersion = 4" >> $file + echo "maxMessageSize = 10485760" >> $file + echo "maxTransactionSize = 10485760" >> $file + echo "eventHorizonDays = 1" >> $file + # Move the node info files to the notary-nodeinfo + cp nodeInfo* /opt/cenm/notary-nodeinfo/ + cp $file /opt/cenm/notary-nodeinfo/ + # setup the zone token and create the subzone + cd /opt/cenm + bin/getZoneToken.sh + # Keep the container running for network related manual ops + tail -f /dev/null + volumeMounts: + - name: networkmap-etc + mountPath: /opt/cenm/etc + - name: cenm-certs + mountPath: /certs + - name: notary-details + mountPath: /opt/cenm/notary-details + - name: notary-nodeinfo + mountPath: /opt/cenm/notary-nodeinfo + - name: networkmap-conf + mountPath: /opt/cenm/bin/getZoneToken.sh + subPath: getZoneToken.sh + - name: networkmap-conf + mountPath: /opt/cenm/etc/networkmap.conf + subPath: networkmap.conf + - name: networkmap + image: {{ .Values.image.networkmap.repository }}:{{ .Values.image.networkmap.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: ["/bin/bash", "-c"] + args: + - |- + # Setup networkwork parameter + cp /opt/cenm/etc/networkmap.conf /opt/cenm/etc/networkmap-init.conf + NETWORK_PARAM=/opt/cenm/notary-nodeinfo/network-parameters-initial.conf + {{- if .Values.nmapUpdate }} + # Remove old network param update config + rm -f /opt/cenm/notary-nodeinfo/network-parameters-update.conf + echo "Starting networkmap update..." 
+ echo "# This is a file for updating network parameters" > /opt/cenm/notary-nodeinfo/network-parameters-update.conf + NETWORK_PARAM=/opt/cenm/notary-nodeinfo/network-parameters-update.conf + cat /opt/cenm/notary-nodeinfo/network-parameters-initial.conf >> /opt/cenm/notary-nodeinfo/network-parameters-update.conf + echo 'parametersUpdate { + description = "Update network parameters settings" + updateDeadline = "'$(date -u +'%Y-%m-%dT%H:%M:%S.%3NZ' -d +10minutes)'" + }' >> /opt/cenm/notary-nodeinfo/network-parameters-update.conf + {{- end }} + java -jar bin/networkmap.jar \ + -f /opt/cenm/etc/networkmap.conf \ + --set-network-parameters $NETWORK_PARAM \ + --network-truststore /certs/network-root-truststore.jks \ + --truststore-password {{ .Values.global.cenm.sharedCreds.truststore }} \ + --root-alias cordarootca + # running the networkmap service + bin/run.sh + volumeMounts: + - name: networkmap-etc + mountPath: /opt/cenm/etc + - name: networkmap-conf + mountPath: /opt/cenm/bin/run.sh + subPath: run.sh + - name: cenm-certs + mountPath: /certs + - name: notary-details + mountPath: /opt/cenm/notary-details + - name: notary-nodeinfo + mountPath: /opt/cenm/notary-nodeinfo + - name: networkmap-conf + mountPath: /opt/cenm/etc/networkmap.conf + subPath: networkmap.conf + - name: networkmap-logs + mountPath: /opt/cenm/logs + - name: networkmap-h2 + mountPath: /opt/cenm/h2 + - name: logs + image: {{ .Values.image.networkmap.repository }}:{{ .Values.image.networkmap.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: ["/bin/bash", "-c"] + args: + - |- + cd /opt/cenm + while true; do tail -f logs/angel-service/*.log 2>/dev/null; sleep 5; done + # in case sth went wrong just wait indefinitely ... + tail -f /dev/null + volumeMounts: + - name: networkmap-logs + mountPath: /opt/cenm/logs + volumes: + - name: networkmap-conf + configMap: + name: {{ include "networkmap.fullname" . }}-conf + defaultMode: 0777 + - name: cenm-certs + secret: + secretName: cenm-certs + - name: notary-details + secret: + secretName: notary-info diff --git a/platforms/r3-corda-ent/charts/cenm-networkmap/values.yaml b/platforms/r3-corda-ent/charts/cenm-networkmap/values.yaml index 41cfcb360eb..b1bc8ce7ea9 100644 --- a/platforms/r3-corda-ent/charts/cenm-networkmap/values.yaml +++ b/platforms/r3-corda-ent/charts/cenm-networkmap/values.yaml @@ -4,242 +4,87 @@ # SPDX-License-Identifier: Apache-2.0 ############################################################################################## -# Default values for the Networkmap (nmap) service. +# Default values for cenm-networkmap chart. # This is a YAML-formatted file. # Declare variables to be passed into your templates. - -############################################################# -# Basic Configuration # -############################################################# - -# Provide the name of the node -# e.g. `networkmap` -nodeName: networkmap -bashDebug: true -prefix: cenm - -# This section contains the Corda Enterprise nmap metadata. -metadata: - # Provide the namespace for the Corda Enterprise nmap. - # Eg. namespace: cenm - namespace: cenm - # Provide any additional labels for the Corda Enterprise nmap. 
- labels: +global: + serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # set to true to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) + vault: + type: hashicorp + role: vault-role + address: + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + proxy: + #This will be the proxy/ingress provider. Can have values "ambassador" or "none" + #Eg. provider: "ambassador" + provider: "ambassador" + #This field contains the external URL of the node + #Eg. externalUrlSuffix: test.blockchaincloudpoc.com + externalUrlSuffix: test.blockchaincloudpoc.com + cenm: + sharedCreds: + truststore: password + keystore: password + identityManager: + port: 10000 + revocation: + port: 5053 + internal: + port: 5052 + auth: + port: 8081 + gateway: + port: 8080 + zone: + enmPort: 25000 + networkmap: + port: 10000 + internal: + port: 5050 storage: - # Provide the name of the storageclass. - # NOTE: Make sure that the storageclass exist prior to this deployment as - # this chart doesn't create the storageclass. - # Eg. name: cenm - name: cenm - # Provide the memory size for the storage class. - # Eg. memory: 64Mi - memory: 64Mi - -# Image for the init-containers -image: - # Name of the Docker image to use for init-containers, e.g. `ghcr.io/hyperledger/bevel-alpine:latest` - initContainer: ghcr.io/hyperledger/bevel-alpine:latest - # Name of the Docker image to use for the main Networkmap Service, e.g. corda/enterprise-networkmap:1.2-zulu-openjdk8u24 - nmapContainer: corda/enterprise-networkmap:1.2-zulu-openjdk8u24 - # Provide the image for the main Idman container. - # Eg. enterpriseCli: corda/enterprise-cli:1.5.1-zulu-openjdk8u242 - enterpriseCliContainer: corda/enterprise-cli:1.5.1-zulu-openjdk8u242 - # Provide the docker-registry secret created and stored in kubernetes cluster as a secret. - # Eg. imagePullSecrets: regcred, can add multiple creds - imagePullSecrets: - - name: - # Pull policy to be used for the Docker image - # Eg. pullPolicy: Always - pullPolicy: -# required parameter -acceptLicense: YES - -############################################################# -# HashiCorp Vault Configuration # -############################################################# -# NOTE: Make sure that the vault is already unsealed, intialized and configured to -# use Kubernetes service account token based authentication. -# For more info, see https://www.vaultproject.io/docs/auth/kubernetes - -# Values for HashiCorp Vault -vault: - # Address of the vault, e.g. `http://vault.internal.dev.aws.blockchaincloudpoc-develop.com:8200` - address: - # Which role to use when connecting to vault, e.g. `vault-role` - role: vault-role - # Authpath created for networkmap, e.g. cordaentcenm - authPath: cordaentcenm - # Serviceaccount, eg. vault-auth - serviceAccountName: vault-auth - # Prefix to use for secret engine for certificates, e.g. `cenm/certs` - certSecretPrefix: cenm/certs - # The amount of times to retry fetching from/writing to Vault before giving up. - # Eg. retries: 10 - retries: 10 - # The amount of time in seconds to wait after an error occurs when fetching from/writing to Vault. - # Eg. sleepTimeAfterError: 15 - sleepTimeAfterError: 15 - - -############################################################# -# Nmap Configuration # -############################################################# - -service: - # Nmap 'main' service - external: - # E.g. 
port: 10000 - port: 10000 - # Internal service, inside the K8s cluster - internal: - # E.g. port: 5050 - port: 5050 - # Values for the Revocation check - revocation: - # On which port to check for Revocation, e.g. `5053` - port: 5053 - # Provide the admin listener port - adminListener: - port: + size: 1Gi + dbSize: 5Gi + allowedTopologies: + enabled: false -serviceLocations: - # Values for the Identity Manager (idman) - identityManager: - # Name of idman service - # E.g. name: idman - name: idman - # External domain name of idman service - # E.g. domain: - domain: - # Host of idman - # E.g. host: idman.{namespace} - host: - # Port of idman - # E.g. port: 5052 - port: 5052 - # Values for notary service - notary: - # List of the notary nodes - -############################################################# -# Database Options and Configuration # -############################################################# +# db related configuration database: - # Java class name to use for the database - # Eg. driverClassName: "org.h2.Driver" - driverClassName: - # The DB connection URL - # Eg. url: "jdbc:h2:file:./h2/identity-manager-persistence;DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=10000;WRITE_DELAY=0;AUTO_SERVER_PORT=0" - url: - # DB user - # Eg. user: "example-db-user" - user: - # DB password - # Eg. password: "example-db-password" - password: - # Migrations of database can be run as part of the startup of nmap, if set to true. - # If set to false, it will be run prior to setting up the nmap. - # E.g. runMigration: true - runMigration: - -############################################################# -# Settings # -############################################################# -config: - # Provide volume related specifications - volume: - # E.g. baseDir: /opt/corda - baseDir: /opt/corda - - # Provide the path where the CENM Idman .jar-file is stored - # Eg. jarPath: bin - jarPath: bin - - # Provide the path where the CENM Service configuration files are stored - # Eg. configPath: etc - configPath: etc - - # Provide any extra annotations for the PVCs - pvc: - # annotations: - # key: "value" - annotations: {} - - # Provide any extra annotations for the deployment - deployment: - # annotations: - # key: "value" - annotations: {} - - # Specify the maximum size of the memory allocation pool - cordaJar: - # Provide the memory size. - # Eg. memorySize: 4096 (if using kilobytes) - # Eg. memorySize: 512 (if using megabytes) - # Eg. memorySize: 1 (if using gigabytes) - memorySize: - # Provide the unit of greatness for the size, one of three options: - # - k or K for kilobytes - # - m or M for megabytes - # - g or G for gigabytes - # Eg. unit: M - unit: - - # Set memory limits of pod - pod: - resources: - # Provide the limit memory for node - # Eg. limits: 512M - limits: 512M - # Provide the requests memory for node - # Eg. requests: 550M - requests: 550M - - # Provide the number of replicas for your pods - # Eg. replicas: 1 - replicas: 1 - - # Whether the NMS will check the certificate revocation list, e.g. 
`true` - checkRevocation: true - -# URL Suffix for the ambassador load balancer -ambassador: - external_url_suffix: -############################################################# -# CENM SERVICES DETAILS # -############################################################# -cenmServices: - # Gateway service name - gatewayName: - # Gateway service api endpoint port - gatewayPort: - # Zone service name - zoneName: - # Zone service enm port - zoneEnmPort: - # Auth service name - authName: - # Auth service port - authPort: -healthCheck: - # Health Check node port set to get rid of logs pollution - # Eg. nodePort: 0 - nodePort: -############################################################# -# Network Params Update # -############################################################# -# Mark as true when you want to update the networkmap param -# Only allows addition of new notaries now + driverClassName: "org.h2.Driver" + jdbcDriver: "" + url: "jdbc:h2:file:./h2/networkmap-manager-persistence;DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=10000;WRITE_DELAY=0;AUTO_SERVER_PORT=0" + user: "networkmap-db-user" + password: "networkmap-db-password" + runMigration: true + +image: + #Provide the docker secret name in the namespace + #Eg. pullSecret: regcred + pullSecret: + #Pull policy to be used for the Docker image + #Eg. pullPolicy: IfNotPresent + pullPolicy: IfNotPresent + #Provide a valid image and version for enterprise-cli service + enterpriseCli: + repository: corda/enterprise-cli + tag: 1.5.9-zulu-openjdk8u382 + #Provide a valid image and version for enterprise-networkmap service + networkmap: + repository: corda/enterprise-networkmap + tag: 1.5.9-zulu-openjdk8u382 + +# Flag for updating the network param file nmapUpdate: false -# Add additional notary information below: -addNotaries: - - notary: - # Notary nodeinfo file name - # Eg: nodeinfoFileName: nodeInfo-056A07FF98F9872C4F4F9... - nodeinfoFileName: - # Notary nodeinfoFile in base64 format (single line) - # Eg: nodeinfoFile: Y29yZGEBAAAAgMViAAAAAAAB0AAAFqEAAAADAKMibmV0LmNvcmRhOmllVjl4Z2RiUXhEUFNPN0Qxd0Nh... - nodeinfoFile: - # set notary validating true or false - validating: true +# Sleep time (in seconds) after an error occured +sleepTimeAfterError: 120 +# path to base dir +baseDir: /opt/cenm + +# NMS internal adminListener port +adminListener: + port: 6000 diff --git a/platforms/r3-corda-ent/charts/cenm-pki-gen/Chart.yaml b/platforms/r3-corda-ent/charts/cenm-pki-gen/Chart.yaml deleted file mode 100644 index ad6f80de731..00000000000 --- a/platforms/r3-corda-ent/charts/cenm-pki-gen/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-ent: Deploys the pki for cenm services." -name: cenm-pki-gen -version: 1.0.0 diff --git a/platforms/r3-corda-ent/charts/cenm-pki-gen/README.md b/platforms/r3-corda-ent/charts/cenm-pki-gen/README.md deleted file mode 100644 index f1ff6bb8616..00000000000 --- a/platforms/r3-corda-ent/charts/cenm-pki-gen/README.md +++ /dev/null @@ -1,190 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) 
-[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# Generate-pki Deployment - -- [Generate-pki Deployment Helm Chart](#Generate-pki-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Contributing](#contributing) -- [License](#license) - - -## Generate-pki Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/cemm-pki-gen) deploys the pki to generate the corda complaint certificate hierarchy for the CENM services. - - -## Prerequisites ---- -Before deploying the chart please ensure you have the following prerequisites: - -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm is installed. - - - -## Chart Structure ---- -This chart has following structue: -``` - ├── cenm-pki-gen - │ ├── Chart.yaml - │ ├── templates - │ │ ├── job.yaml - │ │ ├── _helpers.tpl - │ └── values.yaml -``` - -Type of files used: - -- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed. -- `_helpers.tpl` : A template file used for defining custom labels in the Helm chart. -- `job.yaml` : A Kubernetes Job to generate and fetch certificates and other cryptographic materials from a Vault server.This job contains templates, scripts, and configurations for generating certificates and keys. -- `chart.yaml` : Provides metadata about the chart, such as its name, version, and description. -- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the metadata, image, storage, Vault, etc. - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/cemm-pki-gen/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. 
Here are some important configuration options: - -## Parameters ---- - -### Name - -| Name | Description | Default Value | -| -----------| -------------------------------------------------- | ------------- | -| nodeName | Provide the name of the node | idman | - -### Metadata - -| Name | Description | Default Value | -| ----------------| -------------------------------------------------------------------- | ------------- | -| namespace | Provide the namespace for the Corda Enterprise PKI Generator | cenm | -| labels | Provide any additional labels for the Corda Enterprise PKI Generator | "" | - -### Image - -| Name | Description | Default Value | -| ------------------------ | -------------------------------------------------------------------------------- | --------------- | -| initContainerName | Information about the Docker container used for the init-containers | ghcr.io/hyperledger| -| pkiContainerName | Provide the image for the pki container | test | -| ImagePullSecret | Provide the K8s secret that has rights for pulling the image off the registry | "" | -| pullPolicy | Provide the pull policy for Docker images, either Always or IfNotPresent | IfNotPresent | - -### Vault - -| Name | Description | Default Value | -| ------------------------- | --------------------------------------------------------------------------| ------------- | -| address | Address/URL of the Vault server | "" | -| role | Role used for authentication with Vault | vault-role | -| authpath | Authentication path for Vault | entcordacenm | -| serviceAccountName | Provide the already created service account name autheticated to vault | vault-auth | -| certSecretPrefix | Provide the vault path where the certificates are stored | secret/cenm-org-name| -| retries | Amount of times to retry fetching from/writing to Vault before giving up | 10 | -| sleepTimeAfterError | Amount of time in seconds to wait after an error occurs | 15 | - -### CenmServices - -| Name | Description | Default Value | -| ---------------| ------------------------------------------| ------------- | -| signerName | Provide the name of the signer | signer | -| idmanName | Provide the name of the idman | idman | -| networkmapName | Provide the name of the networkmap | networkmap | -| notaryName | Provide the name of the notary | notary | - -### IdentityManager - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ------------- | -| publicIp | Mention the idman public IP | "" | -| publicPort | Mention the idman public port | 443 | - -### Subjects - -| Name | Description | Default Value | -| ------------------------- | -----------------------------------------------| ------------------------------------------------------------------ | -| tlscrlsigner | Mention the subject for tls crl signer | "CN=Test TLS Signer Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" | -| tlscrlissuer | Mention the subject for the tls crl issuer | "CN=Corda TLS CRL Authority,OU=Corda UAT,O=R3 HoldCo LLC,L=New York,C=US" | -| rootca | Mention the subject for rootca | "CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB" | -| subordinateca | Mention the subject for subordinateca | "CN=Test Subordinate CA Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" | -| idmanca | Mention the subject for idmanca | "CN=Test Identity Manager Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US"| -| networkmap | Mention the subject for networkmap | "CN=Test Network Map Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" | - -### 
volume - -| Name | Description | Default Value | -| ------------------| ------------------------------------------ | ------------- | -| baseDir | Provide the base directory for the container| /opt/corda | - - - -## Deployment ---- - -To deploy the Generate-pki Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/cemm-pki-gen/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install, upgrade, verify delete the chart: - -To install the chart: -```bash -helm repo add bevel https://hyperledger.github.io/bevel/ -helm install ./cemm-pki-gen -``` - -To upgrade the chart: -```bash -helm upgrade ./cemm-pki-gen -``` - -To verify the deployment: -```bash -kubectl get jobs -n -``` -Note : Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. - -To delete the chart: -```bash -helm uninstall -``` -Note : Replace `` with the desired name for the release. - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Generate-pki Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/cemm-pki-gen), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - -## License - -This chart is licensed under the Apache v2.0 license. - -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -``` diff --git a/platforms/r3-corda-ent/charts/cenm-pki-gen/templates/_helpers.tpl b/platforms/r3-corda-ent/charts/cenm-pki-gen/templates/_helpers.tpl deleted file mode 100644 index 7bf5f530a8e..00000000000 --- a/platforms/r3-corda-ent/charts/cenm-pki-gen/templates/_helpers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} \ No newline at end of file diff --git a/platforms/r3-corda-ent/charts/cenm-pki-gen/templates/job.yaml b/platforms/r3-corda-ent/charts/cenm-pki-gen/templates/job.yaml deleted file mode 100644 index 2da313dfb01..00000000000 --- a/platforms/r3-corda-ent/charts/cenm-pki-gen/templates/job.yaml +++ /dev/null @@ -1,690 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: batch/v1 -kind: Job -metadata: - name: {{ .Values.nodeName }}-generate-pki - namespace: {{ .Values.metadata.namespace }} - labels: - app: {{ .Values.nodeName }}-generate-pki - app.kubernetes.io/name: {{ .Values.nodeName }}-generate-pki - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} -spec: - backoffLimit: 6 - template: - metadata: - labels: - app: {{ .Values.nodeName }}-generate-pki - app.kubernetes.io/name: {{ .Values.nodeName }}-generate-pki - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - restartPolicy: "OnFailure" - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - hostname: {{ .Values.nodeName }} - securityContext: - fsGroup: 1000 - initContainers: - - name: init-check-certificates - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - - name: MOUNT_PATH - value: "/certcheck" - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code and curl_response - $curl_response" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - # Setting up the environment to get secrets/certificates from Vault - echo "Getting secrets/certificates from Vault server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - echo "Logged into Vault" - mkdir -p ${MOUNT_PATH} - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/root/certs | jq -r 'if .errors then . else . end') - data_info="$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data["data"]')" - - if [ "$data_info" == "null" ] - then - echo "Certficates absent in vault. 
Ignore error warning" - touch ${MOUNT_PATH}/absent.txt - else - validateVaultResponse "${CERTS_SECRET_PREFIX}/root/certs" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - echo "Certificates present in vault" - touch ${MOUNT_PATH}/present.txt - fi - echo "Done checking for certificates in vault" - volumeMounts: - - name: certcheck - mountPath: /certcheck - - name: init-credentials - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - - name: MOUNT_PATH - value: "/DATA" - - name: NODEINFO_MOUNT_PATH - value: "/notary-nodeinfo" - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code and curl_response - $curl_response" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - if [ -e /certcheck/present.txt ] - then - echo "Certificates already present in the vault. Skipping.." - exit 0 - fi - # Setting up the environment to get secrets from Vault - echo "Getting secrets from Vault Server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"vault-role","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - echo "logged into vault" - # Creating dirs for storing certificates - mkdir -p ${MOUNT_PATH}/truststore; - mkdir -p ${MOUNT_PATH}/keystore; - mkdir -p ${MOUNT_PATH}/ssl; - OUTPUT_PATH=${MOUNT_PATH}/truststore; - # Fetching credentials for truststores - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials/truststore | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials/truststore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - ROOTCA_TRUSTSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["rootca"]') - echo "${ROOTCA_TRUSTSTORE}"> ${OUTPUT_PATH}/rootcats - SSL_TRUSTSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["ssl"]') - echo "${SSL_TRUSTSTORE}"> ${OUTPUT_PATH}/sslts - OUTPUT_PATH=${MOUNT_PATH}/keystore; - # Fetching credentials for keystores - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials/keystore | jq -r 'if .errors then . else . 
end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials/keystore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - IDMAN_KEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["idman"]') - echo "${IDMAN_KEYSTORE}"> ${OUTPUT_PATH}/idmanks - NETWORKMAP_KEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["networkmap"]') - echo "${NETWORKMAP_KEYSTORE}"> ${OUTPUT_PATH}/networkmapks - SUBORDINATECA_KEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["subordinateca"]') - echo "${SUBORDINATECA_KEYSTORE}"> ${OUTPUT_PATH}/subordinatecaks - ROOTCA_KEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["rootca"]') - echo "${ROOTCA_KEYSTORE}"> ${OUTPUT_PATH}/rootcaks - TLSCRLSIGNER_KEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["tlscrlsigner"]') - echo "${TLSCRLSIGNER_KEYSTORE}"> ${OUTPUT_PATH}/tlscrlsignerks - OUTPUT_PATH=${MOUNT_PATH}/ssl; - # Fetching credentials for ssl certificates - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials/ssl | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials/ssl" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - IDMAN_SSL=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["idman"]') - echo "${IDMAN_SSL}"> ${OUTPUT_PATH}/idmanssl - NETWORKMAP_SSL=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["networkmap"]') - echo "${NETWORKMAP_SSL}"> ${OUTPUT_PATH}/networkmapssl - SIGNER_SSL=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["signer"]') - echo "${SIGNER_SSL}"> ${OUTPUT_PATH}/signerssl - AUTH_SSL=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["auth"]') - echo "${AUTH_SSL}"> ${OUTPUT_PATH}/authssl - ROOT_SSL=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["root"]') - echo "${ROOT_SSL}"> ${OUTPUT_PATH}/rootssl - touch /DATA/done.txt - echo "Done" - volumeMounts: - - name: credentials - mountPath: /DATA - - name: certcheck - mountPath: /certcheck - containers: - - name: pki - image: "{{ required "pki[main]: missing value for .Values.image.pkiContainerName" .Values.image.pkiContainerName }}" - env: - - name: ACCEPT_LICENSE - value: "{{ .Values.acceptLicense }}" - - name: BASE_DIR - value: "{{ .Values.volume.baseDir }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: ["/bin/bash", "-c"] - args: - - |- - if [ -e /certcheck/present.txt ] - then - echo "Certificates already present in the vault. Skipping.." 
- exit 0 - fi - rm -r ${BASE_DIR}/DATA/done.txt - echo 'keyStores = { - "identity-manager-key-store" = { - type = LOCAL - file = "./DATA/signer/identity-manager-key-store.jks" - password = "IDMAN_KEYSTORE" - } - "network-map-key-store" = { - type = LOCAL - file = "./DATA/signer/network-map-key-store.jks" - password = "NETWORKMAP_KEYSTORE" - } - "subordinate-key-store" = { - type = LOCAL - file = "./DATA/root/subordinate-key-store.jks" - password = "SUBORDINATECA_KEYSTORE" - } - "root-key-store" = { - type = LOCAL - file = "./DATA/root/root-key-store.jks" - password = "ROOTCA_KEYSTORE" - } - "tls-crl-signer-key-store" = { - type = LOCAL - file = "./DATA/root/tls-crl-signer-key-store.jks" - password = "TLSCRLSIGNER_KEYSTORE" - } - "corda-ssl-network-map-keys" = { - type = LOCAL - file = "./DATA/networkmap/corda-ssl-network-map-keys.jks" - password = "NETWORKMAP_SSL" - }, - "corda-ssl-identity-manager-keys" = { - type = LOCAL - file = "./DATA/idman/certs/corda-ssl-identity-manager-keys.jks" - password = "IDMAN_SSL" - }, - "corda-ssl-signer-keys" = { - type = LOCAL - file = "./DATA/signer/corda-ssl-signer-keys.jks" - password = "SIGNER_SSL" - }, - "corda-ssl-auth-keys" = { - type = LOCAL - file = "./DATA/root/corda-ssl-auth-keys.jks" - password = "AUTH_SSL" - }, - "corda-ssl-root-keys" = { - type = LOCAL - file = "./DATA/root/corda-ssl-root-keys.jks" - password = "ROOT_SSL" - } - } - certificatesStores = { - "network-root-trust-store" = { - file = "./DATA/root/network-root-truststore.jks" - password = "ROOTCA_TRUSTSTORE" - } - "corda-ssl-trust-store" = { - file = "./DATA/root/corda-ssl-trust-store.jks" - password = "SSL_TRUSTSTORE" - } - } - certificates = { - "tlscrlsigner" = { - key = { - type = LOCAL - includeIn = ["tls-crl-signer-key-store"] - algorithm = "ECDSA_SECP256R1_SHA256" - password = "TLSCRLSIGNER_KEYSTORE" - } - isSelfSigned = true - keyUsages = [CRL_SIGN] - keyPurposes = [SERVER_AUTH, CLIENT_AUTH] - validDays = 7300 - issuesCertificates = true - subject = {{ .Values.subjects.tlscrlsigner | quote }} - includeIn = ["network-root-trust-store"] - crl = { - crlDistributionUrl = "https://{{ .Values.identityManager.publicIp }}:{{ .Values.identityManager.publicPort }}/certificate-revocation-list/tls" - indirectIssuer = true - issuer = {{ .Values.subjects.tlscrlissuer | quote }} - file = "./DATA/idman/crls/tls.crl" - } - }, - "cordarootca" = { - key = { - type = LOCAL - includeIn = ["root-key-store"] - algorithm = "ECDSA_SECP256R1_SHA256" - password = "ROOTCA_KEYSTORE" - } - isSelfSigned = true - keyUsages = [DIGITAL_SIGNATURE, KEY_CERT_SIGN, CRL_SIGN] - keyPurposes = [SERVER_AUTH, CLIENT_AUTH] - validDays = 7300 - issuesCertificates = true - subject = {{ .Values.subjects.rootca | quote }} - includeIn = ["network-root-trust-store"] - crl = { - crlDistributionUrl = "https://{{ .Values.identityManager.publicIp }}:{{ .Values.identityManager.publicPort }}/certificate-revocation-list/root" - file = "./DATA/idman/crls/root.crl" - } - }, - "subordinateca" = { - key = { - type = LOCAL - includeIn = ["subordinate-key-store"] - algorithm = "ECDSA_SECP256R1_SHA256" - password = "SUBORDINATECA_KEYSTORE" - } - isSelfSigned = false - signedBy = "cordarootca" - keyUsages = [DIGITAL_SIGNATURE, KEY_CERT_SIGN, CRL_SIGN] - keyPurposes = [SERVER_AUTH, CLIENT_AUTH] - validDays = 7300 - issuesCertificates = true - subject = {{ .Values.subjects.subordinateca | quote }} - crl = { - crlDistributionUrl = "https://{{ .Values.identityManager.publicIp }}:{{ .Values.identityManager.publicPort 
}}/certificate-revocation-list/subordinate" - file = "./DATA/idman/crls/subordinate.crl" - } - }, - "identitymanagerca" = { - key = { - type = LOCAL - includeIn = ["identity-manager-key-store"] - algorithm = "ECDSA_SECP256R1_SHA256" - password = "IDMAN_KEYSTORE" - } - isSelfSigned = false - signedBy = "subordinateca" - keyUsages = [DIGITAL_SIGNATURE, KEY_CERT_SIGN, CRL_SIGN] - keyPurposes = [SERVER_AUTH, CLIENT_AUTH] - validDays = 7300 - role = DOORMAN_CA - issuesCertificates = true - subject = {{ .Values.subjects.idmanca | quote }} - }, - "networkmap" = { - key = { - type = LOCAL - includeIn = ["network-map-key-store"] - algorithm = "ECDSA_SECP256R1_SHA256" - password = "NETWORKMAP_KEYSTORE" - } - isSelfSigned = false - signedBy = "subordinateca" - keyUsages = [DIGITAL_SIGNATURE, KEY_CERT_SIGN, CRL_SIGN] - keyPurposes = [SERVER_AUTH, CLIENT_AUTH] - validDays = 7300 - role = NETWORK_MAP - issuesCertificates = false - subject = {{ .Values.subjects.networkmap | quote }} - }, - "::CORDA_SSL_ROOT" { - key = { - type = LOCAL - includeIn = ["corda-ssl-root-keys"] - algorithm = "ECDSA_SECP256R1_SHA256" - password = "ROOT_SSL" - } - crl = { - crlDistributionUrl = "https://{{ .Values.identityManager.publicIp }}:{{ .Values.identityManager.publicPort }}/certificate-revocation-list/ssl" - file = "./DATA/root/crls/ssl.crl" - } - }, - "::CORDA_SSL_IDENTITY_MANAGER" { - key = { - type = LOCAL - includeIn = ["corda-ssl-identity-manager-keys"] - algorithm = "ECDSA_SECP256R1_SHA256" - password = "IDMAN_SSL" - } - crl = { - crlDistributionUrl = "https://{{ .Values.identityManager.publicIp }}:{{ .Values.identityManager.publicPort }}/certificate-revocation-list/ssl" - file = "./DATA/idman/crls/ssl.crl" - } - }, - "::CORDA_SSL_NETWORK_MAP" { - key = { - type = LOCAL - includeIn = ["corda-ssl-network-map-keys"] - algorithm = "ECDSA_SECP256R1_SHA256" - password = "NETWORKMAP_SSL" - } - crl = { - crlDistributionUrl = "https://{{ .Values.identityManager.publicIp }}:{{ .Values.identityManager.publicPort }}/certificate-revocation-list/ssl" - file = "./DATA/networkmap/crls/ssl.crl" - } - }, - "::CORDA_SSL_SIGNER" { - key = { - type = LOCAL - includeIn = ["corda-ssl-signer-keys"] - algorithm = "ECDSA_SECP256R1_SHA256" - password = "SIGNER_SSL" - } - crl = { - crlDistributionUrl = "https://{{ .Values.identityManager.publicIp }}:{{ .Values.identityManager.publicPort }}/certificate-revocation-list/ssl" - file = "./DATA/signer/crls/ssl.crl" - } - }, - "::CORDA_SSL_AUTH_SERVICE" { - key = { - type = LOCAL - includeIn = ["corda-ssl-auth-keys"] - algorithm = "ECDSA_SECP256R1_SHA256" - password = "AUTH_SSL" - } - crl = { - crlDistributionUrl = "https://{{ .Values.identityManager.publicIp }}:{{ .Values.identityManager.publicPort }}/certificate-revocation-list/ssl" - file = "./DATA/root/crls/ssl.crl" - } - } - }' >> {{ .Values.configPath }}/pki.conf - #replacement of the variables in the pki conf file - export ROOTCA_TRUSTSTORE=$(cat {{ .Values.volume.baseDir }}/credentials/truststore/rootcats) - sed -i -e "s*ROOTCA_TRUSTSTORE*${ROOTCA_TRUSTSTORE}*g" {{ .Values.configPath }}/pki.conf - export SSL_TRUSTSTORE=$(cat {{ .Values.volume.baseDir }}/credentials/truststore/sslts) - sed -i -e "s*SSL_TRUSTSTORE*${SSL_TRUSTSTORE}*g" {{ .Values.configPath }}/pki.conf - export IDMAN_KEYSTORE=$(cat {{ .Values.volume.baseDir }}/credentials/keystore/idmanks) - sed -i -e "s*IDMAN_KEYSTORE*${IDMAN_KEYSTORE}*g" {{ .Values.configPath }}/pki.conf - export NETWORKMAP_KEYSTORE=$(cat {{ .Values.volume.baseDir }}/credentials/keystore/networkmapks) - sed 
-i -e "s*NETWORKMAP_KEYSTORE*${NETWORKMAP_KEYSTORE}*g" {{ .Values.configPath }}/pki.conf - export SUBORDINATECA_KEYSTORE=$(cat {{ .Values.volume.baseDir }}/credentials/keystore/subordinatecaks) - sed -i -e "s*SUBORDINATECA_KEYSTORE*${SUBORDINATECA_KEYSTORE}*g" {{ .Values.configPath }}/pki.conf - export ROOTCA_KEYSTORE=$(cat {{ .Values.volume.baseDir }}/credentials/keystore/rootcaks) - sed -i -e "s*ROOTCA_KEYSTORE*${ROOTCA_KEYSTORE}*g" {{ .Values.configPath }}/pki.conf - export TLSCRLSIGNER_KEYSTORE=$(cat {{ .Values.volume.baseDir }}/credentials/keystore/tlscrlsignerks) - sed -i -e "s*TLSCRLSIGNER_KEYSTORE*${TLSCRLSIGNER_KEYSTORE}*g" {{ .Values.configPath }}/pki.conf - export IDMAN_SSL=$(cat {{ .Values.volume.baseDir }}/credentials/ssl/idmanssl) - sed -i -e "s*IDMAN_SSL*${IDMAN_SSL}*g" {{ .Values.configPath }}/pki.conf - export NETWORKMAP_SSL=$(cat {{ .Values.volume.baseDir }}/credentials/ssl/networkmapssl) - sed -i -e "s*NETWORKMAP_SSL*${NETWORKMAP_SSL}*g" {{ .Values.configPath }}/pki.conf - export SIGNER_SSL=$(cat {{ .Values.volume.baseDir }}/credentials/ssl/signerssl) - sed -i -e "s*SIGNER_SSL*${SIGNER_SSL}*g" {{ .Values.configPath }}/pki.conf - export AUTH_SSL=$(cat {{ .Values.volume.baseDir }}/credentials/ssl/authssl) - sed -i -e "s*AUTH_SSL*${AUTH_SSL}*g" {{ .Values.configPath }}/pki.conf - export ROOT_SSL=$(cat {{ .Values.volume.baseDir }}/credentials/ssl/rootssl) - sed -i -e "s*ROOT_SSL*${ROOT_SSL}*g" {{ .Values.configPath }}/pki.conf - mkdir DATA/signer DATA/root DATA/networkmap DATA/idman DATA/idman/crls DATA/idman/certs - time java -Xmx{{ .Values.cordaJarMx }}M -jar bin/pkitool.jar --config-file {{ .Values.configPath }}/pki.conf - #creating a dummy file to perform check if last line is executed or not. - touch ${BASE_DIR}/DATA/done.txt - volumeMounts: - - name: certcheck - mountPath: /certcheck - - name: credentials - mountPath: {{ .Values.volume.baseDir }}/credentials - - name: pkitool-certs-keys - mountPath: {{ .Values.volume.baseDir }}/DATA - - name: pkitool-etc - mountPath: {{ .Values.volume.baseDir }}/etc - resources: - requests: - memory: {{ .Values.cordaJarMx }}M - limits: - memory: {{ add .Values.cordaJarMx 2 }}M - - name: store-certs - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - } - - if [ -e /certcheck/present.txt ] - then - echo "Certificates already present in the vault. Skipping.." - exit 0 - fi - # setting up env to get secrets from vault - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . 
else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - cd ${BASE_DIR}/DATA - # putting certificate for cenm root - COUNTER=1 - while [ "$COUNTER" -lt {{ $.Values.vault.retries }} ] - do - if [ -e done.txt ] - then - cd ${BASE_DIR}/DATA/root - echo "found root certificates, performing vault put for root path" - (echo '{"data": {' - for FILE in *; - do - echo '"'$FILE'": "'; base64 ${FILE}; echo '",' - done; - ) >> ../temp_root.json - sed -i '$ s/.$//' ../temp_root.json - echo '}}' >> ../temp_root.json - cat ../temp_root.json | tr -d '\n' >> ../root.json - echo "before curl" - curl \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - --request POST \ - --data @../root.json \ - ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/root/certs - echo "after curl" - - # get root certs from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/root/certs | jq -r 'if .errors then . else . end') - CORDA_SSL_AUTH_KEYS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "corda-ssl-auth-keys.jks" ]' 2>&1) - CORDA_SSL_ROOT_KEYS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "corda-ssl-root-keys.jks" ]' 2>&1) - CORDA_SSL_TRUSTSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "corda-ssl-trust-store.jks" ]' 2>&1) - NETWORK_ROOT_TRUSTSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "network-root-truststore.jks" ]' 2>&1) - ROOT_KEY_STORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "root-key-store.jks" ]' 2>&1) - SUBORDINATE_KEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "subordinate-key-store.jks" ]' 2>&1) - TLSCRL_SIGNER_KEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "tls-crl-signer-key-store.jks" ]' 2>&1) - - if [ "$CORDA_SSL_AUTH_KEYS" == "null" ] || [ "$CORDA_SSL_ROOT_KEYS" == "null" ] || [ "$CORDA_SSL_TRUSTSTORE" == "null" ] || [ "$NETWORK_ROOT_TRUSTSTORE" == "null" ] || [ "$ROOT_KEY_STORE" == "null" ] || [ "$SUBORDINATE_KEYSTORE" == "null" ] || [ "$TLSCRL_SIGNER_KEYSTORE" == "null" ] || [[ "$CORDA_SSL_AUTH_KEYS" == "parse error"* ]] || [[ "$CORDA_SSL_ROOT_KEYS" == "parse error"* ]] || [[ "$CORDA_SSL_TRUSTSTORE" == "parse error"* ]] || [[ "$NETWORK_ROOT_TRUSTSTORE" == "parse error"* ]] || [[ "$ROOT_KEY_STORE" == "parse error"* ]] || [[ "$SUBORDINATE_KEYSTORE" == "parse error"* ]] || [[ "$TLSCRL_SIGNER_KEYSTORE" == "parse error"* ]] - then - echo "certificates write or read fail" - sleep {{ $.Values.vault.sleepTimeAfterError }} - if [ "$COUNTER" -ge {{ $.Values.vault.retries }} ] - then - echo "Retry attempted $COUNTER times, certificates have not been saved" - exit 1 - fi - fi - break - COUNTER=`expr "$COUNTER" + 1` - fi - done - cd ${BASE_DIR}/DATA - # putting certificate for cenm signer - COUNTER=1 - while [ "$COUNTER" -lt {{ $.Values.vault.retries }} ] - do - if [ -e done.txt ] - then - cd ${BASE_DIR}/DATA/signer - echo "found signer certificates, performing vault put for signer path" - (echo '{"data": {' - for FILE in *; - do - echo '"'$FILE'": "'; base64 ${FILE}; echo '",' - done; - ) >> ../temp_signer.json - sed -i '$ s/.$//' ../temp_signer.json - echo '}}' >> ../temp_signer.json - cat ../temp_signer.json | tr -d '\n' >> ../signer.json - echo "before curl" - curl \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - --request POST \ - --data @../signer.json \ - ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ $.Values.cenmServices.signerName }}/certs - echo "after curl" - # get signer certs from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header 
"X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ $.Values.cenmServices.signerName }}/certs | jq -r 'if .errors then . else . end') - CORDA_SSL_SIGNER_KEYS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "corda-ssl-signer-keys.jks" ]' 2>&1) - IDM_KEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "identity-manager-key-store.jks" ]' 2>&1) - NMAP_KEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "network-map-key-store.jks" ]' 2>&1) - - if [ "$CORDA_SSL_SIGNER_KEYS" == "null" ] || [ "$IDM_KEYSTORE" == "null" ] || [ "$NMAP_KEYSTORE" == "null" ] || [[ "$CORDA_SSL_SIGNER_KEYS" == "parse error"* ]] || [[ "$IDM_KEYSTORE" == "parse error"* ]] || [[ "$NMAP_KEYSTORE" == "parse error"* ]] - then - echo "certificates write or read fail" - sleep {{ $.Values.vault.sleepTimeAfterError }} - if [ "$COUNTER" -ge {{ $.Values.vault.retries }} ] - then - echo "Retry attempted $COUNTER times, certificates have not been saved" - exit 1 - fi - fi - break - COUNTER=`expr "$COUNTER" + 1` - fi - done - # putting idman certificate for cenm idman - cd ${BASE_DIR}/DATA - COUNTER=1 - while [ "$COUNTER" -lt {{ $.Values.vault.retries }} ] - do - if [ -e done.txt ] - then - cd ${BASE_DIR}/DATA/idman/crls - echo "found idman certificates, performing vault put for idman path" - (echo '{"data": {' - for FILE in *; - do - echo '"'$FILE'": "'; base64 ${FILE}; echo '",' - done; - ) >> ../temp_crl.json - sed -i '$ s/.$//' ../temp_crl.json - echo '}}' >> ../temp_crl.json - cat ../temp_crl.json | tr -d '\n' >> ../crl.json - echo "before curl" - curl \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - --request POST \ - --data @../crl.json \ - ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ $.Values.cenmServices.idmanName }}/crls - echo "After curl" - # putting certs for idman at certs path - (echo '{"data": {"corda-ssl-identity-manager-keys.jks": "'; base64 ${BASE_DIR}/DATA/idman/certs/corda-ssl-identity-manager-keys.jks; echo '"}}') | curl -H "X-Vault-Token: ${VAULT_TOKEN}" -d @- ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ $.Values.cenmServices.idmanName }}/certs - # putting certs for networkmap at certs path - (echo '{"data": {"corda-ssl-network-map-keys.jks": "'; base64 ${BASE_DIR}/DATA/networkmap/corda-ssl-network-map-keys.jks; echo '"}}') | curl -H "X-Vault-Token: ${VAULT_TOKEN}" -d @- ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ $.Values.cenmServices.networkmapName }}/certs - # get idman crls from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ $.Values.cenmServices.idmanName }}/crls | jq -r 'if .errors then . else . end') - ROOT_CRL=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "root.crl" ]' 2>&1) - SSL_CRL=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "ssl.crl" ]' 2>&1) - SUBORDINATE_CRL=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "subordinate.crl" ]' 2>&1) - TLS_CRL=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "tls.crl" ]' 2>&1) - - # get idman certs from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ $.Values.cenmServices.idmanName }}/certs | jq -r 'if .errors then . else . 
end') - CORDA_SSL_IDM_KEYS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "corda-ssl-identity-manager-keys.jks" ]' 2>&1) - # get nmap certs from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ $.Values.cenmServices.networkmapName }}/certs | jq -r 'if .errors then . else . end') - CORDA_SSL_NMAP_KEYS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "corda-ssl-network-map-keys.jks" ]' 2>&1) - - if [ "$ROOT_CRL" == "null" ] || [ "$SSL_CRL" == "null" ] || [ "$SUBORDINATE_CRL" == "null" ] || [ "$TLS_CRL" == "null" ] || [ "$CORDA_SSL_IDM_KEYS" == "null" ] || [ "$CORDA_SSL_NMAP_KEYS" == "null" ] || [[ "$ROOT_CRL" == "parse error"* ]] || [[ "$SSL_CRL" == "parse error"* ]] || [[ "$SUBORDINATE_CRL" == "parse error"* ]] || [[ "$TLS_CRL" == "parse error"* ]] || [[ "$CORDA_SSL_IDM_KEYS" == "parse error"* ]] || [[ "$CORDA_SSL_NMAP_KEYS" == "parse error"* ]] - then - echo "certificates write or read fail" - sleep {{ $.Values.vault.sleepTimeAfterError }} - if [ "$COUNTER" -ge {{ $.Values.vault.retries }} ] - then - echo "Retry attempted $COUNTER times, certificates have not been saved" - exit 1 - fi - fi - break - COUNTER=`expr "$COUNTER" + 1` - fi - done - if [ "$COUNTER" -gt {{ $.Values.vault.retries }} ] - then - echo "Retry attempted $COUNTER times, no files found. Giving up!" - exit 1 - break - fi - echo "completed" - volumeMounts: - - name: certcheck - mountPath: /certcheck - - name: pkitool-certs-keys - mountPath: {{ .Values.volume.baseDir }}/DATA - readOnly: false - imagePullSecrets: - - name: {{ .Values.image.imagePullSecret }} - volumes: - - name: certcheck - emptyDir: - medium: Memory - - name: credentials - emptyDir: - medium: Memory - - name: pkitool-signer-etc - emptyDir: - medium: Memory - - name: signer-logs - emptyDir: - medium: Memory - - name: pkitool-certs-keys - emptyDir: - medium: Memory - - name: pkitool-etc - emptyDir: - medium: Memory diff --git a/platforms/r3-corda-ent/charts/cenm-pki-gen/values.yaml b/platforms/r3-corda-ent/charts/cenm-pki-gen/values.yaml deleted file mode 100644 index 633c97aa40d..00000000000 --- a/platforms/r3-corda-ent/charts/cenm-pki-gen/values.yaml +++ /dev/null @@ -1,139 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Default values for PKI Generator chart. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -############################################################# -# Basic Configuration # -############################################################# -# Provide the name of the node -# Eg. nodeName: idman -nodeName: idman - -# This section contains the Corda Enterprise metadata. -metadata: - # Provide the namespace for the Corda Enterprise PKI Generator. - # Eg. namespace: cenm - namespace: cenm - # Provide any additional labels for the Corda Enterprise PKI Generator. - labels: - -# Provide information regarding the Docker images used. -image: - # Provide the alpine utils image, which is used for all init-containers of deployments/jobs. - # NOTE: The alpine image used is the base alpine image with CURL installed. - # Eg. initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - # Provide the image for the pki container. 
- # Eg. pkiContainerName: corda/enterprise-pki:1.2-zulu-openjdk8u242 - pkiContainerName: test - # Provide the docker-registry secret created and stored in kubernetes cluster as a secret. - # Eg. imagePullSecret: regcred - imagePullSecret: "" - # Pull policy to be used for the Docker image - # Eg. pullPolicy: Always - pullPolicy: IfNotPresent - -# Required parameter to start any .jar files -# Eg. acceptLicense: YES -acceptLicense: yes - - -############################################################# -# HashiCorp Vault Configuration # -############################################################# -# NOTE: Make sure that the vault is already unsealed, intialized and configured to -# use Kubernetes service account token based authentication. -# For more info, see https://www.vaultproject.io/docs/auth/kubernetes - -vault: - # Provide the vault address - # Eg. address: http://vault.example.com:8200 - address: - # Provide the vault role used. - # Eg. role: vault-role - role: vault-role - # Provide the authpath configured to be used. - # Eg. authpath: cordaentcenm - authPath: cordaentcenm - # Provide the service account name autheticated to vault. - # NOTE: Make sure that the service account is already created and autheticated to use the vault. - # Eg. serviceaccountname: vault-auth - serviceAccountName: vault-auth - # Provide the vault path where the certificates are stored - # Eg. certsecretprefix: secret/cenm-org-name - certSecretPrefix: secret/cenm-org-name - # The amount of times to retry fetching from/writing to Vault before giving up. - # Eg. retries: 10 - retries: 10 - # The amount of time in seconds to wait after an error occurs when fetching from/writing to Vault. - # Eg. sleepTimeAfterError: 15 - sleepTimeAfterError: 15 - - -############################################################# -# CENM Service Details # -############################################################# -# This section details the CENM service names as per the configuration file - -cenmServices: - # Provide the name of the signer - # Eg. signerName: signer - signerName: signer - # Provide the name of the idman - # Eg. idmanName: idman - idmanName: idman - # Provide the name of the networkmap - # Eg. networkmapName: networkmap - networkmapName: networkmap - # Provide the name of the notary - # Eg. notaryName: notary - notaryName: notary - -identityManager: - # Mention the idman public IP - # Eg. publicIp: idman.bevel.com - publicIp: - # Mention the idman public port: - # Eg. publicPort: 443 - publicPort: 443 - -subjects: - # Mention the subject for tls crl signer - # Eg. tlscrlsigner: "CN=Test TLS Signer Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - tlscrlsigner: "CN=Test TLS Signer Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - # Mention the subject for the tls crl issuer - # Eg. tlscrlissuer: "CN=Corda TLS CRL Authority,OU=Corda UAT,O=R3 HoldCo LLC,L=New York,C=US" - tlscrlissuer: "CN=Corda TLS CRL Authority,OU=Corda UAT,O=R3 HoldCo LLC,L=New York,C=US" - # Mention the subject for rootca - # Eg. rootca: "CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB" - rootca: "CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB" - # Mention the subject for subordinateca - # Eg. subordinateca: "CN=Test Subordinate CA Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - subordinateca: "CN=Test Subordinate CA Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - # Mention the subject for idmanca - # Eg. 
idmanca: "CN=Test Identity Manager Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - idmanca: "CN=Test Identity Manager Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - # Mention the subject for networkmap - # Eg. networkmap: "CN=Test Network Map Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - networkmap: "CN=Test Network Map Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - - -############################################################# -# Settings # -############################################################# -volume: - # Eg. baseDir: /opt/corda - baseDir: /opt/corda -# Mention the maximum size, in megabytes, of the memory allocation pool -# This is consumed by the pki jar -# Eg. cordaJarMx: 256 -cordaJarMx: -# Provide the path where the CENM Service configuration files are stored -# Eg. configPath: etc -configPath: diff --git a/platforms/r3-corda-ent/charts/cenm-signer/Chart.yaml b/platforms/r3-corda-ent/charts/cenm-signer/Chart.yaml index 347516f106a..783c7d07cb5 100644 --- a/platforms/r3-corda-ent/charts/cenm-signer/Chart.yaml +++ b/platforms/r3-corda-ent/charts/cenm-signer/Chart.yaml @@ -5,7 +5,21 @@ ############################################################################################## apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-ent: Deploys the CENM signer." name: cenm-signer +description: "R3 Corda Enterprise Network Manager Signer Service" version: 1.0.0 +appVersion: "latest" +keywords: + - bevel + - corda + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/r3-corda-ent/charts/cenm-signer/README.md b/platforms/r3-corda-ent/charts/cenm-signer/README.md index 770d3dcbc20..ce62b2425e9 100644 --- a/platforms/r3-corda-ent/charts/cenm-signer/README.md +++ b/platforms/r3-corda-ent/charts/cenm-signer/README.md @@ -3,196 +3,107 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) - -# Signer Deployment - -- [Signer Deployment Helm Chart](#Signer-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Contributing](#contributing) -- [License](#license) - - -## Signer Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/cenm-signer) Deploys and configure a signer service within a Kubernetes cluster. - - -## Prerequisites ---- -Before deploying the chart please ensure you have the following prerequisites: - -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm is installed. - - -## Chart Structure ---- -This chart has following structue: -``` - ├── signer - │ ├── Chart.yaml - │ ├── templates - │ │ ├── deployment.yaml - │ │ ├── _helpers.tpl - │ │ ├── service.yaml - │ └── values.yaml -``` - -Type of files used: +# cenm signer-service -- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed. 
-- `deployment.yaml` : Deploys an application using the kubernetes deployment resource, which ensures that a specified number of replica pods are running at all times. The container runs a signing keys and generating certificates and responsible for tailing and displaying log files from the signer service. -- `_helpers.tpl` : A template file used for defining custom labels in the Helm chart. -- `service.yaml` : This file defines a Kubernetes Service with multiple ports for protocols and targets, and supports Ambassador proxy annotations for specific configurations when using the "ambassador" proxy provider. -- `chart.yaml` : Provides metadata about the chart, such as its name, version, and description. -- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the metadata, image, storage, Vault, etc. - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/cenm-signer/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: - -## Parameters ---- +This chart is a component of Hyperledger Bevel. The cenm-signer chart deploys a R3 Corda Enterprise identity manager. If enabled, the keys are then stored on the configured vault and stored as Kubernetes secrets. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details. -### Name +## TL;DR -| Name | Description | Default Value | -| -----------| -------------------------------------------------- | ------------- | -| nodeName | Provide the name of the node | signer | - -### Metadata - -| Name | Description | Default Value | -| ----------------| ----------------------------------------------------------------| ------------- | -| namespace | Provide the namespace for the Corda Enterprise Signer | cenm | -| labels | Provide any additional labels for the Corda Enterprise Signer | "" | +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install signer bevel/cenm-signer +``` -### Image +## Prerequisites -| Name | Description | Default Value | -| ------------------------ | -------------------------------------------------------------------------------- | ----------------------------------------------| -| initContainerName | Information about the Docker container used for the init-containers | ghcr.io/hyperledger | -| signerContainer | Provide the image for the main Signer container | corda/enterprise-signer:1.2-zulu-openjdk8u242 | -| ImagePullSecret | Provide the K8s secret that has rights for pulling the image off the registry | "" | -| pullPolicy | Provide the pull policy for Docker images, either Always or IfNotPresent | IfNotPresent | - -### Vault - -| Name | Description | Default Value | -| ------------------------- | --------------------------------------------------------------------------| --------------------------------- | -| address | Address/URL of the Vault server | "" | -| role | Role used for authentication with Vault | vault-role | -| authpath | Authentication path for Vault | entcordacenm | -| serviceAccountName | Provide the already created service account name autheticated to vault | vault-auth | -| certSecretPrefix | Provide the vault path where the certificates are stored | secret/cenm-org-name/signer/certs | -| retries | Amount of times to retry fetching from/writing to Vault before giving up | 10 | -| sleepTimeAfterError | Amount of time in 
seconds wait after an error occurs | 15 | - -### Service - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ------------- | -| type | Provide the type of service | clusterip | -| port | provide the port for service | 6000 | - -### CenmServices - -| Name | Description | Default Value | -| ---------------| ------------------------------------------| ------------- | -| idmanName | Provide the name of the idman | idman | -| authName | Name of the auth service | "" | -| authPort | Auth Service port | "" | - -### ServiceLocations - -| Name | Description | Default Value | -| ------------------------ | --------------------------------------------------------------------------------------| --------------- | -| identityManager | Provide the idman service address | "" | -| host | The internal hostname for the Idman service, inside the K8s cluster | idman.namespace | -| publicIp | The public IP of the Idman service, accessible outside of the K8s cluster | "" | -| port | Port at which idman service is accessible, inside the K8s cluster | 5052 | -| publicPort | Public port at which the Idman service is accessible outside the K8s cluster | 443 | -| networkMap | networkmap service details | "" | -| revocation port | Details of service where certificate revocation list will be published by idman | 5053 | +- Kubernetes 1.19+ +- Helm 3.2.0+ -### Signers +If Hashicorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ -| Name | Description | Default Value | -| --------------- | ----------------------------------------------------------| ------------- | -| CSR | For checking Certificate Signing Request (CSR) schedule | 1m | -| CRL | For checking Certificate Revocation List (CRL) schedule | 1d | -| NetworkMap | For checking with NetworkMap (NMS) | 1d | -| NetworkParameters | For checking network parameters interval | 1m | +> **Important**: Ensure the `enterprise-init` chart has been installed before installing this. Also check the dependent charts. Installing this chart seperately is not required as it is a dependent chart for cenm, and is installed with cenm chart. -### Config +## Installing the Chart -| Name | Description | Default Value | -| ------------------------ | ----------------------------------------------------------------------------- | --------------- | -| baseDir | Provide volume related specifications | /opt/corda | -| jarPath | Provide the path where the CENM Signer .jar-file is stored | "bin" | -| configPath | Provide the path where the CENM Service configuration files are stored | "etc" | -| cordaJar | Provide configuration of the .jar files used in the Node | "" | -| deployment | Provide any extra annotations for the deployment | "vaule" | -| pod | Set memory limits of pod | "" | -| replicas | Provide the number of replicas for your pods | "1" | +To install the chart with the release name `signer`: -### Healthcheck +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install signer bevel/cenm-signer +``` -| Name | Description | Default Value | -| ----------------------------| --------------------------------------------------------------| ------------- | -| nodePort | Health Check node port set to get rid of logs pollution | 0 | +The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. 
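+As a minimal illustration (the Vault address and the `my-signer-values.yaml` file below are placeholders for your own environment, not required inputs), any key listed under [Parameters](#parameters) can be overridden at install time with `--set`, or collected in a separate values file passed via `--values`:
+
+```bash
+# Override selected global values for this release; substitute values for your environment
+helm install signer bevel/cenm-signer \
+  --set global.vault.address=http://vault.example.com:8200 \
+  --set global.proxy.externalUrlSuffix=test.blockchaincloudpoc.com \
+  --values my-signer-values.yaml   # optional extra overrides from a file
+```
+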
+> **Tip**: List all releases using `helm list` - -## Deployment ---- -To deploy the Signer Helm chart, follow these steps: +## Uninstalling the Chart -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/cenm-signer/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install, upgrade, verify delete the chart: +To uninstall/delete the `signer` deployment: -To install the chart: ```bash -helm repo add bevel https://hyperledger.github.io/bevel/ -helm install ./cenm-signer ``` +helm uninstall signer ``` -To upgrade the chart: -```bash -helm upgrade ./cenm-signer -``` +The command removes all the Kubernetes components associated with the chart and deletes the release. -To verify the deployment: -```bash -kubectl get jobs -n -``` -Note : Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. +## Parameters -To delete the chart: -```bash -helm uninstall -``` -Note : Replace `` with the desired name for the release. +### Global parameters +These parameters are referred to in the same way in each parent or child chart +| Name | Description | Default Value | +|--------|---------|-------------| +|`global.serviceAccountName` | The service account name that will be used for Vault Auth management| `vault-auth` | +| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. Currently only `aws` and `minikube` are tested | `aws` | +| `global.cluster.cloudNativeServices` | only `false` is implemented; `true` (use Cloud Native Services: SecretsManager and IAM for AWS, KeyVault & Managed Identities for Azure) is reserved for future use | `false` | +| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` is supported. | `hashicorp` | +| `global.vault.role` | Role used for authentication with Vault | `vault-role` | +| `global.vault.address`| URL of the Vault server. | `""` | +| `global.vault.authPath` | Authentication path for Vault | `supplychain` | +| `global.vault.secretEngine` | The value for vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | The value for vault secret prefix which must start with `data/` | `data/supplychain` | +| `global.proxy.provider` | The proxy or Ingress provider.
Can be `none` or `ambassador` | `ambassador` | +| `global.proxy.externalUrlSuffix` | The external URL suffix at which the CENM services will be available | `test.blockchaincloudpoc.com` | +| `global.cenm.sharedCreds.truststore` | The truststore password for the pki-generated truststores | `password` | +| `global.cenm.sharedCreds.keystore` | The keystore password for the pki-generated keystores | `password` | +| `global.cenm.identityManager.port` | The port for identity manager issuance | `10000` | +| `global.cenm.identityManager.revocation.port` | The port for identity manager revocation | `5053` | +| `global.cenm.identityManager.internal.port` | The port for identity manager internal listener | `5052` | +| `global.cenm.auth.port` | The port for the auth API | `8081` | +| `global.cenm.gateway.port` | The port for the gateway API | `8080` | +| `global.cenm.zone.enmPort` | The port for zone ENM | `25000` | + +### Storage + +| Name | Description | Default Value | +|--------|---------|-------------| +| `storage.allowedTopologies.enabled` | Check [bevel-storageclass](../../../shared/charts/bevel-storageclass/README.md) for details | `false` | - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Signer Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/cenm-signer), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). +### Image +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | +| `image.pullPolicy` | Pull policy to be used for the Docker images | `IfNotPresent` | +| `image.signer.repository` | CENM signer image repository | `corda/enterprise-signer`| +| `image.signer.tag` | CENM signer image tag as per version | `1.5.9-zulu-openjdk8u382`| +| `image.enterpriseCli.repository` | CENM enterprise CLI image repository | `corda/enterprise-cli`| +| `image.enterpriseCli.tag` | CENM enterprise CLI image tag as per version | `1.5.9-zulu-openjdk8u382`| + +### Signers +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `signers.CSR.schedule.interval` | Certificate signing request (CSR) signing interval | `"1m"` | +| `signers.CRL.schedule.interval` | Certificate revocation list (CRL) signing interval | `"1d"` | +| `signers.NetworkMap.schedule.interval` | NetworkMap signing interval | `"1m"` | +| `signers.NetworkParameters.schedule.interval` | Network Parameters signing interval | `"1m"` | - ## License This chart is licensed under the Apache v2.0 license. -Copyright © 2023 Accenture +Copyright © 2024 Accenture ### Attribution diff --git a/platforms/r3-corda-ent/charts/cenm-signer/files/getZoneToken.sh b/platforms/r3-corda-ent/charts/cenm-signer/files/getZoneToken.sh new file mode 100644 index 00000000000..11cd9b8790d --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm-signer/files/getZoneToken.sh @@ -0,0 +1,16 @@ +#!/bin/sh + +if [ ! -f /opt/cenm/etc/token ] +then + EXIT_CODE=1 + until [ "${EXIT_CODE}" -eq "0" ] + do + echo "Trying to login to gateway.{{ .Release.Namespace }}:{{ .Values.global.cenm.gateway.port }} ..."
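+    # Retry the cenm-tool login below until the CENM gateway service is reachable;
+    # the surrounding loop only exits once the command returns a zero exit code.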
+ java -jar bin/cenm-tool.jar context login -s http://gateway.{{ .Release.Namespace }}:{{ .Values.global.cenm.gateway.port }} -u config-maintainer -p p4ssWord + EXIT_CODE=${?} + echo "EXIT_CODE=${EXIT_CODE}" + sleep 5 + done + + java -jar bin/cenm-tool.jar signer config set-admin-address -a=signer.{{ .Release.Namespace }}:{{ .Values.adminListener.port }} +fi diff --git a/platforms/r3-corda-ent/charts/cenm-signer/files/run.sh b/platforms/r3-corda-ent/charts/cenm-signer/files/run.sh new file mode 100644 index 00000000000..e066075a17c --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm-signer/files/run.sh @@ -0,0 +1,31 @@ +#!/bin/sh + +# +# main run +# + +if [ -f bin/signer.jar ] +then + echo + echo "CENM: starting Signer process ..." + echo + java -Xmx1G -jar bin/signer.jar --config-file etc/signer.conf + EXIT_CODE=${?} +else + echo "Missing Signer jar file in bin directory:" + ls -al bin + EXIT_CODE=110 +fi + +if [ "${EXIT_CODE}" -ne "0" ] +then + HOW_LONG={{ .Values.sleepTimeAfterError }} + echo + echo "Signer failed - exit code: ${EXIT_CODE} (error)" + echo + echo "Going to sleep for requested ${HOW_LONG} seconds to let you login and investigate." + echo +fi + +sleep ${HOW_LONG} +echo diff --git a/platforms/r3-corda-ent/charts/cenm-signer/files/signer.conf b/platforms/r3-corda-ent/charts/cenm-signer/files/signer.conf new file mode 100644 index 00000000000..408d8593526 --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm-signer/files/signer.conf @@ -0,0 +1,150 @@ +signingKeys = { + "cordaidentitymanagerca" = { + alias = "cordaidentitymanagerca" + type = LOCAL + password = {{ .Values.global.cenm.sharedCreds.keystore }} + keyStore { + file = "/certs/identity-manager-key-store.jks" + password = {{ .Values.global.cenm.sharedCreds.keystore }} + } + }, + "cordanetworkmap" = { + alias = "cordanetworkmap" + type = LOCAL + password = {{ .Values.global.cenm.sharedCreds.keystore }} + keyStore { + file = "/certs/network-map-key-store.jks" + password = {{ .Values.global.cenm.sharedCreds.keystore }} + } + } +} + +signers = { + "CSR" = { + type = CSR + signingKeyAlias = "cordaidentitymanagerca" + crlDistributionPoint = "{{ include "identityManager.URL" . }}/certificate-revocation-list/doorman" + validDays = 7300 # 20 year certificate expiry + schedule { + interval = {{ .Values.signers.CSR.schedule.interval }} + } + serviceLocation = [ + { + host = idman.{{ .Release.Namespace }} + port = {{ .Values.global.cenm.identityManager.internal.port }} + ssl = { + keyStore = { + location = "/certs/corda-ssl-signer-keys.jks" + password = {{ .Values.global.cenm.sharedCreds.keystore }} + } + trustStore = { + location = "/certs/corda-ssl-trust-store.jks" + password = {{ .Values.global.cenm.sharedCreds.truststore }} + } + validate = true + } + } + ] + + }, + "CRL" = { + type = CRL + signingKeyAlias = "cordaidentitymanagerca" + crlDistributionPoint = "{{ include "identityManager.URL" . 
}}/certificate-revocation-list/doorman" + # updatePeriod = 86400000 # 1 day CRL expiry + updatePeriod = 604800000 # 1 week CRL expiry + schedule { + interval = {{ .Values.signers.CRL.schedule.interval }} + } + serviceLocation = [ + { + host = idman.{{ .Release.Namespace }} + port = {{ .Values.global.cenm.identityManager.revocation.port }} + ssl = { + keyStore = { + location = "/certs/corda-ssl-signer-keys.jks" + password = {{ .Values.global.cenm.sharedCreds.keystore }} + } + trustStore = { + location = "/certs/corda-ssl-trust-store.jks" + password = {{ .Values.global.cenm.sharedCreds.truststore }} + } + validate = true + } + } + ] + }, + "NetworkMap" = { + type = NETWORK_MAP + signingKeyAlias = "cordanetworkmap" + schedule { + interval = {{ .Values.signers.NetworkMap.schedule.interval }} + } + serviceLocation = [ + { + host = cenm-networkmap.{{ .Release.Namespace }} + port = {{ .Values.global.cenm.networkmap.internal.port }} + ssl = { + keyStore = { + location = "/certs/corda-ssl-signer-keys.jks" + password = {{ .Values.global.cenm.sharedCreds.keystore }} + } + trustStore = { + location = "/certs/corda-ssl-trust-store.jks" + password = {{ .Values.global.cenm.sharedCreds.truststore }} + } + validate = true + } + } + ] + }, + "NetworkParameters" = { + type = NETWORK_PARAMETERS + signingKeyAlias = "cordanetworkmap" + schedule { + interval = {{ .Values.signers.NetworkParameters.schedule.interval }} + } + serviceLocation = [ + { + host = cenm-networkmap.{{ .Release.Namespace }} + port = {{ .Values.global.cenm.networkmap.internal.port }} + ssl = { + keyStore = { + location = "/certs/corda-ssl-signer-keys.jks" + password = {{ .Values.global.cenm.sharedCreds.keystore }} + } + trustStore = { + location = "/certs/corda-ssl-trust-store.jks" + password = {{ .Values.global.cenm.sharedCreds.truststore }} + } + validate = true + } + } + ] + } +} +authServiceConfig = { + disableAuthentication=false + host=auth.{{ .Release.Namespace }} + port={{ .Values.global.cenm.auth.port }} + trustStore = { + location = "/certs/corda-ssl-trust-store.jks" + password = {{ .Values.global.cenm.sharedCreds.truststore }} + } + issuer="http://test" + leeway=5s +} + +adminListener = { + port = {{ .Values.adminListener.port }} + ssl = { + keyStore = { + location = /certs/corda-ssl-identity-manager-keys.jks + password = {{ .Values.global.cenm.sharedCreds.keystore }} + } + trustStore = { + location = /certs/corda-ssl-trust-store.jks + password = {{ .Values.global.cenm.sharedCreds.truststore }} + } + } +} diff --git a/platforms/r3-corda-ent/charts/cenm-signer/requirements.yaml b/platforms/r3-corda-ent/charts/cenm-signer/requirements.yaml new file mode 100644 index 00000000000..895f0a0e1cf --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm-signer/requirements.yaml @@ -0,0 +1,7 @@ +dependencies: + - name: bevel-storageclass + alias: storage + repository: "file://../../../shared/charts/bevel-storageclass" + tags: + - storage + version: ~1.0.0 diff --git a/platforms/r3-corda-ent/charts/cenm-signer/templates/_helpers.tpl b/platforms/r3-corda-ent/charts/cenm-signer/templates/_helpers.tpl index 7f9b0dc6131..fa4ea317059 100644 --- a/platforms/r3-corda-ent/charts/cenm-signer/templates/_helpers.tpl +++ b/platforms/r3-corda-ent/charts/cenm-signer/templates/_helpers.tpl @@ -1,5 +1,29 @@ -{{- define "labels.custom" -}} - {{- range $key, $val := .Values.metadata.labels -}} - {{- $key -}}: {{- $val -}} - {{- end -}} +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "signer.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "signer.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "signer.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- end -}} diff --git a/platforms/r3-corda-ent/charts/cenm-signer/templates/configmap.yaml b/platforms/r3-corda-ent/charts/cenm-signer/templates/configmap.yaml new file mode 100644 index 00000000000..470d839daa7 --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm-signer/templates/configmap.yaml @@ -0,0 +1,28 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "signer.fullname" . }}-conf + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ include "signer.fullname" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + app.kubernetes.io/part-of: {{ include "signer.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} +data: + run.sh: |+ + {{ tpl (.Files.Get "files/run.sh") . | nindent 4 }} + + signer.conf: |+ + {{ tpl (.Files.Get "files/signer.conf") . | nindent 4 }} + + getZoneToken.sh: |+ + {{ tpl (.Files.Get "files/getZoneToken.sh") . | nindent 4 }} diff --git a/platforms/r3-corda-ent/charts/cenm-signer/templates/deployment.yaml b/platforms/r3-corda-ent/charts/cenm-signer/templates/deployment.yaml deleted file mode 100644 index 3f6ba9e5f67..00000000000 --- a/platforms/r3-corda-ent/charts/cenm-signer/templates/deployment.yaml +++ /dev/null @@ -1,449 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} - {{- if .Values.config.deployment.annotations }} - annotations: -{{ toYaml .Values.config.deployment.annotations | indent 8 }} - {{- end }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - replicas: {{ .Values.config.replicas }} - selector: - matchLabels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - template: - metadata: - labels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - serviceAccountName: {{ $.Values.vault.serviceAccountName }} - securityContext: - runAsUser: 1000 - runAsGroup: 1000 - fsGroup: 1000 - initContainers: - - name: init-check-certificates - image: {{ .Values.image.initContainer }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authPath }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code and curl_response - $curl_response" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - # Setting up the environment to get secrets/certificates from Vault - echo "Getting secrets/certificates from Vault server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - echo "Logged into Vault" - - COUNTER=1 - while [ "$COUNTER" -lt {{ $.Values.vault.retries }} ] - do - # Get keystores from Vault, to see if the certificates are created and have been put in Vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ $.Values.nodeName }}/certs | jq -r 'if .errors then . else . 
end') - echo ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ $.Values.nodeName }}/certs - data_info="$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data["data"]')" - - if [ "$data_info" == "null" ] - then - sleep {{ $.Values.vault.sleepTimeAfterError }} - else - validateVaultResponse "${CERTS_SECRET_PREFIX}/{{ $.Values.nodeName }}/certs" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - break - fi - COUNTER=`expr "$COUNTER" + 1` - done - - if [ "$COUNTER" -ge {{ $.Values.vault.retries }} ] - then - echo "Certificates might not have been put in Vault. Giving up after $COUNTER tries!" - exit 1 - fi - echo "Done" - - name: init-certificates - image: {{ .Values.image.initContainer }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authPath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.config.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - - name: IDMAN_NODENAME - value: {{ $.Values.cenmServices.idmanName }} - - name: MOUNT_PATH - value: "/DATA" - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code and curl_response - $curl_response" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - # setting up env to get secrets from vault - echo "Getting secrets from Vault Server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - - mkdir -p ${MOUNT_PATH} - mkdir -p ${MOUNT_PATH}/signer; - mkdir -p ${MOUNT_PATH}/root; - mkdir -p ${MOUNT_PATH}/idman; - - # signer-files from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ $.Values.nodeName }}/certs | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/{{ $.Values.nodeName }}/certs" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - idm_jks=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["identity-manager-key-store.jks"]') - echo "${idm_jks}" | base64 -d > ${MOUNT_PATH}/signer/identity-manager-key-store.jks - - nms_jks=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["network-map-key-store.jks"]') - echo "${nms_jks}" | base64 -d > ${MOUNT_PATH}/signer/network-map-key-store.jks - - corda_ssl_signer=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["corda-ssl-signer-keys.jks"]') - echo "${corda_ssl_signer}" | base64 -d > ${MOUNT_PATH}/signer/corda-ssl-signer-keys.jks - - # idman ssl key-stores from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/${IDMAN_NODENAME}/certs | jq -r 'if .errors then . else . 
end') - idm_ssl=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["corda-ssl-identity-manager-keys.jks"]') - echo "${idm_ssl}" | base64 -d > ${MOUNT_PATH}/idman/corda-ssl-identity-manager-keys.jks - echo "Successfully got SSL Idman certifcates" - - # ssl trust-stores from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/root/certs | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/root/certs" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - - corda_ssl_trust_store=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["corda-ssl-trust-store.jks"]') - echo "${corda_ssl_trust_store}" | base64 -d > ${MOUNT_PATH}/root/corda-ssl-trust-store.jks - - # Fetching credentials from vault - mkdir -p ${MOUNT_PATH}/credentials/truststore - OUTPUT_PATH=${MOUNT_PATH}/credentials/truststore; - - #Fetching truststore credentials from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials/truststore | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials/truststore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - - SSL_TRUSTSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["ssl"]') - echo "${SSL_TRUSTSTORE}"> ${OUTPUT_PATH}/sslts - - mkdir -p ${MOUNT_PATH}/credentials/keystore - OUTPUT_PATH=${MOUNT_PATH}/credentials/keystore; - - #Fetching keystore credentials from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials/keystore | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials/keystore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - - IDMAN_KEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["idman"]') - echo "${IDMAN_KEYSTORE}"> ${OUTPUT_PATH}/idmanks - NETWORKMAP_KEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["networkmap"]') - echo "${NETWORKMAP_KEYSTORE}"> ${OUTPUT_PATH}/networkmapks - - mkdir -p ${MOUNT_PATH}/credentials/ssl - OUTPUT_PATH=${MOUNT_PATH}/credentials/ssl; - - #Fetching ssl credentials from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials/ssl | jq -r 'if .errors then . else . 
end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials/ssl" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - - SIGNER_SSL=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["signer"]') - echo "${SIGNER_SSL}"> ${OUTPUT_PATH}/signerssl - - IDMAN_SSL=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["idman"]') - echo "${IDMAN_SSL}"> ${OUTPUT_PATH}/idmanssl - - echo "Done" - volumeMounts: - - name: certificates - mountPath: /DATA - containers: - - name: signer - image: "{{ .Values.image.signerContainer }}" - env: - - name: ACCEPT_LICENSE - value: "{{ .Values.acceptLicense }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: ["/bin/bash", "-c"] - args: - - |- - echo 'signingKeys = { - "cordaidentitymanagerca" = { - alias = "identitymanagerca" - type = LOCAL - password = "IDMAN_KEYSTORE" - keyStore { - file = "./DATA/signer/identity-manager-key-store.jks" - password = "IDMAN_KEYSTORE" - } - }, - "cordanetworkmap" = { - alias = "networkmap" - type = LOCAL - password = "NETWORKMAP_KEYSTORE" - keyStore { - file = "./DATA/signer/network-map-key-store.jks" - password = "NETWORKMAP_KEYSTORE" - } - } - } - - signers = { - "CSR" = { - type = CSR - signingKeyAlias = "cordaidentitymanagerca" - crlDistributionPoint = "https://{{ .Values.serviceLocations.identityManager.publicIp }}:{{ .Values.serviceLocations.identityManager.publicPort }}/certificate-revocation-list/doorman" - validDays = 7300 # 20 year certificate expiry - schedule { - interval = {{ .Values.signers.CSR.schedule.interval }} - } - serviceLocation = [ - { - host = {{ .Values.serviceLocations.identityManager.host }} - port = {{ .Values.serviceLocations.identityManager.port }} - ssl = { - keyStore = { - location = "./DATA/signer/corda-ssl-signer-keys.jks" - password = SIGNER_SSL - keyPassword = SIGNER_SSL - } - trustStore = { - location = "./DATA/root/corda-ssl-trust-store.jks" - password = SSL_TRUSTSTORE - } - validate = true - } - } - ] - - }, - "CRL" = { - type = CRL - signingKeyAlias = "cordaidentitymanagerca" - crlDistributionPoint = "https://{{ .Values.serviceLocations.identityManager.publicIp }}:{{ .Values.serviceLocations.identityManager.publicPort }}/certificate-revocation-list/doorman" - # updatePeriod = 86400000 # 1 day CRL expiry - updatePeriod = 604800000 # 1 week CRL expiry - schedule { - interval = {{ .Values.signers.CRL.schedule.interval }} - } - serviceLocation = [ - { - host = {{ .Values.serviceLocations.identityManager.host }} - port = {{ .Values.serviceLocations.revocation.port }} - ssl = { - keyStore = { - location = "./DATA/signer/corda-ssl-signer-keys.jks" - password = SIGNER_SSL - keyPassword = SIGNER_SSL - } - trustStore = { - location = "./DATA/root/corda-ssl-trust-store.jks" - password = SSL_TRUSTSTORE - } - validate = true - } - } - ] - }, - "NetworkMap" = { - type = NETWORK_MAP - signingKeyAlias = "cordanetworkmap" - schedule { - interval = {{ .Values.signers.NetworkMap.schedule.interval }} - } - serviceLocation = [ - { - host = {{ .Values.serviceLocations.networkMap.host }} - port = {{ .Values.serviceLocations.networkMap.port }} - ssl = { - keyStore = { - location = "./DATA/signer/corda-ssl-signer-keys.jks" - password = SIGNER_SSL - keyPassword = SIGNER_SSL - } - trustStore = { - location = "./DATA/root/corda-ssl-trust-store.jks" - password = SSL_TRUSTSTORE - } - validate = true - } - } - ] - }, - "NetworkParameters" = { - type = NETWORK_PARAMETERS - signingKeyAlias = "cordanetworkmap" - schedule { - interval = {{ .Values.signers.NetworkParameters.schedule.interval }} - } - 
serviceLocation = [ - { - host = {{ .Values.serviceLocations.networkMap.host }} - port = {{ .Values.serviceLocations.networkMap.port }} - ssl = { - keyStore = { - location = "./DATA/signer/corda-ssl-signer-keys.jks" - password = SIGNER_SSL - keyPassword = SIGNER_SSL - } - trustStore = { - location = "./DATA/root/corda-ssl-trust-store.jks" - password = SSL_TRUSTSTORE - } - validate = true - } - } - ] - } - } - authServiceConfig = { - disableAuthentication=false - host="{{ .Values.cenmServices.authName }}.{{ .Values.metadata.namespace }}" - port={{ .Values.cenmServices.authPort }} - trustStore = { - location = "./DATA/root/corda-ssl-trust-store.jks" - password = SSL_TRUSTSTORE - } - issuer="http://test" - leeway=5s - } - - adminListener = { - port = {{ .Values.service.adminListener.port }} - ssl = { - keyStore = { - location = ./DATA/idman/corda-ssl-identity-manager-keys.jks - password = IDMAN_SSL - } - trustStore = { - location = ./DATA/root/corda-ssl-trust-store.jks - password = SSL_TRUSTSTORE - } - } - }' >> {{ .Values.config.configPath }}/signer.conf - - #replacement of the variables - export IDMAN_SSL=$(cat {{ .Values.config.volume.baseDir }}/DATA/credentials/ssl/idmanssl) - sed -i -e "s*IDMAN_SSL*${IDMAN_SSL}*g" etc/signer.conf - export IDMAN_KEYSTORE=$(cat {{ .Values.config.volume.baseDir }}/DATA/credentials/keystore/idmanks) - sed -i -e "s*IDMAN_KEYSTORE*${IDMAN_KEYSTORE}*g" {{ .Values.config.configPath }}/signer.conf - export NETWORKMAP_KEYSTORE=$(cat {{ .Values.config.volume.baseDir }}/DATA/credentials/keystore/networkmapks) - sed -i -e "s*NETWORKMAP_KEYSTORE*${NETWORKMAP_KEYSTORE}*g" {{ .Values.config.configPath }}/signer.conf - export SIGNER_SSL=$(cat {{ .Values.config.volume.baseDir }}/DATA/credentials/ssl/signerssl) - sed -i -e "s*SIGNER_SSL*${SIGNER_SSL}*g" {{ .Values.config.configPath }}/signer.conf - export SSL_TRUSTSTORE=$(cat {{ .Values.config.volume.baseDir }}/DATA/credentials/truststore/sslts) - sed -i -e "s*SSL_TRUSTSTORE*${SSL_TRUSTSTORE}*g" {{ .Values.config.configPath }}/signer.conf - - java -Xmx{{ .Values.config.cordaJar.memorySize }}{{ .Values.config.cordaJar.unit }} -jar {{ .Values.config.jarPath }}/signer.jar --config-file {{ .Values.config.configPath }}/signer.conf - - volumeMounts: - - name: pkitool-signer-etc - mountPath: {{ .Values.config.volume.baseDir }}/etc - - name: signer-logs - mountPath: {{ .Values.config.volume.baseDir }}/logs - - name: certificates - mountPath: {{ .Values.config.volume.baseDir }}/DATA - resources: - requests: - memory: {{ .Values.config.pod.resources.requests }} - limits: - memory: {{ .Values.config.pod.resources.limits }} - - name: logs - image: "{{ .Values.image.signerContainer }}" - env: - - name: ACCEPT_LICENSE - value: "{{required "You must accept the license agreement to run this software" .Values.acceptLicense }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: ["/bin/bash", "-c"] - args: - - |- - cd {{ .Values.config.volume.baseDir }}/ - while true; do tail -f logs/signing-service/*.log 2>/dev/null ; sleep 5; done - # in case sth went wrong just wait indefinitely ... - tail -f /dev/null - volumeMounts: - - name: signer-logs - mountPath: {{ .Values.config.volume.baseDir }}/logs - {{- with .Values.image.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . 
| nindent 8 }} - {{- end }} - volumes: - - name: pkitool-signer-etc - emptyDir: - medium: Memory - - name: signer-logs - emptyDir: - medium: Memory - - name: certificates - emptyDir: - medium: Memory diff --git a/platforms/r3-corda-ent/charts/cenm-signer/templates/service.yaml b/platforms/r3-corda-ent/charts/cenm-signer/templates/service.yaml index 67b337d71fa..9c24e32866f 100644 --- a/platforms/r3-corda-ent/charts/cenm-signer/templates/service.yaml +++ b/platforms/r3-corda-ent/charts/cenm-signer/templates/service.yaml @@ -4,27 +4,30 @@ # SPDX-License-Identifier: Apache-2.0 ############################################################################################## +--- apiVersion: v1 kind: Service metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} + name: {{ include "signer.name" . }} + namespace: {{ .Release.Namespace }} labels: - run: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + app.kubernetes.io/name: signer-service + app.kubernetes.io/component: signer + app.kubernetes.io/part-of: {{ include "signer.fullname" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} spec: + type: ClusterIP selector: - app: {{ .Values.nodeName }} -# we need Local policy and healthCheckNodePort set to get rid of logs pollution -{{- if (.Values.healthCheck.nodePort) }} - healthCheckNodePort: {{ .Values.healthCheck.nodePort }} -{{- end }} - type: {{ .Values.service.type }} + app.kubernetes.io/name: signer-statefulset + app.kubernetes.io/component: signer + app.kubernetes.io/part-of: {{ include "signer.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} ports: - - port: {{ .Values.service.adminListener.port }} - targetPort: {{ .Values.service.adminListener.port }} - protocol: TCP - name: adminlistener + - port: {{ .Values.adminListener.port }} + targetPort: {{ .Values.adminListener.port }} + protocol: TCP + name: adminlistener diff --git a/platforms/r3-corda-ent/charts/cenm-signer/templates/statefulset.yaml b/platforms/r3-corda-ent/charts/cenm-signer/templates/statefulset.yaml new file mode 100644 index 00000000000..ab1589845d6 --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm-signer/templates/statefulset.yaml @@ -0,0 +1,136 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "signer.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ include "signer.fullname" . }} + app.kubernetes.io/name: signer-statefulset + app.kubernetes.io/component: signer + app.kubernetes.io/part-of: {{ include "signer.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} +spec: + replicas: 1 + podManagementPolicy: OrderedReady + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ include "signer.fullname" . 
}} + app.kubernetes.io/name: signer-statefulset + app.kubernetes.io/component: signer + app.kubernetes.io/part-of: {{ include "signer.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + serviceName: {{ include "signer.fullname" . }} + volumeClaimTemplates: + - metadata: + name: signer-logs + spec: + storageClassName: storage-{{ .Release.Name }} + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storage.size }} + template: + metadata: + labels: + app: {{ include "signer.fullname" . }} + app.kubernetes.io/name: signer-statefulset + app.kubernetes.io/component: signer + app.kubernetes.io/part-of: {{ include "signer.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + spec: + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + serviceAccountName: {{ .Values.global.serviceAccountName }} + hostname: {{ .Release.Name }} + imagePullSecrets: + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} + initContainers: + - name: init-token + image: {{ .Values.image.enterpriseCli.repository }}:{{ .Values.image.enterpriseCli.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: ["/bin/bash", "-c"] + args: + - |- + cp CM/*.sh bin/ + chmod +x bin/* + bin/getZoneToken.sh + volumeMounts: + - name: signer-etc + mountPath: /opt/cenm/etc + - name: cenm-certs + mountPath: /certs + - name: signer-conf + mountPath: /opt/cenm/etc/signer.conf + subPath: signer.conf + - name: signer-conf + mountPath: /opt/cenm/CM/getZoneToken.sh + subPath: getZoneToken.sh + containers: + - name: signer + image: {{ .Values.image.signer.repository }}:{{ .Values.image.signer.tag }} + env: + - name: ACCEPT_LICENSE + value: "YES" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: ["/bin/bash", "-c"] + args: + - |- + chmod +x bin/* + bin/run.sh + volumeMounts: + - name: signer-conf + mountPath: /opt/cenm/bin/run.sh + subPath: run.sh + - name: cenm-certs + mountPath: /certs + - name: signer-conf + mountPath: /opt/cenm/etc/signer.conf + subPath: signer.conf + - name: signer-logs + mountPath: /opt/cenm/logs + - name: logs + image: {{ .Values.image.signer.repository }}:{{ .Values.image.signer.tag }} + env: + - name: ACCEPT_LICENSE + value: "YES" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: ["/bin/bash", "-c"] + args: + - |- + cd /opt/cenm/ + while true; do tail -f logs/signing-service/*.log 2>/dev/null ; sleep 5; done + # in case sth went wrong just wait indefinitely ... + tail -f /dev/null + volumeMounts: + - name: signer-logs + mountPath: /opt/cenm/logs + volumes: + - name: signer-conf + configMap: + name: {{ include "signer.fullname" . }}-conf + defaultMode: 0777 + - name: cenm-certs + secret: + secretName: cenm-certs + - name: signer-etc + emptyDir: + medium: Memory diff --git a/platforms/r3-corda-ent/charts/cenm-signer/values.yaml b/platforms/r3-corda-ent/charts/cenm-signer/values.yaml index ea5ee52b78d..65dca2a210d 100644 --- a/platforms/r3-corda-ent/charts/cenm-signer/values.yaml +++ b/platforms/r3-corda-ent/charts/cenm-signer/values.yaml @@ -4,204 +4,91 @@ # SPDX-License-Identifier: Apache-2.0 ############################################################################################## -# Default values for Signer service. +# Default values for cenm-signer chart. 
# This is a YAML-formatted file. # Declare variables to be passed into your templates. - -############################################################# -# Basic Configuration # -############################################################# -# Provide the name of the node -# Eg. nodeName: signer -nodeName: signer - -# This section contains the Corda Enterprise Signer metadata. -metadata: - # Provide the namespace for the Corda Enterprise Signer. - # Eg. namespace: cenm - namespace: cenm - # Provide any additional labels for the Corda Enterprise Signer. - labels: - -# Provide information regarding the Docker images used. -image: - # Provide the alpine utils image, which is used for all init-containers of deployments/jobs. - # NOTE: The alpine image used is the base alpine image with CURL installed. - # Eg. initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - initContainer: ghcr.io/hyperledger/bevel-alpine:latest - # Provide the image for the main Signer container. - # Eg. idmanContainerName: corda/enterprise-signer:1.2-zulu-openjdk8u242 - signerContainer: - # Provide the docker-registry secret created and stored in kubernetes cluster as a secret. - # Eg. imagePullSecrets: regcred, can add multiple creds - imagePullSecrets: - - name: - # Pull policy to be used for the Docker image - # Eg. pullPolicy: Always - pullPolicy: - -# Required parameter to start any .jar files -# e.g. acceptLicense: YES -acceptLicense: YES - -############################################################# -# HashiCorp Vault Configuration # -############################################################# -# NOTE: Make sure that the vault is already unsealed, intialized and configured to -# use Kubernetes service account token based authentication. -# For more info, see https://www.vaultproject.io/docs/auth/kubernetes - -vault: - # Provide the vault address - # Eg. address: http://vault.example.com:8200 - address: - # Provide the vault role used. - # Eg. role: vault-role - role: vault-role - # Provide the authpath configured to be used. - # Eg. authpath: entcordacenm - authPath: entcordacenm - # Provide the service account name autheticated to vault. - # NOTE: Make sure that the service account is already created and autheticated to use the vault. - # Eg. serviceAccountName: vault-auth +global: serviceAccountName: vault-auth - # Provide the vault path where the certificates are stored - # Eg. certsecretprefix: secret/cenm-org-name/signer/certs - certSecretPrefix: secret/cenm-org-name/signer/certs - # The amount of times to retry fetching from/writing to Vault before giving up. - # Eg. retries: 10 - retries: 10 - # The amount of time in seconds to wait after an error occurs when fetching from/writing to Vault. - # Eg. sleepTimeAfterError: 15 - sleepTimeAfterError: 15 - - -############################################################# -# Signer Configuration # -############################################################# - -# Provide the service details for setting up ssh -service: - # Eg. type: ClusterIP - type: - adminListener: - port: 6000 -############################################################# -# CENM SERVICES DETAILS # -############################################################# -cenmServices: - # Auth service name - authName: - # Auth service port - authPort: - # Provide the name of the idman - # Eg. 
idmanName: idman - idmanName: -# provide the other cenm services details -serviceLocations: - # Provide the idman service address - identityManager: - # The internal hostname for the Idman service, inside the K8s cluster - # Eg. host: idman.namespace - host: idman.namespace - # The public IP of the Idman service, accessible outside of the K8s cluster - # Eg. publicIp: idman.external-url-suffix.com - publicIp: - # Port at which idman service is accessible, inside the K8s cluster - # Eg. port: 5052 - port: 5052 - # Public port at which the Idman service is accessible outside the K8s cluster - # Eg. publicPort: 443 - publicPort: 443 - # networkmap service details - networkMap: - # Ex host: networkmap.namespace - host: - # port: 5050 - port: - # Details of service where certificate revocation list will be published by idman - revocation: - # port: 5053 - port: 5053 - + cluster: + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # set to true to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) + vault: + type: hashicorp + role: vault-role + address: + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + proxy: + #This will be the proxy/ingress provider. Can have values "ambassador" or "none" + #Eg. provider: "ambassador" + provider: "ambassador" + #This field contains the external URL of the node + #Eg. externalUrlSuffix: test.blockchaincloudpoc.com + externalUrlSuffix: test.blockchaincloudpoc.com + cenm: + sharedCreds: + truststore: password + keystore: password + identityManager: + port: 10000 + revocation: + port: 5053 + internal: + port: 5052 + gateway: + port: 8080 + networkmap: + internal: + port: 5050 + +storage: + size: 1Gi + allowedTopologies: + enabled: false +image: + #Provide the docker secret name in the namespace + #Eg. pullSecret: regcred + pullSecret: + #Pull policy to be used for the Docker image + #Eg. pullPolicy: IfNotPresent + pullPolicy: IfNotPresent + #Provide a valid image and version for enterprise-gateway service + enterpriseCli: + repository: corda/enterprise-cli + tag: 1.5.9-zulu-openjdk8u382 + #Provide a valid image and version for enterprise-gateway service + signer: + repository: corda/enterprise-signer + tag: 1.5.9-zulu-openjdk8u382 + +# Sleep time (in seconds) after an error occured +sleepTimeAfterError: 300 +# path to base dir +baseDir: /opt/cenm + +# Signer internal adminListener port +adminListener: + port: 6000 -# Provide signers variables that will go to the template file -# Like the time interval for checking different tasks signers: # For checking Certificate Signing Request (CSR) schedule CSR: schedule: # Eg. interval: 1m - interval: + interval: 1m # For checking Certificate Revocation List (CRL) schedule CRL: schedule: # Eg. interval: 1d - interval: + interval: 1d # For checking with NetworkMap (NMS) NetworkMap: schedule: # Eg. interval: 1d - interval: + interval: 1m # For checking network parameters interval NetworkParameters: schedule: # Eg. interval: 1m - interval: - - -############################################################# -# Settings # -############################################################# -# Provide volume related specifications -config: - volume: - # Eg. baseDir: /opt/corda - baseDir: /opt/corda - - # Provide the path where the CENM Signer .jar-file is stored - # Eg. jarPath: bin - jarPath: bin - - # Provide the path where the CENM Service configuration files are stored - # Eg. 
configPath: etc - configPath: etc - - # Provide any extra annotations for the deployment - deployment: - # annotations: - # key: "value" - annotations: {} - - # Provide configuration of the .jar files used in the Node - cordaJar: - # Provide the memory size. - # Eg. memorySize: 4096 (if using kilobytes) - # Eg. memorySize: 512 (if using megabytes) - # Eg. memorySize: 1 (if using gigabytes) - memorySize: - # Provide the unit of greatness for the size, one of three options: - # - k or K for kilobytes - # - m or M for megabytes - # - g or G for gigabytes - # Eg. unit: M - unit: - - # Set memory limits of pod - pod: - resources: - # Provide the limit memory for node - # Eg. limits: 512M - limits: - # Provide the requests memory for node - # Eg. requests: 550M - requests: - - # Provide the number of replicas for your pods - # Eg. replicas: 1 - replicas: 1 - -healthCheck: - # Health Check node port set to get rid of logs pollution - # Eg. nodePort: 0 - nodePort: + interval: 1m diff --git a/platforms/r3-corda-ent/charts/cenm-zone/Chart.yaml b/platforms/r3-corda-ent/charts/cenm-zone/Chart.yaml index fe56a5b544e..bc2a68599dd 100644 --- a/platforms/r3-corda-ent/charts/cenm-zone/Chart.yaml +++ b/platforms/r3-corda-ent/charts/cenm-zone/Chart.yaml @@ -5,7 +5,21 @@ ############################################################################################## apiVersion: v1 -appVersion: "1.0" name: cenm-zone -description: "R3-corda-ent: Deploys the CENM zone." +description: "R3 Corda Enterprise Network Manager Zone Service." version: 1.0.0 +appVersion: "latest" +keywords: + - bevel + - corda + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/r3-corda-ent/charts/cenm-zone/README.md b/platforms/r3-corda-ent/charts/cenm-zone/README.md index 98acfea027e..011f3084ed5 100644 --- a/platforms/r3-corda-ent/charts/cenm-zone/README.md +++ b/platforms/r3-corda-ent/charts/cenm-zone/README.md @@ -3,183 +3,101 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) - -# Zone Deployment - -- [Zone Deployment Helm Chart](#Zone-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Contributing](#contributing) -- [License](#license) - - -## Zone Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/cenm-zone) Deploys and configure a zone service within a Kubernetes cluster. - - -## Prerequisites ---- -Before deploying the chart please ensure you have the following prerequisites: - -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm is installed. - - -## Chart Structure ---- -This chart has following structue: -``` - ├── cenm-zone - │ ├── Chart.yaml - │ ├── templates - │ │ ├── deployment.yaml - │ │ ├── _helpers.tpl - │ │ ├── pvc.yaml - | | |__ configmap.yaml - │ │ └── service.yaml - │ └── values.yaml +# cenm zone-service + +This chart is a component of Hyperledger Bevel. The cenm-zone chart deploys a R3 Corda Enterprise zone. 
If enabled, the keys are then stored in the configured Vault and as Kubernetes secrets. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details. + +## TL;DR + +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install zone bevel/cenm-zone ``` -Type of files used: - -- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed. -- `deployment.yaml` : This file is a configuration file for deployement in Kubernetes.It creates a deployment file with a specified number of replicas and defines various settings for the deployment,There are two init containers defined in the spec section. Init containers are run before the main containers and are often used for setup tasks.The init containers use environment variables to retrieve secrets and certificates from a HashiCorp Vault server, which are then used in the main container. It also specifies volume mounts to access shared storage within the pod. -- `_helpers.tpl` : A template file used for defining custom labels in the Helm chart. -- `pvc.yaml` : A PersistentVolumeClaim (PVC) is a request for storage by a user. -- `service.yaml` : This file defines a Kubernetes Service with multiple ports for protocols and targets, and supports Ambassador proxy annotations for specific configurations when using the "ambassador" proxy provider. -- `configmap.yaml` : ConfigMap resource in Kubernetes with a specific name and namespace, along with labels for identification. -- `chart.yaml` : Provides metadata about the chart, such as its name, version, and description. -- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the metadata, image, storage, Vault, etc. - -a name = "configuration"> -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/cenm-zone/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: -## Parameters ---- +## Prerequisites -### Metadata +- Kubernetes 1.19+ +- Helm 3.2.0+ -| Name | Description | Default Value | -| ----------------| ----------------------------------------------------------------| ------------- | -| namespace | Provide the namespace for the Corda Enterprise Signer | cenm | -| nodeName | Provide the name of the node | zone | -| prefix | Provide prefix for deployment | cenm | +If HashiCorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ -### Image +> **Important**: Ensure the `enterprise-init` chart has been installed before installing this. Also check the dependent charts. Installing this chart separately is not required, as it is a dependency of the cenm chart and is installed with the cenm chart.
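+If the chart is installed standalone for testing, selected defaults can be overridden with a small values file. A minimal sketch (the Vault address is a placeholder and the ports shown are the chart defaults):
+
+```bash
+# Write an override file for the zone service and install the chart with it
+cat > zone-overrides.yaml <<EOF
+global:
+  vault:
+    address: http://vault.example.com:8200
+  cenm:
+    zone:
+      enmPort: 25000
+      adminPort: 12345
+EOF
+helm install zone bevel/cenm-zone -f zone-overrides.yaml
+```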
-| Name | Description | Default Value | -| ------------------------ | ---------------------------------------------------------------------------------- | ----------------------------------------------| -| initContainerName | Information about the Docker container used for the init-containers | ghcr.io/hyperledger | -| zoneContainer | Provide the image for the main Zone container | corda/enterprise-zone:1.5.1-zulu-openjdk8u242 | -| ImagePullSecret | Provide the K8s secret that has rights for pulling the image off the registry | "" | -| pullPolicy | Provide the pull policy for Docker images, either Always or IfNotPresent | IfNotPresent | - -### Config - -| Name | Description | Default Value | -| ------------------------ | ---------------------------------------------------------------| --------------- | -| baseDir | Provide volume related specifications | /opt/cenm | -| pvc | Provide any extra annotations for the PVCs | 5Gi | -| pod | Set memory limits of pod | 1Gi | -| zoneJar | The directory where the Zone Service .jar file is stored | bin | -| logsContainersEnabled | Enable container displaying live logs | true | -| securityContext | Securitycontext at pod level | "" | - -### CenmServices - -| Name | Description | Default Value | -| ---------------| ------------------------------------------| ------------- | -| idmanName | Provide the name of the idman | idman | -| authName | Name of the auth service | auth | -| authPort | Auth Service port | 8081 | - -### Database - -| Name | Description | Default Value | -| ------------------------ | --------------------------------------------------------------------------------| ----------------------| -| driverClassName | Java class name to use for the database | /opt/cenm | -| jdbcDriver | JDBC Driver name | "" | -| url | The DB connection URL | "" | -| user | DB user name | "example-db-user" | -| password | DB password | "example-db-password" | -| runMigration | Option to run database migrations as part of startup | "true" | - -### Service - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ------------- | -| type | Provide the type of service | "" | -| port | provide the port for service | 80 | - -### listenerPort - -| Name | Description | Default Value | -| -------------| ----------------------------------------------------------------------------- | ------------- | -| enm | Provide the port where the Zone Service listens for Angel Services to connect | "25000" | -| admin | Provide the port where Angel Services connect to the Zone Service | "12345" | - -### Vault - -| Name | Description | Default Value | -| ------------------------- | --------------------------------------------------------------------------| ------------- | -| address | Address/URL of the Vault server | "" | -| role | Role used for authentication with Vault | vault-role | -| authpath | Authentication path for Vault | entcordacenm | -| serviceAccountName | Provide the already created service account name autheticated to vault | vault-auth | -| certSecretPrefix | Provide the vault path where the certificates are stored | "" | -| retries | Amount of times to retry fetching from/writing to Vault before giving up | 10 | -| sleepTimeAfterError | Amount of time in seconds wait after an error occurs | 15 | - - -## Deployment ---- -To deploy the Zone Helm chart, follow these steps: - -1. 
Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/cenm-zone/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install, upgrade, verify delete the chart: -To install the chart: -```bash -helm repo add bevel https://hyperledger.github.io/bevel/ -helm install ./cenm-zone -``` +## Installing the Chart -To upgrade the chart: -```bash -helm upgrade ./cenm-zone -``` +To install the chart with the release name `zone`: -To verify the deployment: ```bash -kubectl get jobs -n +helm repo add bevel https://hyperledger.github.io/bevel +helm install zone bevel/cenm-zone ``` -Note : Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. -To delete the chart: +The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `zone` deployment: + ```bash -helm uninstall +helm uninstall zone ``` -Note : Replace `` with the desired name for the release. +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Zone Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/cenm-zone), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). +### Global parameters +These parameters are referred to in the same way in each parent or child chart +| Name | Description | Default Value | +|--------|---------|-------------| +|`global.serviceAccountName` | The service account name that will be used for Vault Auth management| `vault-auth` | +| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. Currently only `aws` and `minikube` are tested | `aws` | +| `global.cluster.cloudNativeServices` | only `false` is implemented; `true` (use Cloud Native Services: SecretsManager and IAM for AWS, KeyVault & Managed Identities for Azure) is reserved for future use | `false` | +| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` is supported. | `hashicorp` | +| `global.vault.role` | Role used for authentication with Vault | `vault-role` | +| `global.vault.address`| URL of the Vault server. | `""` | +| `global.vault.authPath` | Authentication path for Vault | `supplychain` | +| `global.vault.secretEngine` | The value for vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | The value for vault secret prefix which must start with `data/` | `data/supplychain` | +| `global.proxy.provider` | The proxy or Ingress provider.
Can be `none` or `ambassador` | `ambassador` |
+| `global.proxy.externalUrlSuffix` | The external URL suffix at which the CENM services will be available | `test.blockchaincloudpoc.com` |
+| `global.cenm.sharedCreds.truststore` | The password for the truststores created by the PKI tool | `password` |
+| `global.cenm.sharedCreds.keystore` | The password for the keystores created by the PKI tool | `password` |
+| `global.cenm.zone.enmPort` | The port for the Zone ENM listener | `25000` |
+| `global.cenm.zone.adminPort` | The port for the Zone admin listener | `12345` |
+
+### Storage
+
+| Name | Description | Default Value |
+|--------|---------|-------------|
+| `storage.allowedTopologies.enabled` | Check [bevel-storageclass](../../../shared/charts/bevel-storageclass/README.md) for details | `false` |
-
+### Image
+| Name | Description | Default Value |
+| -------------| ---------- | --------- |
+| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` |
+| `image.pullPolicy` | Pull policy to be used for the Docker images | `IfNotPresent` |
+| `image.zone.repository` | CENM zone image repository | `corda/enterprise-zone`|
+| `image.zone.tag` | CENM zone image tag as per version | `1.5.9-zulu-openjdk8u382`|
+
+### Database Settings
+| Name | Description | Default Value |
+| -------------| ---------- | --------- |
+| `database.driverClassName` | DB driver class name | `org.h2.Driver` |
+| `database.jdbcDriver` | DB JDBC driver | `""` |
+| `database.url` | DB connection URL | `jdbc:h2:file:./h2/zone-persistence;DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=10000;WRITE_DELAY=0;AUTO_SERVER_PORT=0` |
+| `database.user` | DB user name | `zone-db-user` |
+| `database.password` | DB password | `zone-db-password` |
+
 ## License
 
 This chart is licensed under the Apache v2.0 license.
 
-Copyright © 2023 Accenture
+Copyright © 2024 Accenture
 
 ### Attribution
 
diff --git a/platforms/r3-corda-ent/charts/cenm-zone/files/run.sh b/platforms/r3-corda-ent/charts/cenm-zone/files/run.sh
index c6ae069e475..074919313b4 100644
--- a/platforms/r3-corda-ent/charts/cenm-zone/files/run.sh
+++ b/platforms/r3-corda-ent/charts/cenm-zone/files/run.sh
@@ -3,36 +3,36 @@
 #
 # main run
 #
-if [ -f {{ .Values.config.zoneJar.path }}/zone.jar ]
+if [ -f {{ .Values.jarPath }}/zone.jar ]
 then
   echo
   echo "CENM: starting up zone process ..."
echo set -x - java -jar {{ .Values.config.zoneJar.path }}/zone.jar \ + java -jar {{ .Values.jarPath }}/zone.jar \ --user "{{ .Values.database.user }}" \ --password "{{ .Values.database.password }}" \ --url "{{ .Values.database.url }}" \ --driver-class-name "{{ .Values.database.driverClassName }}" \ --jdbc-driver "{{ .Values.database.jdbcDriver }}" \ - --enm-listener-port "{{ .Values.listenerPort.enm }}" \ - --admin-listener-port "{{ .Values.listenerPort.admin }}" \ - --auth-host "{{ .Values.cenmServices.authName }}.{{ .Values.metadata.namespace }}" \ - --auth-port "{{ .Values.cenmServices.authPort }}" \ - --auth-trust-store-location ./DATA/trust-stores/corda-ssl-trust-store.jks \ - --auth-trust-store-password "SSL_TRUSTSTORE" \ + --enm-listener-port "{{ .Values.global.cenm.zone.enmPort }}" \ + --admin-listener-port "{{ .Values.global.cenm.zone.adminPort }}" \ + --auth-host "auth.{{ .Release.Namespace }}" \ + --auth-port "{{ .Values.global.cenm.auth.port }}" \ + --auth-trust-store-location /certs/corda-ssl-trust-store.jks \ + --auth-trust-store-password {{ .Values.global.cenm.sharedCreds.truststore }} \ --auth-issuer "http://test" \ --auth-leeway 5 \ --run-migration="{{ .Values.database.runMigration }}" \ --tls=true \ - --tls-keystore=./DATA/key-stores/corda-ssl-identity-manager-keys.jks \ - --tls-keystore-password="IDMAN_SSL" \ - --tls-truststore=./DATA/trust-stores/corda-ssl-trust-store.jks \ - --tls-truststore-password="SSL_TRUSTSTORE" \ + --tls-keystore=/certs/corda-ssl-identity-manager-keys.jks \ + --tls-keystore-password="{{ .Values.global.cenm.sharedCreds.keystore }}" \ + --tls-truststore=/certs/corda-ssl-trust-store.jks \ + --tls-truststore-password="{{ .Values.global.cenm.sharedCreds.truststore }}" \ --verbose EXIT_CODE=${?} else - echo "Missing zone jar file in {{ .Values.config.zoneJar.path }} directory:" - ls -al {{ .Values.config.zoneJar.path }} + echo "Missing zone jar file in {{ .Values.jarPath }} directory:" + ls -al {{ .Values.jarPath }} EXIT_CODE=110 fi diff --git a/platforms/r3-corda-ent/charts/cenm-zone/requirements.yaml b/platforms/r3-corda-ent/charts/cenm-zone/requirements.yaml new file mode 100644 index 00000000000..895f0a0e1cf --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm-zone/requirements.yaml @@ -0,0 +1,7 @@ +dependencies: + - name: bevel-storageclass + alias: storage + repository: "file://../../../shared/charts/bevel-storageclass" + tags: + - storage + version: ~1.0.0 diff --git a/platforms/r3-corda-ent/charts/cenm-zone/templates/_helpers.tpl b/platforms/r3-corda-ent/charts/cenm-zone/templates/_helpers.tpl index 7f9b0dc6131..5b8dcaa3206 100644 --- a/platforms/r3-corda-ent/charts/cenm-zone/templates/_helpers.tpl +++ b/platforms/r3-corda-ent/charts/cenm-zone/templates/_helpers.tpl @@ -1,5 +1,29 @@ -{{- define "labels.custom" -}} - {{- range $key, $val := .Values.metadata.labels -}} - {{- $key -}}: {{- $val -}} - {{- end -}} +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "zone.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "zone.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "zone.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- end -}} diff --git a/platforms/r3-corda-ent/charts/cenm-zone/templates/configmap.yaml b/platforms/r3-corda-ent/charts/cenm-zone/templates/configmap.yaml index 6b21e442f0c..7f9cd7ca6ae 100644 --- a/platforms/r3-corda-ent/charts/cenm-zone/templates/configmap.yaml +++ b/platforms/r3-corda-ent/charts/cenm-zone/templates/configmap.yaml @@ -8,13 +8,15 @@ apiVersion: v1 kind: ConfigMap metadata: - name: {{ .Values.metadata.prefix }}-zone-conf - namespace: {{ .Values.metadata.namespace }} + name: {{ include "zone.fullname" . }}-conf + namespace: {{ .Release.Namespace }} labels: - app.kubernetes.io/name: {{ .Values.metadata.nodeName }} + app.kubernetes.io/name: {{ include "zone.fullname" . }} helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - {{- include "labels.custom" . | nindent 2 }} + app.kubernetes.io/part-of: {{ include "zone.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} data: run.sh: |+ {{ tpl (.Files.Get "files/run.sh") . | nindent 4 }} diff --git a/platforms/r3-corda-ent/charts/cenm-zone/templates/deployment.yaml b/platforms/r3-corda-ent/charts/cenm-zone/templates/deployment.yaml deleted file mode 100644 index da0a159b1c6..00000000000 --- a/platforms/r3-corda-ent/charts/cenm-zone/templates/deployment.yaml +++ /dev/null @@ -1,219 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ .Values.metadata.nodeName }} - namespace: {{ .Values.metadata.namespace }} - labels: - app.kubernetes.io/name: {{ .Values.metadata.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} -spec: - serviceName: {{ .Values.metadata.nodeName }} - replicas: 1 - selector: - matchLabels: - app: {{ .Values.metadata.nodeName }} - app.kubernetes.io/name: {{ .Values.metadata.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - template: - metadata: - labels: - app: {{ .Values.metadata.nodeName }} - app.kubernetes.io/name: {{ .Values.metadata.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - {{- with .Values.image.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . 
| nindent 8 }} - {{- end }} - serviceAccountName: {{ .Values.vault.serviceAccountName }} - securityContext: - runAsUser: 1000 - runAsGroup: 1000 - fsGroup: 1000 - initContainers: - - name: init-certificates - image: {{ .Values.image.initContainer }} - imagePullPolicy: IfNotPresent - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authPath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.config.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - - name: IDMAN_NODENAME - value: {{ $.Values.cenmServices.idmanName }} - - name: MOUNT_PATH - value: "/DATA" - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code and curl_response - $curl_response" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - # setting up env to get secrets from vault - echo "Getting secrets from Vault Server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - - # Creating dirs for storing certificates and credentials - mkdir -p ${MOUNT_PATH}/trust-stores; - mkdir -p ${MOUNT_PATH}/key-stores; - mkdir -p ${MOUNT_PATH}/ssl; - - # ssl trust-stores from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/root/certs | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/root/certs" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - corda_ssl_trust_store=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["corda-ssl-trust-store.jks"]') - echo "${corda_ssl_trust_store}" | base64 -d > ${MOUNT_PATH}/trust-stores/corda-ssl-trust-store.jks - echo "Successfully got SSL trust store certifcates" - - # idman ssl key-stores from vault - OUTPUT_PATH=${MOUNT_PATH}/truststore; - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/${IDMAN_NODENAME}/certs | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/${IDMAN_NODENAME}/certs" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - idm_ssl=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["corda-ssl-identity-manager-keys.jks"]') - echo "${idm_ssl}" | base64 -d > ${MOUNT_PATH}/key-stores/corda-ssl-identity-manager-keys.jks - echo "Successfully got SSL Idman certifcates" - - OUTPUT_PATH=${MOUNT_PATH}/ssl; - # Fetching the idman ssl credentials from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials/ssl | jq -r 'if .errors then . else . 
end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials/ssl" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - IDMAN_SSL=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["idman"]') - echo "${IDMAN_SSL}"> ${OUTPUT_PATH}/idmanssl - - #Fetching ssl truststore from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials/truststore | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials/truststore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - SSL_TRUSTSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["ssl"]') - echo "${SSL_TRUSTSTORE}"> ${OUTPUT_PATH}/sslts - - echo "Done" - volumeMounts: - - name: {{ .Values.metadata.prefix }}-pki-certs-keys - mountPath: /DATA - containers: - - name: main - securityContext: - {{- toYaml .Values.config.securityContext | nindent 12 }} - image: {{ .Values.image.zoneContainer }} - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: ["/bin/bash", "-c"] - args: - - |- - mkdir -p {{ .Values.config.volume.baseDir }}/config; - install {{ .Values.config.volume.baseDir }}/CM-FILES/run.sh {{ .Values.config.volume.baseDir }}/config/; - - #replacing the variables in idman.conf with actual values - export IDMAN_SSL=$(cat {{ .Values.config.volume.baseDir }}/DATA/ssl/idmanssl) - sed -i -e "s*IDMAN_SSL*${IDMAN_SSL}*g" {{ .Values.config.volume.baseDir }}/config/run.sh - export SSL_TRUSTSTORE=$(cat {{ .Values.config.volume.baseDir }}/DATA/ssl/sslts) - sed -i -e "s*SSL_TRUSTSTORE*${SSL_TRUSTSTORE}*g" {{ .Values.config.volume.baseDir }}/config/run.sh - - bash {{ .Values.config.volume.baseDir }}/config/run.sh - volumeMounts: - - name: {{ .Values.metadata.prefix }}-pki-certs-keys - mountPath: {{ .Values.config.volume.baseDir }}/DATA - - name: zone-conf - mountPath: {{ .Values.config.volume.baseDir }}/CM-FILES/run.sh - subPath: run.sh - - name: {{ .Values.metadata.prefix }}-zone-logs - mountPath: {{ .Values.config.volume.baseDir }}/logs - - name: {{ .Values.metadata.prefix }}-zone-h2 - mountPath: {{ .Values.config.volume.baseDir }}/h2 - resources: - {{- toYaml .Values.config.pod.resources | nindent 12 }} - {{- if .Values.config.logsContainersEnabled }} - - name: logs-zone - securityContext: - {{- toYaml .Values.config.securityContext | nindent 12 }} - image: {{ .Values.image.zoneContainer }} - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: ["/bin/bash", "-c"] - args: - - |- - cd {{ .Values.config.volume.baseDir }}/ - while true; do tail -f logs/zone-service/*.log 2>/dev/null ; sleep 5; done - # in case sth went wrong just wait indefinitely ... - tail -f /dev/null - volumeMounts: - - name: {{ .Values.metadata.prefix }}-zone-logs - mountPath: {{ .Values.config.volume.baseDir }}/logs - resources: - {{- toYaml .Values.config.pod.resources | nindent 12 }} - {{- end }} - volumes: - - name: zone-conf - configMap: - name: {{ .Values.metadata.prefix }}-zone-conf - {{- with .Values.config.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.config.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.config.tolerations }} - tolerations: - {{- toYaml . 
| nindent 8 }} - {{- end }} - volumeClaimTemplates: - - metadata: - name: {{ .Values.metadata.prefix }}-zone-h2 - spec: - storageClassName: {{ .Values.storageClass }} - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: {{ .Values.config.pvc.volumeSizeZoneH2 }} - - metadata: - name: {{ .Values.metadata.prefix }}-zone-logs - spec: - storageClassName: {{ .Values.storageClass }} - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: {{ .Values.config.pvc.volumeSizeZoneLogs }} - - metadata: - name: {{ .Values.metadata.prefix }}-pki-certs-keys - spec: - storageClassName: {{ .Values.storageClass }} - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: {{ .Values.config.pvc.volumeSizeZoneData }} diff --git a/platforms/r3-corda-ent/charts/cenm-zone/templates/service.yaml b/platforms/r3-corda-ent/charts/cenm-zone/templates/service.yaml index 3cc5a803c3c..fd60541846d 100644 --- a/platforms/r3-corda-ent/charts/cenm-zone/templates/service.yaml +++ b/platforms/r3-corda-ent/charts/cenm-zone/templates/service.yaml @@ -4,28 +4,34 @@ # SPDX-License-Identifier: Apache-2.0 ############################################################################################## +--- apiVersion: v1 kind: Service metadata: - name: {{ .Values.metadata.nodeName }} - namespace: {{ .Values.metadata.namespace }} + name: {{ include "zone.name" . }} + namespace: {{ .Release.Namespace }} labels: - run: {{ .Values.metadata.nodeName }} - app.kubernetes.io/name: {{ .Values.metadata.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + app.kubernetes.io/name: zone-service + app.kubernetes.io/component: zone + app.kubernetes.io/part-of: {{ include "zone.fullname" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.listenerPort.enm }} - targetPort: {{ .Values.listenerPort.enm }} - protocol: TCP - name: enm - - port: {{ .Values.listenerPort.admin }} - targetPort: {{ .Values.listenerPort.admin }} - protocol: TCP - name: admin + type: ClusterIP selector: - app: {{ .Values.metadata.nodeName }} + app.kubernetes.io/name: zone-statefulset + app.kubernetes.io/component: zone + app.kubernetes.io/part-of: {{ include "zone.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + ports: + - name: zone-enm + protocol: TCP + port: {{ .Values.global.cenm.zone.enmPort }} + targetPort: {{ .Values.global.cenm.zone.enmPort }} + - name: zone-admin + protocol: TCP + port: {{ .Values.global.cenm.zone.adminPort }} + targetPort: {{ .Values.global.cenm.zone.adminPort }} diff --git a/platforms/r3-corda-ent/charts/cenm-zone/templates/statefulset.yaml b/platforms/r3-corda-ent/charts/cenm-zone/templates/statefulset.yaml new file mode 100644 index 00000000000..6de63ec68d6 --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm-zone/templates/statefulset.yaml @@ -0,0 +1,111 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "zone.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ include "zone.fullname" . }} + app.kubernetes.io/name: zone-statefulset + app.kubernetes.io/component: zone + app.kubernetes.io/part-of: {{ include "zone.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} +spec: + replicas: 1 + podManagementPolicy: OrderedReady + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ include "zone.fullname" . }} + app.kubernetes.io/name: zone-statefulset + app.kubernetes.io/component: zone + app.kubernetes.io/part-of: {{ include "zone.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + serviceName: {{ include "zone.fullname" . }} + volumeClaimTemplates: + - metadata: + name: zone-h2 + spec: + storageClassName: storage-{{ .Release.Name }} + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storage.dbSize }} + - metadata: + name: zone-logs + spec: + storageClassName: storage-{{ .Release.Name }} + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storage.size }} + template: + metadata: + labels: + app: {{ include "zone.fullname" . }} + app.kubernetes.io/name: zone-statefulset + app.kubernetes.io/component: zone + app.kubernetes.io/part-of: {{ include "zone.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + hostname: {{ .Release.Name }} + imagePullSecrets: + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} + containers: + - name: zone + image: {{ .Values.image.zone.repository }}:{{ .Values.image.zone.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: ["/bin/bash", "-c"] + args: + - |- + bash /opt/cenm/run.sh + volumeMounts: + - name: cenm-certs + mountPath: /certs + - name: zone-conf + mountPath: /opt/cenm/run.sh + subPath: run.sh + - name: zone-logs + mountPath: /opt/cenm/logs + - name: zone-h2 + mountPath: /opt/cenm/h2 + - name: logs-zone + image: {{ .Values.image.zone.repository }}:{{ .Values.image.zone.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: ["/bin/bash", "-c"] + args: + - |- + cd /opt/cenm/ + while true; do tail -f logs/zone-service/*.log 2>/dev/null ; sleep 5; done + # in case sth went wrong just wait indefinitely ... + tail -f /dev/null + volumeMounts: + - name: zone-logs + mountPath: /opt/cenm/logs + volumes: + - name: zone-conf + configMap: + name: {{ include "zone.fullname" . 
}}-conf + defaultMode: 0777 + - name: cenm-certs + secret: + secretName: cenm-certs diff --git a/platforms/r3-corda-ent/charts/cenm-zone/values.yaml b/platforms/r3-corda-ent/charts/cenm-zone/values.yaml index cb829e21944..97206fc961f 100644 --- a/platforms/r3-corda-ent/charts/cenm-zone/values.yaml +++ b/platforms/r3-corda-ent/charts/cenm-zone/values.yaml @@ -4,183 +4,66 @@ # SPDX-License-Identifier: Apache-2.0 ############################################################################################## -# Default values for CENM Zone service. +# Default values for cenm-zone chart. # This is a YAML-formatted file. # Declare variables to be passed into your templates. - -############################################################# -# Basic Configuration # -############################################################# -# This section contains the Corda Enterprise Zone metadata. -metadata: - # Provide the namespace for the Corda Enterprise Zone. - # Eg. namespace: cenm - namespace: cenm - # Provide the name of the deployment - # Eg. nodeName: zone - nodeName: zone - # Provide prefix for deployment - # Eg. prefix: cenm - prefix: cenm - -# Provide information regarding the Docker images used. -image: - # Provide the alpine utils image, which is used for all init-containers of deployments/jobs. - # NOTE: The alpine image used is the base alpine image with CURL installed. - # Eg. initContainer: ghcr.io/hyperledger/bevel-alpine:latest - initContainer: ghcr.io/hyperledger/bevel-alpine:latest - # Provide the image for the main Zone container. - # Eg. zoneContainer: corda/enterprise-zone:1.5.1-zulu-openjdk8u242 - zoneContainer: corda/enterprise-zone:1.5.1-zulu-openjdk8u242 - # Provide the docker-registry secret created and stored in kubernetes cluster as a secret. - # Eg. imagePullSecrets: regcred, can add multiple creds - imagePullSecrets: - - name: - # Pull policy to be used for the Docker image - # Eg. pullPolicy: Always - pullPolicy: - -############################################################# -### Settings ### -############################################################# -config: - # Provide volume related specifications - volume: - # Eg. baseDir: /opt/cenm - baseDir: /opt/cenm - - pvc: - # Volume size for h2/ directory - # Eg. volumeSizeZoneH2: 1Gi - volumeSizeZoneH2: - # Volume size for logs/ directory - # Eg. volumeSizeZoneLogs: 5Gi - volumeSizeZoneLogs: - # Volume size for data/ directory - # Eg. volumeSizeZoneData: 1Gi - volumeSizeZoneData: - - pod: - # Set memory limits of pod - resources: - limits: - # Provide the limit memory for node - # Eg. memory: 1Gi - memory: - requests: - # Provide the requests memory for node - # Eg. memory: 1Gi - memory: - - zoneJar: - # The directory where the Zone Service .jar file is stored - # Eg. path: bin - path: - - nodeSelector: {} - - tolerations: [] - - affinity: {} - - # Enable container displaying live logs - logsContainersEnabled: true - - securityContext: {} - -############################################################# -# CENM Service Details # -############################################################# -# This section details the CENM service names as per the configuration file -# It also contains the passwords for keystores and truststores -cenmServices: - # Provide the name of the idman - # Eg. idmanName: idman - idmanName: idman - # Eg. authName: auth - authName: auth - # The port of auth service - # Eg. 
authPort: 8081 - authPort: 8081 - -############################################################# -# Database Options and Configuration # -############################################################# -# Database configuration -database: - # Java class name to use for the database - # Eg. driverClassName: "org.h2.Driver" - driverClassName: - # JDBC Driver name - # Eg. jdbcDriver: "" - jdbcDriver: - # The DB connection URL - # Eg. url: "jdbc:h2:file:./h2/zone-persistence;DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=10000;WRITE_DELAY=0;AUTO_SERVER_PORT=0" - url: - # DB user - # Eg. user: "example-db-user" - user: - # DB password - # Eg. password: "example-db-password" - password: - # Migrations of database can be run as part of the startup of Zone, if set to true. - # If set to false, it will be run prior to setting up the Zone. - # Eg. runMigration: "true" - runMigration: "true" - -######################################## -### Zone Service configuration ### -######################################## - -service: - # Service type of kubernetes cluster - # Eg. type: ClusterIP - type: - # Port of service type - # Eg. port: 80 - port: 80 - -listenerPort: - # Provide the port where the Zone Service listens for Angel Services to connect. - # Eg. enm: "25000" - enm: "25000" - # Provide the port where Angel Services connect to the Zone Service. - # Eg. admin: "12345" - admin: "12345" - -# This section contains the storage information, used for the Persistent Volume Claims (PVC). -# Provide the name of the storageclass. -# NOTE: Make sure that the storageclass exist prior to this deployment as -# this chart doesn't create the storageclass. -# Eg. storageClass: cenm -storageClass: cenm - -############################################################# -# HashiCorp Vault Configuration # -############################################################# -# NOTE: Make sure that the vault is already unsealed, intialized and configured to -# use Kubernetes service account token based authentication. -# For more info, see https://www.vaultproject.io/docs/auth/kubernetes -vault: - # Provide the vault address - # Eg. address: http://vault.example.com:8200 - address: - # Provide the vault role used. - # Eg. role: vault-role - role: vault-role - # Provide the authpath configured to be used. - # Eg. authpath: entcordacenm - authPath: entcordacenm - # Provide the service account name autheticated to vault. - # NOTE: Make sure that the service account is already created and autheticated to use the vault. - # Eg. serviceaccountname: vault-auth +global: serviceAccountName: vault-auth - # Provide the vault path where the certificates are stored - # Eg. certsecretprefix: secret/cenm-org-name - certsecretprefix: - # The amount of times to retry fetching from/writing to Vault before giving up. - # Eg. retries: 10 - retries: 10 - # The amount of time in seconds to wait after an error occurs when fetching from/writing to Vault. - # Eg. sleepTimeAfterError: 15 - sleepTimeAfterError: 15 + cluster: + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # set to true to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) + vault: + type: hashicorp + role: vault-role + address: + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + proxy: + #This will be the proxy/ingress provider. Can have values "ambassador" or "none" + #Eg. provider: "ambassador" + provider: "ambassador" + #This field contains the external URL of the node + #Eg. 
externalUrlSuffix: test.blockchaincloudpoc.com
+    externalUrlSuffix: test.blockchaincloudpoc.com
+  cenm:
+    sharedCreds:
+      truststore: password
+      keystore: password
+    zone:
+      enmPort: 25000
+      adminPort: 12345
+
+storage:
+  size: 1Gi
+  dbSize: 5Gi
+  allowedTopologies:
+    enabled: false
+
+image:
+  #Provide the docker secret name in the namespace
+  #Eg. pullSecret: regcred
+  pullSecret:
+  #Pull policy to be used for the Docker image
+  #Eg. pullPolicy: IfNotPresent
+  pullPolicy: IfNotPresent
+  #Provide a valid image and version for the zone service
+  zone:
+    repository: corda/enterprise-zone
+    tag: 1.5.9-zulu-openjdk8u382
+
+# db related configuration
+database:
+  driverClassName: "org.h2.Driver"
+  jdbcDriver: ""
+  url: "jdbc:h2:file:./h2/zone-persistence;DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=10000;WRITE_DELAY=0;AUTO_SERVER_PORT=0"
+  user: "zone-db-user"
+  password: "zone-db-password"
+  runMigration: true
+
+# Sleep time (in seconds) after an error occurred
+sleepTimeAfterError: 300
+# path to base dir
+baseDir: /opt/cenm
+# path to jar, relative to baseDir
+jarPath: bin
diff --git a/platforms/r3-corda-ent/charts/cenm/Chart.yaml b/platforms/r3-corda-ent/charts/cenm/Chart.yaml
new file mode 100644
index 00000000000..a8055989976
--- /dev/null
+++ b/platforms/r3-corda-ent/charts/cenm/Chart.yaml
@@ -0,0 +1,25 @@
+##############################################################################################
+# Copyright Accenture. All Rights Reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+##############################################################################################
+
+apiVersion: v1
+name: cenm
+description: "R3 Corda Enterprise Network Manager (CENM)"
+version: 1.0.0
+appVersion: "latest"
+keywords:
+  - bevel
+  - corda
+  - hyperledger
+  - enterprise
+  - blockchain
+  - deployment
+  - accenture
+home: https://hyperledger-bevel.readthedocs.io/en/latest/
+sources:
+  - https://github.com/hyperledger/bevel
+maintainers:
+  - name: Hyperledger Bevel maintainers
+    email: bevel@lists.hyperledger.org
diff --git a/platforms/r3-corda-ent/charts/cenm/README.md b/platforms/r3-corda-ent/charts/cenm/README.md
new file mode 100644
index 00000000000..7276ffe6d9e
--- /dev/null
+++ b/platforms/r3-corda-ent/charts/cenm/README.md
@@ -0,0 +1,135 @@
+[//]: # (##############################################################################################)
+[//]: # (Copyright Accenture. All Rights Reserved.)
+[//]: # (SPDX-License-Identifier: Apache-2.0)
+[//]: # (##############################################################################################)
+
+# cenm
+
+This chart is a component of Hyperledger Bevel. The cenm chart deploys an R3 Corda Enterprise Zone, Auth, Gateway, Identity Manager and the associated Signer Service. If Vault is enabled, the generated keys are stored in the configured Vault as well as saved as Kubernetes secrets. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details.
+
+## TL;DR
+
+```bash
+helm repo add bevel https://hyperledger.github.io/bevel
+helm install cenm bevel/cenm
+```
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+
+If HashiCorp Vault is used, then
+- HashiCorp Vault Server 1.13.1+
+
+> **Important**: Ensure the `enterprise-init` chart has been installed before installing this chart. Also check the dependent charts.
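+
+Before installing, it can help to verify that the prerequisites are in place. The snippet below is a minimal pre-flight sketch, not part of the chart itself: `vault-auth` is the chart's default `global.serviceAccountName`, while the `supplychain` namespace and the `VAULT_ADDR` variable are assumptions you should replace with your own values.
+
+```bash
+# Confirm the service account used for Vault Kubernetes authentication exists
+kubectl get serviceaccount vault-auth --namespace supplychain
+
+# Confirm the Vault server is reachable and unsealed before the pre-install hook runs
+curl -s "$VAULT_ADDR/v1/sys/health"
+```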
+
+## Installing the Chart
+
+To install the chart with the release name `cenm`:
+
+```bash
+helm repo add bevel https://hyperledger.github.io/bevel
+helm install cenm bevel/cenm
+```
+
+The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `cenm` deployment:
+
+```bash
+helm uninstall cenm
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Parameters
+
+### Global parameters
+These global parameters are shared by the parent chart and all child charts.
+| Name | Description | Default Value |
+|--------|---------|-------------|
+|`global.serviceAccountName` | The service account name used for Vault authentication management | `vault-auth` |
+| `global.cluster.provider` | Kubernetes cluster provider, such as AWS EKS or minikube. Currently only `aws` and `minikube` are tested | `aws` |
+| `global.cluster.cloudNativeServices` | Only `false` is implemented; `true`, which enables Cloud Native Services (Secrets Manager and IAM for AWS; Key Vault and Managed Identities for Azure), is reserved for future use | `false` |
+| `global.vault.type` | Type of Vault, to support other providers. Currently, only `hashicorp` is supported. | `hashicorp` |
+| `global.vault.role` | Role used for authentication with Vault | `vault-role` |
+| `global.vault.address`| URL of the Vault server. | `""` |
+| `global.vault.authPath` | Authentication path for Vault | `supplychain` |
+| `global.vault.secretEngine` | Name of the Vault secret engine | `secretsv2` |
+| `global.vault.secretPrefix` | Vault secret prefix, which must start with `data/` | `data/supplychain` |
+| `global.proxy.provider` | The proxy or Ingress provider. 
Can be `none` or `ambassador` | `ambassador` |
+| `global.proxy.externalUrlSuffix` | The external URL suffix at which the CENM services will be available | `test.blockchaincloudpoc.com` |
+| `global.cenm.sharedCreds.truststore` | The password for the truststores created by the PKI tool | `password` |
+| `global.cenm.sharedCreds.keystore` | The password for the keystores created by the PKI tool | `password` |
+| `global.cenm.identityManager.port` | The port for Identity Manager issuance | `10000` |
+| `global.cenm.identityManager.revocation.port` | The port for Identity Manager revocation | `5053` |
+| `global.cenm.identityManager.internal.port` | The port for the Identity Manager internal listener | `5052` |
+| `global.cenm.auth.port` | The port for the Auth API | `8081` |
+| `global.cenm.gateway.port` | The port for the Gateway API | `8080` |
+| `global.cenm.zone.enmPort` | The port for the Zone ENM listener | `25000` |
+| `global.cenm.zone.adminPort` | The port for the Zone admin listener | `12345` |
+| `global.cenm.networkmap.internal.port` | The port for the Network Map internal listener | `5050` |
+
+### Storage
+
+| Name | Description | Default Value |
+|--------|---------|-------------|
+| `storage.allowedTopologies.enabled` | Check [bevel-storageclass](../../../shared/charts/bevel-storageclass/README.md) for details | `false` |
+
+
+### Image
+| Name | Description | Default Value |
+| -------------| ---------- | --------- |
+| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` |
+| `image.pullPolicy` | Pull policy to be used for the Docker images | `IfNotPresent` |
+| `image.pki.repository` | CENM PKI image repository | `corda/enterprise-pkitool`|
+| `image.pki.tag` | CENM PKI image tag as per version | `1.5.9-zulu-openjdk8u382`|
+| `image.hooks.repository` | Corda hooks image repository | `ghcr.io/hyperledger/bevel-build` |
+| `image.hooks.tag` | Corda hooks image tag | `jdk8-stable` |
+
+### Common Settings
+| Name | Description | Default Value |
+| -------------| ---------- | --------- |
+| `settings.removeKeysOnDelete` | Flag to delete the generated secrets on uninstall | `false` |
+
+### Subjects
+
+| Name | Description | Default Value |
+| -------------| ---------- | --------- |
+| `subjects.auth` | X.509 subject for the Auth service | `"CN=Test TLS Auth Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US"` |
+| `subjects.tlscrlsigner` | X.509 subject for the TLS CRL signer | `"CN=Test TLS Signer Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US"` |
+| `subjects.tlscrlissuer` | X.509 subject for the TLS CRL issuer | `"CN=Corda TLS CRL Authority,OU=Corda UAT,O=R3 HoldCo LLC,L=New York,C=US"` |
+| `subjects.rootca` | X.509 subject for the root CA | `"CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB"` |
+| `subjects.subordinateca` | X.509 subject for the subordinate CA | `"CN=Test Subordinate CA Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US"` |
+| `subjects.idmanca` | X.509 subject for the Identity Manager CA | `"CN=Test Identity Manager Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US"` |
+| `subjects.networkmap` | X.509 subject for the Network Map | `"CN=Test Network Map Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US"` |
+
+
+## License
+
+This chart is licensed under the Apache v2.0 license. 
+ +Copyright © 2024 Accenture + +### Attribution + +This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: + +``` +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +``` diff --git a/platforms/r3-corda-ent/charts/cenm/requirements.yaml b/platforms/r3-corda-ent/charts/cenm/requirements.yaml new file mode 100644 index 00000000000..fd93915be3a --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm/requirements.yaml @@ -0,0 +1,44 @@ +dependencies: + - name: bevel-storageclass + alias: storage + repository: "file://../../../shared/charts/bevel-storageclass" + tags: + - storage + version: ~1.0.0 + - name: cenm-auth + alias: auth + repository: "file://../cenm-auth" + tags: + - cenm + version: ~1.0.0 + - name: cenm-gateway + alias: gateway + repository: "file://../cenm-gateway" + tags: + - cenm + version: ~1.0.0 + - name: cenm-zone + alias: zone + repository: "file://../cenm-zone" + tags: + - cenm + version: ~1.0.0 + - name: cenm-signer + alias: signer + repository: "file://../cenm-signer" + tags: + - bevel + version: ~1.0.0 + - name: cenm-idman + alias: idman + repository: "file://../cenm-idman" + tags: + - bevel + version: ~1.0.0 + - name: corda-certs-gen + alias: tls + repository: "file://../../../r3-corda/charts/corda-certs-gen" + tags: + - bevel + version: ~1.0.0 + condition: tls.enabled diff --git a/platforms/r3-corda-ent/charts/cenm/templates/_helpers.tpl b/platforms/r3-corda-ent/charts/cenm/templates/_helpers.tpl new file mode 100644 index 00000000000..0acdeb11eb9 --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm/templates/_helpers.tpl @@ -0,0 +1,37 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "cenm.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "cenm.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "cenm.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create idman url depending on proxy mode +*/}} +{{- define "identityManager.URL" -}} +{{- $port := .Values.global.cenm.identityManager.port | int -}} +{{- printf "http://idman.%s:%d" .Release.Namespace $port }} +{{- end -}} diff --git a/platforms/r3-corda-ent/charts/cenm/templates/hooks-pre-delete.yaml b/platforms/r3-corda-ent/charts/cenm/templates/hooks-pre-delete.yaml new file mode 100644 index 00000000000..4ecf00b7f1a --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm/templates/hooks-pre-delete.yaml @@ -0,0 +1,65 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "cenm.fullname" . }}-pre-delete-hook + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/hook: pre-delete + helm.sh/hook-weight: "0" + helm.sh/hook-delete-policy: "hook-succeeded" + labels: + app.kubernetes.io/name: pre-delete-hook + app.kubernetes.io/component: cleanup + app.kubernetes.io/part-of: {{ include "cenm.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +spec: + backoffLimit: 3 + completions: 1 + template: + metadata: + labels: + app.kubernetes.io/name: pre-delete-hook + app.kubernetes.io/component: cleanup + app.kubernetes.io/part-of: {{ include "cenm.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + restartPolicy: "Never" + containers: + - name: {{ template "cenm.fullname" . }}-cleanup + image: "{{ .Values.image.hooks.repository }}:{{ .Values.image.hooks.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - /bin/bash + - -c + args: + - | + + echo "{{ template "cenm.fullname" . }} pre-delete-hook ..." + +{{- if and (ne .Values.global.cluster.provider "minikube") (.Values.global.cluster.cloudNativeServices) }} + # placeholder for cloudNative deleteSecret function +{{- else }} + + function deleteSecret { + key=$1 + kubectl delete secret ${key} --namespace {{ .Release.Namespace }} + } + +{{- end }} + +{{- if .Values.settings.removeKeysOnDelete }} + +{{- if and (ne .Values.global.cluster.provider "minikube") (.Values.global.cluster.cloudNativeServices) }} + # placeholder for cloudNative deleteSecret function +{{- else }} + deleteSecret cenm-certs +{{- end }} + +{{- end }} + echo "Completed" diff --git a/platforms/r3-corda-ent/charts/cenm/templates/hooks-pre-install.yaml b/platforms/r3-corda-ent/charts/cenm/templates/hooks-pre-install.yaml new file mode 100644 index 00000000000..2a0045e779d --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm/templates/hooks-pre-install.yaml @@ -0,0 +1,443 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "cenm.fullname" . }}-pre-install-hook + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install + "helm.sh/hook-weight": "0" + "helm.sh/hook-delete-policy": "before-hook-creation" + labels: + app.kubernetes.io/name: pre-install-hook + app.kubernetes.io/component: certgen + app.kubernetes.io/part-of: {{ include "cenm.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +spec: + backoffLimit: 1 + completions: 1 + template: + metadata: + labels: + app.kubernetes.io/name: pre-install-hook + app.kubernetes.io/component: certgen + app.kubernetes.io/part-of: {{ include "cenm.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + restartPolicy: "OnFailure" + containers: + - name: cenm-pki + image: {{ .Values.image.pki.repository }}:{{ .Values.image.pki.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + securityContext: + runAsUser: 0 + volumeMounts: + - name: generated-config + mountPath: /home + - name: scripts-volume + mountPath: /scripts/bevel-vault.sh + subPath: bevel-vault.sh + - name: openssl-conf + mountPath: /home/openssl.conf + subPath: openssl.conf + {{- if (eq .Values.global.vault.type "hashicorp") }} + env: + - name: VAULT_ADDR + value: "{{ .Values.global.vault.address }}" + - name: VAULT_SECRET_ENGINE + value: "{{ .Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ .Values.global.vault.secretPrefix }}" + - name: KUBERNETES_AUTH_PATH + value: "{{ .Values.global.vault.authPath }}" + - name: VAULT_APP_ROLE + value: "{{ .Values.global.vault.role }}" + - name: VAULT_TYPE + value: "{{ .Values.global.vault.type }}" + {{- end }} + command: + - /bin/bash + - -c + args: + - | +{{- if (eq .Values.global.vault.type "hashicorp") }} + . /scripts/bevel-vault.sh + echo "Getting vault Token..." + vaultBevelFunc "init" + #Read if secret exists in Vault + vaultBevelFunc 'readJson' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/cenm-certs" + function safeWriteSecret { + key=$1 + fpath=$2 + if [ "$SECRETS_AVAILABLE" == "yes" ] + then + # Get secret from Vault and create the k8s secret if it does not exist + kubectl get secret ${key}-certs --namespace {{ .Release.Namespace }} -o json > /dev/null 2>&1 + if [ $? 
-ne 0 ]; then + CORDA_SSL_IDENTITY_MANAGER_KEYS=$(echo ${VAULT_SECRET} | jq -r '.["idmansslkeys_base64"]') + CORDA_SSL_NETWORK_MAP_KEYS=$(echo ${VAULT_SECRET} | jq -r '.["nmssslkeys_base64"]') + CORDA_SSL_AUTH_KEYS=$(echo ${VAULT_SECRET} | jq -r '.["authsslkeys_base64"]') + CORDA_SSL_ROOT_KEYS=$(echo ${VAULT_SECRET} | jq -r '.["rootsslkeys_base64"]') + CORDA_SSL_TRUSTSTORE=$(echo ${VAULT_SECRET} | jq -r '.["ssltrustore_base64"]') + NETWORK_ROOT_TRUSTSTORE=$(echo ${VAULT_SECRET} | jq -r '.["nmstruststore_base64"]') + CORDA_SSL_SIGNER_KEYS=$(echo ${VAULT_SECRET} | jq -r '.["signerkeys_base64"]') + IDENTITY_MANAGER_KEYSTORE=$(echo ${VAULT_SECRET} | jq -r '.["idmankeystore_base64"]') + NETWORK_MAP_KEYSTORE=$(echo ${VAULT_SECRET} | jq -r '.["nmskeystore_base64"]') + TLS_CRL=$(echo ${VAULT_SECRET} | jq -r '.["tlscrl_base64"]') + ROOT_CRL=$(echo ${VAULT_SECRET} | jq -r '.["rootcrl_base64"]') + SUBORDINATE_CRL=$(echo ${VAULT_SECRET} | jq -r '.["subordinatecrl_base64"]') + + echo "creating tmp files for the cert" + echo $CORDA_SSL_IDENTITY_MANAGER_KEYS | base64 -d > /tmp/corda-ssl-identity-manager-keys.jks + echo $CORDA_SSL_NETWORK_MAP_KEYS | base64 -d > /tmp/corda-ssl-network-map-keys.jks + echo $CORDA_SSL_AUTH_KEYS | base64 -d > /tmp/corda-ssl-auth-keys.jks + echo $CORDA_SSL_ROOT_KEYS | base64 -d > /tmp/corda-ssl-root-keys.jks + echo $CORDA_SSL_TRUSTSTORE | base64 -d > /tmp/corda-ssl-trust-store.jks + echo $NETWORK_ROOT_TRUSTSTORE | base64 -d > /tmp/network-root-truststore.jks + echo $CORDA_SSL_SIGNER_KEYS | base64 -d > /tmp/corda-ssl-signer-keys.jks + echo $IDENTITY_MANAGER_KEYSTORE | base64 -d > /tmp/identity-manager-key-store.jks + echo $NETWORK_MAP_KEYSTORE | base64 -d > /tmp/network-map-key-store.jks + echo $TLS_CRL | base64 -d > /tmp/tls.crl + echo $ROOT_CRL | base64 -d > /tmp/root.crl + echo $SUBORDINATE_CRL | base64 -d > /tmp/subordinate.crl + + echo "creating kubernetes secrets.." 
+ kubectl create secret generic ${key}-certs --namespace {{ .Release.Namespace }} \ + --from-file=corda-ssl-identity-manager-keys.jks=/tmp/corda-ssl-identity-manager-keys.jks \ + --from-file=corda-ssl-network-map-keys.jks=/tmp/corda-ssl-network-map-keys.jks \ + --from-file=corda-ssl-auth-keys.jks=/tmp/corda-ssl-auth-keys.jks \ + --from-file=corda-ssl-root-keys.jks=/tmp/corda-ssl-root-keys.jks \ + --from-file=corda-ssl-trust-store.jks=/tmp/corda-ssl-trust-store.jks \ + --from-file=network-root-truststore.jks=/tmp/network-root-truststore.jks \ + --from-file=corda-ssl-signer-keys.jks=/tmp/corda-ssl-signer-keys.jks \ + --from-file=identity-manager-key-store.jks=/tmp/identity-manager-key-store.jks \ + --from-file=network-map-key-store.jks=/tmp/network-map-key-store.jks \ + --from-file=tls.crl=/tmp/tls.crl \ + --from-file=root.crl=/tmp/root.crl \ + --from-file=subordinate.crl=/tmp/subordinate.crl + fi + else + # Save Certs to Vault + # Use -w0 to get single line base64 -w0 + CORDA_SSL_IDENTITY_MANAGER_KEYS=$(cat ${fpath}/idman/certs/corda-ssl-identity-manager-keys.jks | base64 -w0) + CORDA_SSL_NETWORK_MAP_KEYS=$(cat ${fpath}/networkmap/corda-ssl-network-map-keys.jks | base64 -w0) + CORDA_SSL_AUTH_KEYS=$(cat ${fpath}/root/corda-ssl-auth-keys.jks | base64 -w0) + CORDA_SSL_ROOT_KEYS=$(cat ${fpath}/root/corda-ssl-root-keys.jks | base64 -w0) + CORDA_SSL_TRUSTSTORE=$(cat ${fpath}/root/corda-ssl-trust-store.jks | base64 -w0) + NETWORK_ROOT_TRUSTSTORE=$(cat ${fpath}/root/network-root-truststore.jks | base64 -w0) + CORDA_SSL_SIGNER_KEYS=$(cat ${fpath}/signer/corda-ssl-signer-keys.jks| base64 -w0) + IDENTITY_MANAGER_KEYSTORE=$(cat ${fpath}/signer/identity-manager-key-store.jks | base64 -w0) + NETWORK_MAP_KEYSTORE=$(cat ${fpath}/signer/network-map-key-store.jks | base64 -w0) + TLS_CRL=$(cat ${fpath}/idman/crls/tls.crl | base64 -w0) + ROOT_CRL=$(cat ${fpath}/idman/crls/root.crl | base64 -w0) + SUBORDINATE_CRL=$(cat ${fpath}/idman/crls/subordinate.crl | base64 -w0) + # create a JSON file for the data related to node crypto + echo " + { + \"data\": + { + \"idmansslkeys_base64\": \"${CORDA_SSL_IDENTITY_MANAGER_KEYS}\", + \"nmssslkeys_base64\": \"${CORDA_SSL_NETWORK_MAP_KEYS}\", + \"authsslkeys_base64\": \"${CORDA_SSL_AUTH_KEYS}\", + \"rootsslkeys_base64\": \"${CORDA_SSL_ROOT_KEYS}\", + \"ssltrustore_base64\": \"${CORDA_SSL_TRUSTSTORE}\", + \"nmstruststore_base64\": \"${NETWORK_ROOT_TRUSTSTORE}\", + \"signerkeys_base64\": \"${CORDA_SSL_SIGNER_KEYS}\", + \"idmankeystore_base64\": \"${IDENTITY_MANAGER_KEYSTORE}\", + \"nmskeystore_base64\": \"${NETWORK_MAP_KEYSTORE}\", + \"tlscrl_base64\": \"${TLS_CRL}\", + \"rootcrl_base64\": \"${ROOT_CRL}\", + \"subordinatecrl_base64\": \"${SUBORDINATE_CRL}\" + } + }" > payload.json + vaultBevelFunc 'write' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/${key}-certs" 'payload.json' + rm payload.json + # Also create the k8s secret + kubectl create secret generic ${key}-certs --namespace {{ .Release.Namespace }} \ + --from-file=corda-ssl-identity-manager-keys.jks=${fpath}/idman/certs/corda-ssl-identity-manager-keys.jks \ + --from-file=corda-ssl-network-map-keys.jks=${fpath}/networkmap/corda-ssl-network-map-keys.jks \ + --from-file=corda-ssl-auth-keys.jks=${fpath}/root/corda-ssl-auth-keys.jks \ + --from-file=corda-ssl-root-keys.jks=${fpath}/root/corda-ssl-root-keys.jks \ + --from-file=corda-ssl-trust-store.jks=${fpath}/root/corda-ssl-trust-store.jks \ + --from-file=network-root-truststore.jks=${fpath}/root/network-root-truststore.jks \ + 
--from-file=corda-ssl-signer-keys.jks=${fpath}/signer/corda-ssl-signer-keys.jks \ + --from-file=identity-manager-key-store.jks=${fpath}/signer/identity-manager-key-store.jks \ + --from-file=network-map-key-store.jks=${fpath}/signer/network-map-key-store.jks \ + --from-file=tls.crl=${fpath}/idman/crls/tls.crl \ + --from-file=root.crl=${fpath}/idman/crls/root.crl \ + --from-file=subordinate.crl=${fpath}/idman/crls/subordinate.crl + fi + } +{{- else }} + function safeWriteSecret { + key=$1 + fpath=$2 + kubectl get secret ${key}-certs --namespace {{ .Release.Namespace }} -o json > /dev/null 2>&1 + if [ $? -ne 0 ]; then + kubectl create secret generic ${key}-certs --namespace {{ .Release.Namespace }} \ + --from-file=corda-ssl-identity-manager-keys.jks=${fpath}/idman/certs/corda-ssl-identity-manager-keys.jks \ + --from-file=corda-ssl-network-map-keys.jks=${fpath}/networkmap/corda-ssl-network-map-keys.jks \ + --from-file=corda-ssl-auth-keys.jks=${fpath}/root/corda-ssl-auth-keys.jks \ + --from-file=corda-ssl-root-keys.jks=${fpath}/root/corda-ssl-root-keys.jks \ + --from-file=corda-ssl-trust-store.jks=${fpath}/root/corda-ssl-trust-store.jks \ + --from-file=network-root-truststore.jks=${fpath}/root/network-root-truststore.jks \ + --from-file=corda-ssl-signer-keys.jks=${fpath}/signer/corda-ssl-signer-keys.jks \ + --from-file=identity-manager-key-store.jks=${fpath}/signer/identity-manager-key-store.jks \ + --from-file=network-map-key-store.jks=${fpath}/signer/network-map-key-store.jks \ + --from-file=tls.crl=${fpath}/idman/crls/tls.crl \ + --from-file=root.crl=${fpath}/idman/crls/root.crl \ + --from-file=subordinate.crl=${fpath}/idman/crls/subordinate.crl + fi + } +{{- end }} + if [ "$SECRETS_AVAILABLE" == "yes" ] + then + echo "Certificates found for {{ .Release.Name }} ..." + else + echo "Creating certificates for {{ .Release.Name }} ..." 
+ echo 'keyStores = { + "identity-manager-key-store" = { + type = LOCAL + file = "./DATA/signer/identity-manager-key-store.jks" + password = "password" + } + "network-map-key-store" = { + type = LOCAL + file = "./DATA/signer/network-map-key-store.jks" + password = "password" + } + "subordinate-key-store" = { + type = LOCAL + file = "./DATA/root/subordinate-key-store.jks" + password = "password" + } + "root-key-store" = { + type = LOCAL + file = "./DATA/root/root-key-store.jks" + password = "password" + } + "tls-crl-signer-key-store" = { + type = LOCAL + file = "./DATA/root/tls-crl-signer-key-store.jks" + password = "password" + } + "corda-ssl-network-map-keys" = { + type = LOCAL + file = "./DATA/networkmap/corda-ssl-network-map-keys.jks" + password = "password" + }, + "corda-ssl-identity-manager-keys" = { + type = LOCAL + file = "./DATA/idman/certs/corda-ssl-identity-manager-keys.jks" + password = "password" + }, + "corda-ssl-signer-keys" = { + type = LOCAL + file = "./DATA/signer/corda-ssl-signer-keys.jks" + password = "password" + }, + "corda-ssl-auth-keys" = { + type = LOCAL + file = "./DATA/root/corda-ssl-auth-keys.jks" + password = "password" + }, + "corda-ssl-root-keys" = { + type = LOCAL + file = "./DATA/root/corda-ssl-root-keys.jks" + password = "password" + } + } + certificatesStores = { + "network-root-trust-store" = { + file = "./DATA/root/network-root-truststore.jks" + password = "password" + } + "corda-ssl-trust-store" = { + file = "./DATA/root/corda-ssl-trust-store.jks" + password = "password" + } + } + certificates = { + "tlscrlsigner" = { + key = { + type = LOCAL + includeIn = ["tls-crl-signer-key-store"] + algorithm = "ECDSA_SECP256R1_SHA256" + password = "password" + } + isSelfSigned = true + keyUsages = [CRL_SIGN] + keyPurposes = [SERVER_AUTH, CLIENT_AUTH] + validDays = 7300 + issuesCertificates = true + subject = {{ .Values.subjects.tlscrlsigner | quote }} + includeIn = ["network-root-trust-store"] + crl = { + crlDistributionUrl = "{{ include "identityManager.URL" . }}/certificate-revocation-list/tls" + indirectIssuer = true + issuer = {{ .Values.subjects.tlscrlissuer | quote }} + file = "./DATA/idman/crls/tls.crl" + } + }, + "cordarootca" = { + key = { + type = LOCAL + includeIn = ["root-key-store"] + algorithm = "ECDSA_SECP256R1_SHA256" + password = "password" + } + isSelfSigned = true + keyUsages = [DIGITAL_SIGNATURE, KEY_CERT_SIGN, CRL_SIGN] + keyPurposes = [SERVER_AUTH, CLIENT_AUTH] + validDays = 7300 + issuesCertificates = true + subject = {{ .Values.subjects.rootca | quote }} + includeIn = ["network-root-trust-store"] + crl = { + crlDistributionUrl = "{{ include "identityManager.URL" . }}/certificate-revocation-list/root" + file = "./DATA/idman/crls/root.crl" + } + }, + "subordinateca" = { + key = { + type = LOCAL + includeIn = ["subordinate-key-store"] + algorithm = "ECDSA_SECP256R1_SHA256" + password = "password" + } + isSelfSigned = false + signedBy = "cordarootca" + keyUsages = [DIGITAL_SIGNATURE, KEY_CERT_SIGN, CRL_SIGN] + keyPurposes = [SERVER_AUTH, CLIENT_AUTH] + validDays = 7300 + issuesCertificates = true + subject = {{ .Values.subjects.subordinateca | quote }} + crl = { + crlDistributionUrl = "{{ include "identityManager.URL" . 
}}/certificate-revocation-list/subordinate" + file = "./DATA/idman/crls/subordinate.crl" + } + }, + "cordaidentitymanagerca" = { + key = { + type = LOCAL + includeIn = ["identity-manager-key-store"] + algorithm = "ECDSA_SECP256R1_SHA256" + password = "password" + } + isSelfSigned = false + signedBy = "subordinateca" + keyUsages = [DIGITAL_SIGNATURE, KEY_CERT_SIGN, CRL_SIGN] + keyPurposes = [SERVER_AUTH, CLIENT_AUTH] + validDays = 7300 + role = DOORMAN_CA + issuesCertificates = true + includeIn = ["network-root-trust-store"] + subject = {{ .Values.subjects.idmanca | quote }} + }, + "cordanetworkmap" = { + key = { + type = LOCAL + includeIn = ["network-map-key-store","identity-manager-key-store"] + algorithm = "ECDSA_SECP256R1_SHA256" + password = "password" + } + isSelfSigned = false + signedBy = "subordinateca" + keyUsages = [DIGITAL_SIGNATURE, KEY_CERT_SIGN, CRL_SIGN] + keyPurposes = [SERVER_AUTH, CLIENT_AUTH] + validDays = 7300 + role = NETWORK_MAP + issuesCertificates = false + subject = {{ .Values.subjects.networkmap | quote }} + }, + "::CORDA_SSL_ROOT" { + key = { + type = LOCAL + includeIn = ["corda-ssl-root-keys"] + algorithm = "ECDSA_SECP256R1_SHA256" + password = "password" + } + crl = { + crlDistributionUrl = "{{ include "identityManager.URL" . }}/certificate-revocation-list/ssl" + file = "./DATA/root/crls/ssl.crl" + } + }, + "::CORDA_SSL_IDENTITY_MANAGER" { + key = { + type = LOCAL + includeIn = ["corda-ssl-identity-manager-keys"] + algorithm = "ECDSA_SECP256R1_SHA256" + password = "password" + } + crl = { + crlDistributionUrl = "{{ include "identityManager.URL" . }}/certificate-revocation-list/ssl" + file = "./DATA/idman/crls/ssl.crl" + } + }, + "::CORDA_SSL_NETWORK_MAP" { + key = { + type = LOCAL + includeIn = ["corda-ssl-network-map-keys"] + algorithm = "ECDSA_SECP256R1_SHA256" + password = "password" + } + crl = { + crlDistributionUrl = "{{ include "identityManager.URL" . }}/certificate-revocation-list/ssl" + file = "./DATA/networkmap/crls/ssl.crl" + } + }, + "::CORDA_SSL_SIGNER" { + key = { + type = LOCAL + includeIn = ["corda-ssl-signer-keys"] + algorithm = "ECDSA_SECP256R1_SHA256" + password = "password" + } + crl = { + crlDistributionUrl = "{{ include "identityManager.URL" . }}/certificate-revocation-list/ssl" + file = "./DATA/signer/crls/ssl.crl" + } + }, + "::CORDA_SSL_AUTH_SERVICE" { + key = { + type = LOCAL + includeIn = ["corda-ssl-auth-keys"] + algorithm = "ECDSA_SECP256R1_SHA256" + password = "password" + } + crl = { + crlDistributionUrl = "{{ include "identityManager.URL" . }}/certificate-revocation-list/ssl" + file = "./DATA/root/crls/ssl.crl" + } + } + }' >> pki.conf + time java -Xmx256M -jar bin/pkitool.jar --config-file pki.conf + fi; + # TODO: Move these to pki image + apt install curl + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.27.0/bin/linux/amd64/kubectl + chmod +x ./kubectl + mv ./kubectl /usr/local/bin + + echo "Creating cenm-certs secrets in k8s ..." +{{- if and (ne .Values.global.cluster.provider "minikube") (.Values.global.cluster.cloudNativeServices) }} + echo "Implement cloud native service methods" +{{- else }} + safeWriteSecret cenm /opt/cenm/DATA +{{- end }} + echo "Completed ..." 
+ volumes: + - name: generated-config + emptyDir: {} + - name: scripts-volume + configMap: + name: bevel-vault-script + defaultMode: 0777 + - name: openssl-conf + configMap: + name: openssl-conf diff --git a/platforms/r3-corda-ent/charts/cenm/values.yaml b/platforms/r3-corda-ent/charts/cenm/values.yaml new file mode 100644 index 00000000000..b3795078388 --- /dev/null +++ b/platforms/r3-corda-ent/charts/cenm/values.yaml @@ -0,0 +1,96 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Default values for nodechart. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +global: + serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # set to true to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) + vault: + type: hashicorp + role: vault-role + address: + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + proxy: + #This will be the proxy/ingress provider. Can have values "ambassador" or "none" + #Eg. provider: "ambassador" + provider: "ambassador" + #This field contains the external URL of the node + #Eg. externalUrlSuffix: test.blockchaincloudpoc.com + externalUrlSuffix: test.blockchaincloudpoc.com + # global cenm values used by dependency charts + cenm: + sharedCreds: + truststore: password + keystore: password + identityManager: + port: 10000 + revocation: + port: 5053 + internal: + port: 5052 + auth: + port: 8081 + gateway: + port: 8080 + zone: + enmPort: 25000 + adminPort: 12345 + networkmap: + internal: + port: 5050 + +storage: + size: 1Gi + dbSize: 5Gi + allowedTopologies: + enabled: false + +image: + #Provide the docker secret name in the namespace + #Eg. pullSecret: regcred + pullSecret: + #Pull policy to be used for the Docker image + #Eg. pullPolicy: IfNotPresent + pullPolicy: IfNotPresent + #Provide a valid image and version for pki + pki: + repository: corda/enterprise-pkitool + tag: 1.5.9-zulu-openjdk8u382 + hooks: + repository: ghcr.io/hyperledger/bevel-build + tag: jdk8-stable + +settings: + removeKeysOnDelete: false + +subjects: + # Mention the subject for auth + # Eg. auth: "CN=Test TLS Auth Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" + auth: "CN=Test TLS Auth Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" + # Mention the subject for tls crl signer + # Eg. tlscrlsigner: "CN=Test TLS Signer Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" + tlscrlsigner: "CN=Test TLS Signer Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" + # Mention the subject for the tls crl issuer + # Eg. tlscrlissuer: "CN=Corda TLS CRL Authority,OU=Corda UAT,O=R3 HoldCo LLC,L=New York,C=US" + tlscrlissuer: "CN=Corda TLS CRL Authority,OU=Corda UAT,O=R3 HoldCo LLC,L=New York,C=US" + # Mention the subject for rootca + # Eg. rootca: "CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB" + rootca: "CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB" + # Mention the subject for subordinateca + # Eg. subordinateca: "CN=Test Subordinate CA Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" + subordinateca: "CN=Test Subordinate CA Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" + # Mention the subject for idmanca + # Eg. 
idmanca: "CN=Test Identity Manager Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" + idmanca: "CN=Test Identity Manager Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" + # Mention the subject for networkmap + # Eg. networkmap: "CN=Test Network Map Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" + networkmap: "CN=Test Network Map Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" diff --git a/platforms/r3-corda-ent/charts/corda-ent-bridge/Chart.yaml b/platforms/r3-corda-ent/charts/corda-ent-bridge/Chart.yaml deleted file mode 100644 index e6094d69d8d..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-bridge/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-ent: Deploys the bridge component of the Corda Enterprise Firewall." -name: corda-ent-bridge -version: 1.0.0 diff --git a/platforms/r3-corda-ent/charts/corda-ent-bridge/README.md b/platforms/r3-corda-ent/charts/corda-ent-bridge/README.md deleted file mode 100644 index acff4fb99f0..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-bridge/README.md +++ /dev/null @@ -1,181 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) -[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# bridge Deployment - -- [Bridge Deployment Helm Chart](#Bridge-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Contributing](#contributing) -- [License](#license) - - -## Bridge Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/corda-ent-bridge) deploys the Bridge component of the Corda Enterprise Firewall. - - -## Prerequisites ---- -Before deploying the chart please ensure you have the following prerequisites: - -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm is installed. - - -## Chart Structure ---- -This chart has following structue: -``` - ├── corda-ent-bridge - │ ├── Chart.yaml - │ ├── templates - │ │ ├── deployment.yaml - │ │ ├── _helpers.tpl - │ │ ├── pvc.yaml - │ └── values.yaml -``` - -Type of files used: - -- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed. -- `deployment.yaml` : The deployment ensures that the desired number of firewall service replicas is running in the Kubernetes cluster. -- `_helpers.tpl` : A template file used for defining custom labels in the Helm chart. -- `pvc.yaml` : A PersistentVolumeClaim (PVC) is a request for storage by a user. -- `chart.yaml` : Provides metadata about the chart, such as its name, version, and description. -- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the metadata, image, storage, Vault, etc. 
- - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/corda-ent-bridge/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: - -## Parameters ---- - -### Name - -| Name | Description | Default Value | -| -----------| -------------------------------------------------- | ------------- | -| nodeName | Provide the name of the node | bridge | - -### Metadata - -| Name | Description | Default Value | -| ----------------| ----------------------------------------------------------------| ------------- | -| namespace | Provide the namespace where the Bridge will be deployed | cenm | -| labels | Provide any additional labels for the Corda Enterprise Bridge | "" | - -### initContainerImage - -| Name | Description | Default Value | -| ------------------------ | -----------------------------------------------------------------------------| ------------------- | -| Name | Information about the Docker container used for the init-containers | ghcr.io/hyperledger | - -### Image - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------------------------------| --------------- | -| name | Provide the name of the image, including the tag | adopblockchaincloud0502.azurecr.io/corda_image_firewall_4.4:latest| -| PullSecret | Provide the K8s secret that has rights for pulling the image off the registry | "" | -| pullPolicy | Provide the pull policy for Docker images, either Always or IfNotPresent | IfNotPresent | - -### Vault - -| Name | Description | Default Value | -| ------------------------- | --------------------------------------------------------------------------| ------------- | -| address | Address/URL of the Vault server | "" | -| role | Role used for authentication with Vault | vault-role | -| authpath | Authentication path for Vault | entcordacenm | -| serviceAccountName | Provide the already created service account name autheticated to vault | vault-auth | -| certSecretPrefix | Provide the vault path where the certificates are stored | "" | - -### volume - -| Name | Description | Default Value | -| ------------------| ------------------------------------------ | ------------- | -| baseDir | Provide the base directory for the container| /opt/corda | - -### storage - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ------------- | -| name | Provide the name of the storage class | cenm | - -### Healthcheck - -| Name | Description | Default Value | -| ----------------------------| ------------------------------------------------------------------------------| ------------- | -| readinesscheckinterval | Provide the interval in seconds you want to iterate till db to be ready | 10 | -| readinessthreshold | Provide the threshold till you want to check if specified db up and running | 15 | - - - -## Deployment ---- - -To deploy the Brigde Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/corda-ent-bridge/values.yaml) file to set the desired configuration values. -2. 
Run the following Helm command to install, upgrade, verify delete the chart: - -To install the chart: -```bash -helm repo add bevel https://hyperledger.github.io/bevel/ -helm install ./corda-ent-bridge -``` - -To upgrade the chart: -```bash -helm upgrade ./corda-ent-bridge -``` - -To verify the deployment: -```bash -kubectl get jobs -n -``` -Note : Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. - -To delete the chart: -```bash -helm uninstall -``` -Note : Replace `` with the desired name for the release. - - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Bridge Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/corda-ent-bridge), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - -## License - -This chart is licensed under the Apache v2.0 license. - -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -``` diff --git a/platforms/r3-corda-ent/charts/corda-ent-bridge/templates/_helpers.tpl b/platforms/r3-corda-ent/charts/corda-ent-bridge/templates/_helpers.tpl deleted file mode 100644 index d43c09d8cef..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-bridge/templates/_helpers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} diff --git a/platforms/r3-corda-ent/charts/corda-ent-bridge/templates/deployment.yaml b/platforms/r3-corda-ent/charts/corda-ent-bridge/templates/deployment.yaml deleted file mode 100644 index 320159744d5..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-bridge/templates/deployment.yaml +++ /dev/null @@ -1,240 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace}} - {{- if .Values.deployment.annotations }} - annotations: -{{ toYaml .Values.deployment.annotations | indent 8 }} - {{- end }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - serviceName: "" - replicas: {{ .Values.replicas }} - selector: - matchLabels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - template: - metadata: - labels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - serviceAccountName: {{ .Values.vault.serviceaccountname }} - securityContext: - runAsUser: 1000 - runAsGroup: 1000 - fsGroup: 1000 - initContainers: - - name: init-certificates - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code and curl_response - $curl_response" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - # setting up env to get secrets from vault - echo "Getting secrets from Vault Server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - echo "logged into vault" - - OUTPUT_PATH=${BASE_DIR}/certificates - - # get sslkeystore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs/sslkeystore | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/sslkeystore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TLS_SSLKEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["sslkeystore.jks"]') - echo "${TLS_SSLKEYSTORE}" | base64 -d > ${OUTPUT_PATH}/sslkeystore.jks - - # get truststore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs/truststore | jq -r 'if .errors then . else . 
end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/certs/truststore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TLS_TRUSTSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["truststore.jks"]') - echo "${TLS_TRUSTSTORE}" | base64 -d > ${OUTPUT_PATH}/truststore.jks - echo "Done" - - # Fetching Bridge Certificate from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs/firewall | jq -r 'if .errors then . else . end') - validateVaultResponse "{CERTS_SECRET_PREFIX}/certs/firewall" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - bridge=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["bridge.jks"]') - echo "${bridge}" | base64 -d > ${OUTPUT_PATH}/bridge.jks - echo "Successfully got Bridge Certifcate" - - trust=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["trust.jks"]') - echo "${trust}" | base64 -d > ${OUTPUT_PATH}/trust.jks - echo "Successfully got trust Certifcate" - - # Fetching node keystore, node truststore and network-root-truststore credentials from vault - mkdir -p ${OUTPUT_PATH}/credentials - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TRUSTSTORE_PASSWORD=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["truststore"]') - echo "${TRUSTSTORE_PASSWORD}" > ${OUTPUT_PATH}/credentials/truststorepass - KEYSTORE_PASSWORD=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["keystore"]') - echo "${KEYSTORE_PASSWORD}" > ${OUTPUT_PATH}/credentials/keystorepass - FIREWALL_PASSWORD=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["firewallca"]') - echo "${FIREWALL_PASSWORD}" > ${OUTPUT_PATH}/credentials/firewallpass - BRIDGE_PASSWORD=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["bridge"]') - echo "${BRIDGE_PASSWORD}" > ${OUTPUT_PATH}/credentials/bridgepass - echo "Done" - # TODO: Get network-parameters from corresponding Node - echo "Done with init-certificates..." 
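
If the init container fails at one of these lookups, the same KV-v2 reads can be reproduced from outside the pod to confirm the secrets exist where the script expects them. A small sketch, assuming a token with read access to the same paths; VAULT_ADDR, VAULT_TOKEN and the prefix are placeholders, and the endpoints and field names mirror the script above:

```bash
# Hypothetical pre-flight check mirroring the init container's Vault reads.
# VAULT_ADDR, VAULT_TOKEN and CERTS_SECRET_PREFIX are placeholders for the
# values supplied to the chart.
CERTS_SECRET_PREFIX="cenm/certs"

# Bridge firewall keystores: expect keys such as "bridge.jks" and "trust.jks".
curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" \
  "${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs/firewall" | jq -r '.data.data | keys'

# Store passwords: expect "truststore", "keystore", "firewallca" and "bridge".
curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" \
  "${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials" | jq -r '.data.data | keys'
```
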
- volumeMounts: - - name: certificates - mountPath: {{ $.Values.volume.baseDir }}/certificates - imagePullSecrets: - - name: {{ .Values.image.imagePullSecret }} - containers: - - name: main - image: "{{ .Values.image.mainContainerName }}" - env: - - name: BASE_DIR - value: {{ $.Values.volume.baseDir }} - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: ["sh", "-c"] - args: - - |- - echo 'firewallMode: BridgeInner - networkParametersPath: "network-parameters" - - outboundConfig: { - artemisBrokerAddress: "{{ .Values.node.messagingServerAddress }}:{{ .Values.node.messagingServerPort }}" - alternateArtemisBrokerAddresses: [] - } - - bridgeInnerConfig: { - floatAddresses: ["{{ .Values.float.address }}:{{ .Values.tunnel.port }}"] - expectedCertificateSubject: "{{ .Values.float.subject }}" - tunnelSSLConfiguration { - keyStorePassword: "BRIDGE_PASSWORD" - trustStorePassword: "FIREWALL_PASSWORD" - sslKeystore: "certificates/bridge.jks" - trustStoreFile: "certificates/trust.jks" - } - } - - revocationConfig: { mode: "OFF"} - - certificatesDirectory: "certificates" - sslKeystore: "certificates/sslkeystore.jks" - trustStoreFile: "certificates/truststore.jks" - keyStorePassword: "KEYSTORE_PASSWORD" - trustStorePassword: "TRUSTSTORE_PASSWORD" - silencedIPs: [] - - enableAMQPPacketTrace: true - artemisReconnectionIntervalMin: 5000 - artemisReconnectionIntervalMax: 60000 - politeShutdownPeriod: 1000 - p2pConfirmationWindowSize: 1048576 - auditServiceConfiguration: { - loggingIntervalSec: 60 - }' >> ${BASE_DIR}/firewall.conf - - export TRUSTSTORE_PASSWORD=$(cat ${BASE_DIR}/certificates/credentials/truststorepass) - sed -i -e "s*TRUSTSTORE_PASSWORD*${TRUSTSTORE_PASSWORD}*g" ${BASE_DIR}/firewall.conf - export KEYSTORE_PASSWORD=$(cat ${BASE_DIR}/certificates/credentials/keystorepass) - sed -i -e "s*KEYSTORE_PASSWORD*${KEYSTORE_PASSWORD}*g" ${BASE_DIR}/firewall.conf - export FIREWALL_PASSWORD=$(cat ${BASE_DIR}/certificates/credentials/firewallpass) - sed -i -e "s*FIREWALL_PASSWORD*${FIREWALL_PASSWORD}*g" ${BASE_DIR}/firewall.conf - export BRIDGE_PASSWORD=$(cat ${BASE_DIR}/certificates/credentials/bridgepass) - sed -i -e "s*BRIDGE_PASSWORD*${BRIDGE_PASSWORD}*g" ${BASE_DIR}/firewall.conf - - ifNetworkParametersExists () { - if [ -f ${BASE_DIR}/network-parameters ] - then - return 1 - else - return 0 - fi - } - let EXIT_CODE=0 - while [ ${EXIT_CODE} -eq 0 ] - do - sleep 2 - echo "Checking for network parameters" - ifNetworkParametersExists - let EXIT_CODE=$? 
- done - echo "Starting the firewall" - java -jar corda-firewall.jar --base-directory ${BASE_DIR} --verbose --logging-level=INFO - resources: - requests: - memory: {{ .Values.cordaJarMx }}M - limits: - memory: {{ add .Values.cordaJarMx 2}}M - volumeMounts: - - name: {{ .Values.nodeName }}-volume - mountPath: {{ .Values.volume.baseDir }} - - name: certificates - mountPath: {{ .Values.volume.baseDir }}/certificates - volumes: - - name: certificates - emptyDir: - medium: Memory - volumeClaimTemplates: - - metadata: - name: {{ .Values.nodeName }}-volume -{{- if .Values.pvc.annotations }} - annotations: -{{ toYaml .Values.pvc.annotations | indent 8 }} -{{- end }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }}-volume - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - storageClassName: {{ .Values.storage.name }} - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 256Mi diff --git a/platforms/r3-corda-ent/charts/corda-ent-bridge/values.yaml b/platforms/r3-corda-ent/charts/corda-ent-bridge/values.yaml deleted file mode 100644 index e8abc361909..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-bridge/values.yaml +++ /dev/null @@ -1,119 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Default values for the Bridge firewall component of Corda Enterprise. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. -deployment: - annotations: {} - -# Provide the name of the node. -# e.g. nodeName: bridge -nodeName: brigde - -# Provide context for the chart. -metadata: - # Provide the namespace where the Bridge will be deployed. - # e.g. namespace: cenm - namespace: cenm - # Provide any extra labels you wish to add to the charts, formatted as key-value pairs. - # e.g. key: "value" - labels: {} - -# Provide the amount of replicas you want to deploy of the Float. -# e.g. replicas: 1 -replicas: 1 - -# Information about the Docker container used for the init-containers. -initContainerImage: - # Provide the name of the image, including the tag. - # e.g. name: ghcr.io/hyperledger/bevel-alpine:latest - name: ghcr.io/hyperledger/bevel-alpine:latest - -# Information about the main image used for the main Float firewall container. -image: - # Provide the name of the image, including the tag - # e.g. name: adopblockchaincloud0502.azurecr.io/corda_image_firewall_4.4:latest - name: adopblockchaincloud0502.azurecr.io/corda_image_firewall_4.4:latest - # Provide the K8s secret that has rights for pulling the image off the registry. - # NOTE: Make sure the secret exists prior to manually running this deployment, - # as this chart does not create the pull secret itself. - # e.g. pullSecret: regcred - pullSecret: "" - # Provide the pull policy for Docker images, either Always or IfNotPresent. - # e.g. pullPolicy: Always - pullPolicy: IfNotPresent - - -# Information about the vault. -# NOTE: Make sure that the vault is already unsealed, initialized and configured -# to use the Kubernetes service account token based authentication. -vault: - # Provide the vault address - # Eg. address: http://vault.example.com:8200 - address: - # Provide the vault role used. - # Eg. 
role: vault-role - role: vault-role - # Provide the authpath configured to be used. - # Eg. authpath: entcordacenm - authpath: entcordacenm - # Provide the service account name autheticated to vault. - # NOTE: Make sure that the service account is already created and autheticated to use the vault. - # Eg. serviceaccountname: vault-auth - serviceaccountname: vault-auth - # Provide the vault path where the certificates are stored - # Eg. certsecretprefix: cenm/certs - certsecretprefix: cenm/certs - - - -# Provide volume related specifications -volume: - # Provide the base directory for the container. - # E.g. baseDir: /opt/corda - baseDir: /opt/corda - -# This section contains information about storage classes used in the Chart. -storage: - # Provide the name of the storage class. - # NOTE: Make sure that the storage class exists prior to manually running this deployment, - # as this chart does not create the storage class itself. - # e.g. name: cenm - name: cenm - -pvc: - # Provide any annotations for the Persistent Volume Claims (PVC), formatted as key-value pairs. - # e.g. annotations: - # key: "value" - annotations: {} - -# Provide the maximum size of the memory allocation pool -# e.g. cordaJarMx: 1 -cordaJarMx: - -# healthCheckNodePort set to get rid of logs pollution -# E.g. healthCheckNodePort: 0 -healthCheckNodePort: - -healthcheck: - # Provide the interval in seconds you want to iterate till db to be ready - # E.g. readinesscheckinterval: 10 - readinesscheckinterval: 10 - # Provide the threshold till you want to check if specified db up and running - # E.g. readinessthreshold: 15 - readinessthreshold: 15 - -# TODO Samples should be updated for below -float: - address: - port: - subject: -node: - messagingServerAddress: - messagingServerPort: -tunnel: - port: diff --git a/platforms/r3-corda-ent/charts/corda-ent-float/Chart.yaml b/platforms/r3-corda-ent/charts/corda-ent-float/Chart.yaml deleted file mode 100644 index 96a69115445..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-float/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-ent: Deploys the float firewall." -name: corda-ent-float -version: 1.0.0 diff --git a/platforms/r3-corda-ent/charts/corda-ent-float/README.md b/platforms/r3-corda-ent/charts/corda-ent-float/README.md deleted file mode 100644 index cacc449134c..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-float/README.md +++ /dev/null @@ -1,206 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) -[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# Float Deployment - -- [Float Deployment Helm Chart](#Float-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Contributing](#contributing) -- [License](#license) - - -## Float Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/corda-ent-float) deploys the float firewall service. 
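
Most of the float-specific settings described further down (the DMZ addresses, the bridge tunnel and the node p2p port) ship empty in values.yaml, so they are normally supplied as an override at install time. An illustrative override, reusing the example values from the chart's own comments; the release name and hostnames are placeholders:

```bash
# Illustrative override for the float chart; the dmz hostnames, bridge subject
# and release name are placeholders taken from the chart's example comments.
cat > float-overrides.yaml <<'EOF'
dmz:
  internal: dmzinternal
  external: dmzexternal
bridge:
  legalName: "CN=Test Bridge Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US"
  tunnelPort: 39999
node:
  p2pPort: 40000
EOF

helm install float ./platforms/r3-corda-ent/charts/corda-ent-float \
  --namespace cenm -f float-overrides.yaml
```

Helm 3 requires an explicit release name, which is why `float` is passed before the chart path here.
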
- - -## Prerequisites ---- -Before deploying the chart please ensure you have the following prerequisites: - -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm is installed. - - - -## Chart Structure ---- -This chart has following structue: -``` - ├── corda-ent-float - │ ├── Chart.yaml - │ ├── templates - │ │ ├── deployment.yaml - │ │ ├── _helpers.tpl - │ │ ├── pvc.yaml - │ │ └── service.yaml - │ └── values.yaml -``` - -Type of files used: - -- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed. -- `deployment.yaml` : The deployment ensures that the desired number of firewall service replicas is running in the Kubernetes cluster. -- `_helpers.tpl` : A template file used for defining custom labels in the Helm chart. -- `pvc.yaml` : A PersistentVolumeClaim (PVC) is a request for storage by a user. -- `service.yaml` : This file defines a Kubernetes Service with multiple ports for protocols and targets, and supports Ambassador proxy annotations for specific configurations when using the "ambassador" proxy provider. -- `chart.yaml` : Provides metadata about the chart, such as its name, version, and description. -- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the metadata, image, storage, Vault, etc. - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/corda-ent-float/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: - -## Parameters ---- - -### Name - -| Name | Description | Default Value | -| -----------| ------------------------------------| ------------- | -| nodeName | Provide the name of the node | float | - -### Metadata - -| Name | Description | Default Value | -| ----------------| ----------------------------------------------------------------| ------------- | -| namespace | Provide the namespace where the float will be deployed | cenm | -| labels | Provide any additional labels for the Corda Enterprise float | "" | - -### storage - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ------------- | -| name | Provide the name of the storage class | cenm | - -### initContainerImage - -| Name | Description | Default Value | -| -----------| --------------------------------------------------------------------------| ------------------ | -|Name | Information about the Docker container used for the init-containers | ghcr.io/hyperledger| - -### Image - -| Name | Description | Default Value | -| ------------------------ | --------------------------------------------------------------------------------- | --------------- | -| name | Provide the name of the image, including the tag | adopblockchaincloud0502.azurecr.io/corda_image_firewall_4.4:latest| -| PullSecret | Provide the K8s secret that has rights for pulling the image off the registry | "" | -| pullPolicy | Provide the pull policy for Docker images, either Always or IfNotPresent | IfNotPresent | - -### Vault - -| Name | Description | Default Value | -| ------------------------- | --------------------------------------------------------------------------| ------------- | -| address | Address/URL of the Vault 
server | "" | -| role | Role used for authentication with Vault | vault-role | -| authpath | Authentication path for Vault | entcordacenm | -| serviceAccountName | Provide the already created service account name autheticated to vault | vault-auth | -| certSecretPrefix | Provide the vault path where the certificates are stored | cenm/certs | - -### volume - -| Name | Description | Default Value | -| ------------------| ------------------------------------------ | ------------- | -| baseDir | Provide the base directory for the container| /opt/corda | - -### bridge - -| Name | Description | Default Value | -| ------------------| --------------------------------------------------------------- | ------------- | -| legalName | Provide the legal X500 name of the bridge | "" | -| tunnelPort | Provide the port number that the Bridge will connect to the Float| 39999 | - -### float - -| Name | Description | Default Value | -| ------------------| ----------------------------------------------------- | ------------- | -| loadBalancerIP | Define the public STATIC IP address NUMBER of the Float| 123.456.789.001 | - -### ambassador - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ------------- | -| p2pPort | Provide the ambassador p2p port for float | 15019 | -| tunnelPort | Provide the bridge tunnel ambassador port | 15029 | -| external_url_suffix | Provide the external url suffix for float | "" | - -### Healthcheck - -| Name | Description | Default Value | -| ----------------------------| ------------------------------------------------------------------------------| ------------- | -| readinesscheckinterval | Provide the interval in seconds you want to iterate till db to be ready | 10 | -| readinessthreshold | Provide the threshold till you want to check if specified db up and running | 15 | - - - -## Deployment ---- - -To deploy the Float Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/corda-ent-float/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install, upgrade, verify delete the chart: - -To install the chart: -```bash -helm repo add bevel https://hyperledger.github.io/bevel/ -helm install ./corda-ent-float -``` - -To upgrade the chart: -```bash -helm upgrade ./corda-ent-float -``` - -To verify the deployment: -```bash -kubectl get jobs -n -``` -Note : Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. - -To delete the chart: -```bash -helm uninstall -``` -Note : Replace `` with the desired name for the release. - - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Float Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/corda-ent-float), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - - -## License - -This chart is licensed under the Apache v2.0 license. 
- -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -``` diff --git a/platforms/r3-corda-ent/charts/corda-ent-float/templates/_helpers.tpl b/platforms/r3-corda-ent/charts/corda-ent-float/templates/_helpers.tpl deleted file mode 100644 index d43c09d8cef..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-float/templates/_helpers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} diff --git a/platforms/r3-corda-ent/charts/corda-ent-float/templates/deployment.yaml b/platforms/r3-corda-ent/charts/corda-ent-float/templates/deployment.yaml deleted file mode 100644 index f65b111897b..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-float/templates/deployment.yaml +++ /dev/null @@ -1,241 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace}} - {{- if .Values.deployment.annotations }} - annotations: -{{ toYaml .Values.deployment.annotations | indent 8 }} - {{- end }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - serviceName: {{ .Values.nodeName }} - replicas: {{ .Values.replicas }} - selector: - matchLabels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - template: - metadata: - labels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - securityContext: - runAsUser: 1000 - runAsGroup: 1000 - fsGroup: 1000 - initContainers: - - name: init-certificates - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o 
/dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code and curl_response - $curl_response" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - # setting up env to get secrets from vault - echo "Getting secrets from Vault Server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - echo "logged into vault" - - OUTPUT_PATH=${BASE_DIR}/certificates - - # get sslkeystore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs/sslkeystore | jq -r 'if .errors then . else . end') - validateVaultResponse "{CERTS_SECRET_PREFIX}/certs/sslkeystore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TLS_SSLKEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["sslkeystore.jks"]') - echo "${TLS_SSLKEYSTORE}" | base64 -d > ${OUTPUT_PATH}/sslkeystore.jks - - # get truststore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs/truststore | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/certs/truststore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TLS_TRUSTSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["truststore.jks"]') - echo "${TLS_TRUSTSTORE}" | base64 -d > ${OUTPUT_PATH}/truststore.jks - echo "Done" - - # Fetching float Certificate from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs/firewall | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/certs/firewall" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - float=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["float.jks"]') - echo "${float}" | base64 -d > ${OUTPUT_PATH}/float.jks - echo "Successfully got Bridge Certifcate" - - trust=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["trust.jks"]') - echo "${trust}" | base64 -d > ${OUTPUT_PATH}/trust.jks - echo "Successfully got trust Certifcate" - - # Fetching node keystore, node truststore and network-root-truststore credentials from vault - mkdir -p ${OUTPUT_PATH}/credentials - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials | jq -r 'if .errors then . else . 
end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TRUSTSTORE_PASSWORD=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["truststore"]') - echo "${TRUSTSTORE_PASSWORD}" > ${OUTPUT_PATH}/credentials/truststorepass - KEYSTORE_PASSWORD=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["keystore"]') - echo "${KEYSTORE_PASSWORD}" > ${OUTPUT_PATH}/credentials/keystorepass - FIREWALL_PASSWORD=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["firewallca"]') - echo "${FIREWALL_PASSWORD}" > ${OUTPUT_PATH}/credentials/firewallpass - FLOAT_PASSWORD=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["float"]') - echo "${FLOAT_PASSWORD}" > ${OUTPUT_PATH}/credentials/floatpass - echo "Done" - - # TODO: Get network-parameters from corresponding Node - echo "Done with init-certificates..." - volumeMounts: - - name: certificates - mountPath: {{ $.Values.volume.baseDir }}/certificates - imagePullSecrets: - - name: {{ .Values.image.imagePullSecret }} - containers: - - name: main - image: "{{ .Values.image.mainContainerName }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - env: - - name: BASE_DIR - value: {{ $.Values.volume.baseDir }} - command: ["sh", "-c"] - args: - - |- - echo 'firewallMode: FloatOuter - networkParametersPath: "network-parameters" - - inboundConfig: { - listeningAddress: "{{ .Values.dmz.external }}:{{ .Values.node.p2pPort }}" - } - - floatOuterConfig: { - floatAddress: "{{ .Values.dmz.internal }}:{{ .Values.bridge.tunnelPort }}" - expectedCertificateSubject: "{{ .Values.bridge.subject }}" - tunnelSSLConfiguration: { - keyStorePassword: "FLOAT_PASSWORD" - trustStorePassword: "FIREWALL_PASSWORD" - sslKeystore: "certificates/float.jks" - trustStoreFile: "certificates/trust.jks" - } - } - - revocationConfig: { mode: "EXTERNAL_SOURCE"} - - certificatesDirectory: "certificates" - sslKeystore: "certificates/sslkeystore.jks" - trustStoreFile: "certificates/truststore.jks" - keyStorePassword: "KEYSTORE_PASSWORD" - trustStorePassword: "TRUSTSTORE_PASSWORD" - silencedIPs: [] - - enableAMQPPacketTrace: true - artemisReconnectionIntervalMin: 5000 - artemisReconnectionIntervalMax: 60000 - politeShutdownPeriod: 1000 - p2pConfirmationWindowSize: 1048576 - auditServiceConfiguration: { - loggingIntervalSec: 60 - }' >> ${BASE_DIR}/firewall.conf - - export TRUSTSTORE_PASSWORD=$(cat ${BASE_DIR}/certificates/credentials/truststorepass) - sed -i -e "s*TRUSTSTORE_PASSWORD*${TRUSTSTORE_PASSWORD}*g" ${BASE_DIR}/firewall.conf - export KEYSTORE_PASSWORD=$(cat ${BASE_DIR}/certificates/credentials/keystorepass) - sed -i -e "s*KEYSTORE_PASSWORD*${KEYSTORE_PASSWORD}*g" ${BASE_DIR}/firewall.conf - export FIREWALL_PASSWORD=$(cat ${BASE_DIR}/certificates/credentials/firewallpass) - sed -i -e "s*FIREWALL_PASSWORD*${FIREWALL_PASSWORD}*g" ${BASE_DIR}/firewall.conf - export FLOAT_PASSWORD=$(cat ${BASE_DIR}/certificates/credentials/floatpass) - sed -i -e "s*FLOAT_PASSWORD*${FLOAT_PASSWORD}*g" ${BASE_DIR}/firewall.conf - - ifNetworkParametersExists () { - if [ -f ${BASE_DIR}/network-parameters ] - then - return 1 - else - return 0 - fi - } - let EXIT_CODE=0 - while [ ${EXIT_CODE} -eq 0 ] - do - sleep 2 - echo "Checking for network parameters" - ifNetworkParametersExists - let EXIT_CODE=$? 
- done - - echo "Starting the firewall" - java -jar corda-firewall.jar --base-directory ${BASE_DIR} --verbose --logging-level=INFO - resources: - requests: - memory: {{ .Values.cordaJarMx }}M - limits: - memory: {{ add .Values.cordaJarMx 2}}M - volumeMounts: - - name: {{ .Values.nodeName }}-volume - mountPath: {{ .Values.volume.baseDir }} - - name: certificates - mountPath: {{ .Values.volume.baseDir }}/certificates - volumes: - - name: certificates - emptyDir: - medium: Memory - volumeClaimTemplates: - - metadata: - name: {{ .Values.nodeName }}-volume -{{- if .Values.pvc.annotations }} - annotations: -{{ toYaml .Values.pvc.annotations | indent 8 }} -{{- end }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }}-volume - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - storageClassName: {{ .Values.storage.name }} - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 256Mi diff --git a/platforms/r3-corda-ent/charts/corda-ent-float/templates/service.yaml b/platforms/r3-corda-ent/charts/corda-ent-float/templates/service.yaml deleted file mode 100644 index 27dfa27daad..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-float/templates/service.yaml +++ /dev/null @@ -1,54 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} - labels: - run: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - selector: - app: {{ .Values.nodeName }} -# we need healthCheckNodePort set to get rid of logs pollution -{{- if (.Values.healthCheckNodePort) }} - healthCheckNodePort: {{ .Values.healthCheckNodePort }} -{{- end }} - ports: - - name: bridge-tunnel - protocol: TCP - port: {{ .Values.bridge.tunnelPort }} - targetPort: {{ .Values.bridge.tunnelPort }} - - name: p2p - protocol: TCP - port: {{ .Values.node.p2pPort }} - targetPort: {{ .Values.node.p2pPort }} -{{- if $.Values.ambassador }} ---- -apiVersion: getambassador.io/v3alpha1 -kind: TCPMapping -metadata: - name: {{ .Values.nodeName }}-{{ .Values.peerName }}-p2p - namespace: {{ .Values.metadata.namespace }} -spec: - port: {{ .Values.ambassador.p2pPort }} - service: {{ .Values.nodeName }}.{{ .Values.metadata.namespace }}:{{ .Values.node.p2pPort }} ---- -apiVersion: getambassador.io/v3alpha1 -kind: TCPMapping -metadata: - name: {{ .Values.nodeName }}-{{ .Values.peerName }}-tunnel - namespace: {{ .Values.metadata.namespace }} -spec: - port: {{ .Values.ambassador.tunnelPort }} - service: {{ .Values.nodeName }}.{{ .Values.metadata.namespace }}:{{ .Values.node.p2pPort }} -{{- end }} diff --git a/platforms/r3-corda-ent/charts/corda-ent-float/values.yaml b/platforms/r3-corda-ent/charts/corda-ent-float/values.yaml deleted file mode 100644 index 0ab0bf505c3..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-float/values.yaml +++ /dev/null @@ -1,146 +0,0 @@ 
-############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Default values for Float Firewall. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -deployment: - annotations: {} - -# Provide the name of the node. -# e.g. nodeName: float -nodeName: float - -# Provide context for the chart -metadata: - # Provide the namespace where the Float will be deployed. - # e.g. namespace: cenm - namespace: cenm - # Provide any extra labels you wish to add to the charts, formatted as key-value pairs. - # e.g. key: "value" - labels: - -# Provide the amount of replicas you want to deploy of the Float. -# e.g. replicas: 1 -replicas: 1 - -pvc: - # Provide any annotations for the Persistent Volume Claims (PVC), formatted as key-value pairs. - # e.g. annotations: - # key: "value" - annotations: - -# This section contains information about storage classes used in the Chart. -storage: - # Provide the name of the storage class. - # NOTE: Make sure that the storage class exists prior to manually running this deployment, - # as this chart does not create the storage class itself. - # e.g. name: cenm - name: cenm - -# Information about the Docker container used for the init-containers. -initContainerImage: - # Provide the name of the image, including the tag. - # e.g. name: ghcr.io/hyperledger/bevel-alpine:latest - name: ghcr.io/hyperledger/bevel-alpine:latest - -# Information about the main image used for the main Float firewall container. -image: - # Provide the name of the image, including the tag - # e.g. name: adopblockchaincloud0502.azurecr.io/corda_image_firewall_4.4:latest - name: adopblockchaincloud0502.azurecr.io/corda_image_firewall_4.4:latest - # Provide the K8s secret that has rights for pulling the image off the registry. - # NOTE: Make sure the secret exists prior to manually running this deployment, - # as this chart does not create the pull secret itself. - # e.g. pullSecret: regcred - pullSecret: "" - # Provide the pull policy for Docker images, either Always or IfNotPresent. - # e.g. pullPolicy: Always - pullPolicy: IfNotPresent - -# Information about the vault. -# NOTE: Make sure that the vault is already unsealed, initialized and configured -# to use the Kubernetes service account token based authentication. -vault: - # Provide the vault address - # Eg. address: http://vault.example.com:8200 - address: - # Provide the vault role used. - # Eg. role: vault-role - role: vault-role - # Provide the authpath configured to be used. - # Eg. authpath: entcordacenm - authpath: entcordacenm - # Provide the service account name autheticated to vault. - # NOTE: Make sure that the service account is already created and autheticated to use the vault. - # Eg. serviceaccountname: vault-auth - serviceaccountname: vault-auth - # Provide the vault path where the certificates are stored - # Eg. certsecretprefix: cenm/certs - certsecretprefix: cenm/certs - -# Provide volume related specifications -volume: - # E.g. baseDir: /opt/corda - baseDir: cenm/certs - -# Provide the maximum size of the memory allocation pool -# e.g. cordaJarMx: 1 -cordaJarMx: 1 - -# Information about the bridge - the other part of the Corda firewall. -bridge: - # Provide the legal X500 name of the bridge. This is the same certificate subject as it is used in the generate-pki. - # E.g. 
"CN=Test Bridge Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - legalName: - # Provide the port number that the Bridge will connect to the Float on. - # e.g. tunnelPort: 39999 - tunnelPort: 39999 - - -float: - # Define the public STATIC IP address NUMBER of the Float, do NOT use a DNS name here! - # e.g. 123.456.789.001 - loadBalancerIP: 123.456.789.001 - -# Information regarding the node which the Float is part of -node: - # Provide the port number where the peer to peer service is running on the node. - # e.g. p2pPort: 40000 - p2pPort: 40000 - -ambassador: - # Provide the ambassador p2p port for float - # e.g. p2pPort: 15019 - p2pPort: 15019 - # Provide the bridge tunnel ambassador port - # e.g. tunnelPort: 15029 - tunnelPort: 15029 - # Provide the external url suffix for float - # e.g. external_url_suffix: blockchain.com - external_url_suffix: - -# healthCheckNodePort set to get rid of logs pollution -# Ex. healthCheckNodePort: 0 -healthCheckNodePort: - -healthcheck: - # Provide the interval in seconds you want to iterate till db to be ready - # E.g. readinesscheckinterval: 10 - readinesscheckinterval: 10 - # Provide the threshold till you want to check if specified db up and running - # E.g. readinessthreshold: 15 - readinessthreshold: 15 - -# DMZ related information to configure the Float within the network -dmz: - # Provide the internal IP address (DNS name preferred) for the DMZ. This is used for the Float address. - # e.g. internal: "dmzinternal" - internal: - # Provide the external IP address (DNS name preferred) for the DMZ. This is used to reference the Node p2p address in the Float. - # e.g. external: "dmzexternal" - external: diff --git a/platforms/r3-corda-ent/charts/corda-ent-h2/Chart.yaml b/platforms/r3-corda-ent/charts/corda-ent-h2/Chart.yaml deleted file mode 100644 index b15d2f27029..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-h2/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-ent: Deploys H2 DB." -name: corda-ent-h2 -version: 1.0.0 diff --git a/platforms/r3-corda-ent/charts/corda-ent-h2/README.md b/platforms/r3-corda-ent/charts/corda-ent-h2/README.md deleted file mode 100644 index 5008f3842fe..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-h2/README.md +++ /dev/null @@ -1,176 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) -[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# H2 Deployment - -- [H2 Deployment Helm Chart](#H2-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Contributing](#contributing) -- [License](#license) - - -## h2 Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/corda-ent-h2) deploys Kubernetes deployment resource for h2 database. - - -## Prerequisites ---- -Before deploying the chart please ensure you have the following prerequisites: - -- Node's database up and running. 
-- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm is installed. - - -## Chart Structure ---- -This chart has following structue: - -``` - - ├── corda-ent-h2 - │ ├── Chart.yaml - │ ├── templates - │ │ ├── deployment.yaml - │ │ ├── pvc.yaml - │ │ └── service.yaml - │ └── values.yaml -``` - -- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed. -- `deployment.yaml` : This file is a configuration file for deployement in Kubernetes.It creates a deployment file with a specified number of replicas and defines various settings for the deployment.Including volume mounts, environment variables, and ports for the container. -- `pvc.yaml` : A PersistentVolumeClaim (PVC) is a request for storage by a user. -- `service.yaml` : This file defines a Kubernetes Service with multiple ports for protocols and targets, and supports Ambassador proxy annotations for specific configurations when using the "ambassador" proxy provider. -- `chart.yaml` : Provides metadata about the chart, such as its name, version, and description. -- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the image, resources, storage, service, etc. - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/corda-ent-h2/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: - -## Parameters ---- - -### Name - -| Name | Description | Default Value | -| -----------| ---------------------------------------| ------------- | -| nodeName | Provide the name of the node | h2 | - - -### Metadata - -| Name | Description | Default Value | -| ----------------| --------------------------------------------------| ------------- | -| namespace | Provide the namespace for the Corda node | cenm | - -### Image - -| Name | Description | Default Value | -| ------------------------ | -----------------------------------------------| --------------- | -| containerName | Provide the containerName of image | hyperledgerlabs/h2:2018 | -| imagePullSecret | Provide the image pull secret of image | "" | - -### Resources - -| Name | Description | Default Value | -| ------------------------ | ---------------------------------------------| --------------- | -| limits | Provide the limit memory for node | "512Mi" | -| requests | Provide the requests memory for node | "512Mi" | - -### storage - -| Name | Description | Default Value | -| --------------------- | -------------------------------------| ------------- | -| Memory | Provide the memory for node | "512Mi" | - -### Service - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ------------- | -| type | Provide the type of service | NodePort | -| tcp port | Provide the tcp port for node | 9101 | -| nodePort | Provide the tcp node port for node | 32001 | -| targetPort | Provide the tcp targetPort for node | 1521 | - -## WEB - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ------------- | -| nodePort | Provide the web node port for node | 32080 | -| targetPort | Provide the tcp targetPort for node | 81 | -| port | Provide the tcp node port for 
node | 8080 | - - - -## Deployment ---- - -To deploy the h2 Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/corda-ent-h2/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install, upgrade, verify, delete the chart: - -To install the chart: -```bash -helm repo add bevel https://hyperledger.github.io/bevel/ -helm install ./corda-ent-h2 -``` - -To upgrade the chart: -```bash -helm upgrade ./corda-ent-h2 -``` - -To verify the deployment: -```bash -kubectl get jobs -n -``` -Note : Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. - -To delete the chart: -```bash -helm uninstall -``` -Note : Replace `` with the desired name for the release. - - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [h2 Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/corda-ent-h2), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - -## License - -This chart is licensed under the Apache v2.0 license. - -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -``` diff --git a/platforms/r3-corda-ent/charts/corda-ent-h2/templates/deployment.yaml b/platforms/r3-corda-ent/charts/corda-ent-h2/templates/deployment.yaml deleted file mode 100644 index ea2b74ec061..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-h2/templates/deployment.yaml +++ /dev/null @@ -1,86 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
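
Recapping the H2 chart's Deployment section above: the inline commands appear to have lost their release-name and namespace placeholders, and the verification step checks Jobs even though this chart renders a StatefulSet and a Service. A hedged end-to-end sketch, assuming a release name of `h2-db` and the chart's default namespace `cenm` and node name `notary`:

```bash
# Assumed release name "h2-db"; "cenm" and "notary" are the chart's default namespace and nodeName.
helm repo add bevel https://hyperledger.github.io/bevel/
helm install h2-db ./corda-ent-h2 --namespace cenm

# The chart renders a StatefulSet and a Service named "<nodeName>db", i.e. "notarydb" with defaults.
kubectl get statefulset,svc,pvc -n cenm
kubectl port-forward svc/notarydb 9101:9101 -n cenm   # H2 TCP port from the Service table above

# Upgrade and removal follow the same pattern.
helm upgrade h2-db ./corda-ent-h2 --namespace cenm
helm uninstall h2-db --namespace cenm
```
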
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ .Values.nodeName }}db - {{- if .Values.deployment.annotations }} - annotations: -{{ toYaml .Values.deployment.annotations | indent 8 }} - {{- end }} - namespace: {{ .Values.metadata.namespace }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }}db - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - serviceName: {{ .Values.nodeName }}db - replicas: 1 - selector: - matchLabels: - app: {{ .Values.nodeName }}db - app.kubernetes.io/name: {{ .Values.nodeName }}db - app.kubernetes.io/instance: {{ .Release.Name }} - updateStrategy: - type: OnDelete - template: - metadata: - labels: - app: {{ .Values.nodeName }}db - app.kubernetes.io/name: {{ .Values.nodeName }}db - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - hostname: {{ .Values.nodeName }}db - securityContext: - fsGroup: 1000 - containers: - - name: {{ .Values.nodeName }}db - image: {{ .Values.image.containerName }} - resources: - limits: - memory: {{ .Values.resources.limits }} - requests: - memory: {{ .Values.resources.requests }} - ports: - - containerPort: 1521 - name: p2p - - containerPort: 81 - name: web - env: - - name: JAVA_OPTIONS - value: -Xmx512m - volumeMounts: - - name: {{ .Values.nodeName }}db-pvc - mountPath: "/opt/h2-data" - readOnly: false - livenessProbe: - tcpSocket: - port: 1521 - initialDelaySeconds: 15 - periodSeconds: 20 - imagePullSecrets: - - name: {{ .Values.image.imagePullSecret }} - volumeClaimTemplates: - - metadata: - name: {{ .Values.nodeName }}db-pvc - {{- if .Values.pvc.annotations }} - annotations: - {{ toYaml .Values.pvc.annotations | indent 8 }} - {{- end }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }}db-pvc - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - storageClassName: {{ .Values.storage.name }} - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: {{ .Values.storage.memory }} diff --git a/platforms/r3-corda-ent/charts/corda-ent-h2/templates/service.yaml b/platforms/r3-corda-ent/charts/corda-ent-h2/templates/service.yaml deleted file mode 100644 index 7d4f13e9f1b..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-h2/templates/service.yaml +++ /dev/null @@ -1,40 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.nodeName }}db - {{- if .Values.service.annotations }} - annotations: -{{ toYaml .Values.service.annotations | indent 8 }} - {{- end }} - namespace: {{ .Values.metadata.namespace }} - labels: - run: {{ .Values.nodeName }}db - app.kubernetes.io/name: {{ $.Values.nodeName }}db - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - type: {{ .Values.service.type }} - selector: - app: {{ .Values.nodeName }}db - ports: - - name: tcp - protocol: TCP - port: {{ .Values.service.tcp.port }} - targetPort: {{ .Values.service.tcp.targetPort}} - {{- if .Values.service.tcp.nodePort }} - nodePort: {{ .Values.service.tcp.nodePort}} - {{- end }} - - name: web - protocol: TCP - port: {{ .Values.service.web.port }} - targetPort: {{ .Values.service.web.targetPort }} - {{- if .Values.service.web.nodePort }} - nodePort: {{ .Values.service.web.nodePort}} - {{- end }} diff --git a/platforms/r3-corda-ent/charts/corda-ent-h2/values.yaml b/platforms/r3-corda-ent/charts/corda-ent-h2/values.yaml deleted file mode 100644 index 25831589ee7..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-h2/values.yaml +++ /dev/null @@ -1,76 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Default values for nodechart. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -#Provide the Name for node to be deployed -#Eg. nodeName: notary -nodeName: notary - -deployment: - annotations: {} - -# This section contains the db metadata. -metadata: - # Provide the namespace for the Corda node. - # Eg. namespace: cenm - namespace: cenm - -# Provide the number of replicas for your pods -# Eg. replicaCount: 1 -replicaCount: - -image: - #Provide the name of image for container - #Eg. containerName: hyperledgerlabs/h2:2018 - containerName: hyperledgerlabs/h2:2018 - #Provide the image pull secret of image - #Eg. pullSecret: regcred - imagePullSecret: "" - -resources: - #Provide the limit memory for node - #Eg. limits: "512Mi" - limits: "512Mi" - #Provide the requests memory for node - #Eg. requests: "512Mi" - requests: "512Mi" - -storage: - name: - #Provide the memory for node - #Eg. memory: 512Mi - memory: 512Mi - -pvc: - annotations: {} - -service: - #Provide the type of service - #Eg. type: NodePort - type: NodePort - tcp: - #Provide the tcp port for node - #Eg. port: 9101 - port: 9101 - #Provide the tcp node port for node - #Eg. port: 32001 - nodePort: - #Provide the tcp targetPort for node - #Eg. targetPort: 1521 - targetPort: 1521 - web: - #Provide the web node port for node - #Eg. port: 32080 - nodePort: - #Provide the tcp targetPort for node - #Eg. targetPort: 81 - targetPort: 81 - #Provide the tcp node port for node - #Eg. 
port: 8080 - port: 8080 diff --git a/platforms/r3-corda-ent/charts/corda-ent-node-initial-registration/Chart.yaml b/platforms/r3-corda-ent/charts/corda-ent-node-initial-registration/Chart.yaml deleted file mode 100644 index f7f259a6326..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-node-initial-registration/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-ent: Job for initial node registration." -name: corda-ent-node-initial-registration -version: 1.0.0 diff --git a/platforms/r3-corda-ent/charts/corda-ent-node-initial-registration/README.md b/platforms/r3-corda-ent/charts/corda-ent-node-initial-registration/README.md deleted file mode 100644 index ce64b0d941b..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-node-initial-registration/README.md +++ /dev/null @@ -1,211 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) -[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# Node-initial-registration Deployment - -- [Node-initial-registration Deployment Helm Chart](#Node-initial-registration-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Contributing](#contributing) -- [License](#license) - - -## node-initial-registration Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/corda-ent-node-initial-registration) helps to delpoy the job for registering the corda node. - - -## Prerequisites ---- -Before deploying the chart please ensure you have the following prerequisites: - -- Node's database up and running. -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm is installed. - -This chart has following structue: -``` - . - ├── node-initial-registration - │ ├── templates - │ │ ├── _helpers.tpl - │ │ └── job.yaml - | ├── Chart.yaml - │ └── values.yaml -``` - -Type of files used: - -- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed. -- `job.yaml` : This file is a configuration file for deployement in Kubernetes.It creates a deployment file with a specified number of replicas and defines various settings for the deployment. Including volume mounts, environment variables, and ports for the container. -- `chart.yaml` : Provides metadata about the chart, such as its name, version, and description. -- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the image, nodeconfig, credenatials, storage, service , vault, etc. -- `_helpers.tpl` : A template file used for defining custom labels and ports for the metrics in the Helm chart. 
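
The prerequisites above assume a Vault server that already trusts the cluster's service-account tokens. A minimal sketch of that one-time setup with the Vault CLI, assuming the chart defaults used elsewhere in this diff (auth path `entcordacenm`, role `vault-role`, service account `vault-auth`, namespace `cenm`); the policy name is purely illustrative:

```bash
# Enable a Kubernetes auth mount at the path the chart's vault.authpath value points to.
vault auth enable -path=entcordacenm kubernetes

# Point the mount at the cluster's token-review endpoint (run from inside a pod, or adapt host/CA/JWT).
vault write auth/entcordacenm/config \
    kubernetes_host="https://${KUBERNETES_SERVICE_HOST}:443" \
    kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt \
    token_reviewer_jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)"

# Bind the role used by the init containers to the vault-auth service account in the cenm namespace.
# "corda-node-policy" is a hypothetical policy granting access to secret/organisation-name/*.
vault write auth/entcordacenm/role/vault-role \
    bound_service_account_names=vault-auth \
    bound_service_account_namespaces=cenm \
    policies=corda-node-policy \
    ttl=60m
```
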
- - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/corda-ent-node-initial-registration/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: - -## Parameters ---- - -### Name - -| Name | Description | Default Value | -| -----------| ------------------------------------| --------------------- | -| nodeName | Provide the name of the node | carrier-registration | - -### Metadata - -| Name | Description | Default Value | -| ----------------| ------------------------------------------------| ------------- | -| namespace | Provide the namespace for the Corda node | cenm | -| labels | Provide the labels to the Corda node | "" | - -### Image - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------------------------------- | --------------------------------------| -| initContainerName | Information about the Docker container used for the init-containers | ghcr.io/hyperledger | -| nodeContainerName | Enterprise node image | azurecr.io/corda_image_ent_4.4:latest | -| ImagePullSecret | Provide the K8s secret that has rights for pulling the image off the registry | "" | -| pullPolicy | Provide the pull policy for Docker images, either Always or IfNotPresent | IfNotPresent | - -### Vault - -| Name | Description | Default Value | -| ------------------------- | --------------------------------------------------------------------------| ------------------- | -| address | Address/URL of the Vault server | "" | -| role | Role used for authentication with Vault | vault-role | -| authpath | Authentication path for Vault | entcordacenm | -| serviceAccountName | Provide the already created service account name autheticated to vault | vault-auth | -| certSecretPrefix | Provide the vault path where the certificates are stored | secret/organisation-name | -| nodePath | Provide the vault orginsation node folder name where certificates stored | carrier | -| retries | Amount of times to retry fetching from/writing to Vault before giving up | 10 | -| retryInterval | Amount of time in seconds to wait after an error occurs | 15 | - -### NodeConf - -| Name | Description | Default Value | -| ------------------------ | -------------------------------------------------------------------------------------- | -----------------------------| -| ambassador | Specify ambassador host:port which will be advertised in addition to p2paddress | "" | -| legalName | Provide the legalName for node | O=Node,OU=Node,L=London,C=GB | -| emailAddress | Email address | dev-node@bevel.com | -| crlCheckSoftFail | crlCheckSoftFail defines if CRL failure a critical error or if we can just fail softly | "true" | -| tlsCertCrlDistPoint | tlsCertCrlDistPoint defines the endpoint for retrieving the CRL of the Corda Network | "" | -| tlsCertCrlIssuer | tlsCertCrlIssuer defines the X500 name of the trusted CRL issuer of the Corda Network | "" | -| jarPath | where is node jar is stored | "bin" | -| devMode | Provide the devMode for corda node | "false" | -| configPath | where service configuration files are stored | "etc" | -| replicas | Provide the number of replicas for your pods | 1 | - -### networkServices - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------------------ | -------------------------------- | -| 
idmanName | Name of the idman | idman | -| doormanURL | doormanURL defines the accesspoint for the Identity Manager server | http://my-identity-manager:10000 | -| networkmapName | name of the networkmapName | networkmap | -| networkMapURL | networkMapURL defines the accesspoint for the Network Map server | http://my-network-map:10000 | -| networkMapDomain | defines the networkMapDomain | "" | - -### Service - -| Name | Description | Default Value | -| --------------------- | -------------------------------------------------------- | ------------- | -| p2p port | p2pPort defines the port number of inbound connections | 10007 | -| p2pAddress | p2pAddress defines the public facing IP address | "" | -| ssh | ssh defines the SSH access options | "" | -| rpc port | Provide the tpc port for node | 30000 | -| rpcadmin port | Provide the rpcadmin port for node | 30009 | - -### dataSourceProperties - -| Name | Description | Default Value | -| --------------------- | ---------------------------------------------------------------------------- | ------------- | -| dataSource | DataSource of the url ,user and passowrd | "" | -| dataSourceClassName | dburl, dbport of the data source class | "" | -| monitoring | if CorDapps that are signed with developer keys will be allowed to load or not| "" | -| allowDevCorDapps | Provide the tpc port for node | 30000 | -| retries | Number of retries to check contents from vault | 10 | -| retryInterval | Interval in seconds between retries | 15 | - -### Healthcheck - -| Name | Description | Default Value | -| ----------------------------| ------------------------------------------------------------------------------| ------------- | -| readinesscheckinterval | Provide the interval in seconds you want to iterate till db to be ready | 5 | -| readinessthreshold | Provide the threshold till you want to check if specified db up and running | 2 | - - - -## Deployment ---- - -To deploy the node-initial-registration Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/corda-ent-node-initial-registration/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install, upgrade,verify, delete the chart: - -To install the chart: -```bash -helm repo add bevel https://hyperledger.github.io/bevel/ -helm install ./corda-ent-node-initial-registration -``` - -To upgrade the chart: -```bash -helm upgrade ./corda-ent-node-initial-registration -``` - -To verify the deployment: -```bash -kubectl get jobs -n -``` -Note : Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. - -To delete the chart: -```bash -helm uninstall -``` -Note : Replace `` with the desired name for the release. - - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [node-initial-registration Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/corda-ent-node-initial-registration), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - - -## License - -This chart is licensed under the Apache v2.0 license. 
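
Recapping the Deployment section above: its inline commands appear to have lost their release-name and namespace placeholders when the markup was stripped. A hedged rendering with assumed values, plus a couple of the overrides described in the parameter tables:

```bash
# Assumed release name "carrier-registration" and the chart's default namespace "cenm".
helm repo add bevel https://hyperledger.github.io/bevel/
helm install carrier-registration ./corda-ent-node-initial-registration \
  --namespace cenm \
  --set vault.address=http://vault.example.com:8200 \
  --set networkServices.doormanURL=http://my-identity-manager:10000 \
  --set networkServices.networkMapURL=http://my-network-map:10000

# The chart creates a Job named after nodeName (carrier-registration by default); watch it complete.
kubectl get jobs -n cenm
kubectl logs job/carrier-registration -c registration -n cenm --follow

# Remove the release once registration has finished.
helm uninstall carrier-registration -n cenm
```
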
- -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -``` diff --git a/platforms/r3-corda-ent/charts/corda-ent-node-initial-registration/templates/_helpers.tpl b/platforms/r3-corda-ent/charts/corda-ent-node-initial-registration/templates/_helpers.tpl deleted file mode 100644 index 7f9b0dc6131..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-node-initial-registration/templates/_helpers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{- define "labels.custom" -}} - {{- range $key, $val := .Values.metadata.labels -}} - {{- $key -}}: {{- $val -}} - {{- end -}} -{{- end -}} diff --git a/platforms/r3-corda-ent/charts/corda-ent-node-initial-registration/templates/job.yaml b/platforms/r3-corda-ent/charts/corda-ent-node-initial-registration/templates/job.yaml deleted file mode 100644 index 792ab56576e..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-node-initial-registration/templates/job.yaml +++ /dev/null @@ -1,461 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: batch/v1 -kind: Job -metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} - labels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . 
| nindent 2 }} -spec: - backoffLimit: 6 - template: - metadata: - labels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - restartPolicy: "OnFailure" - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - hostname: {{ .Values.nodeName }} - securityContext: - fsGroup: 1000 - imagePullSecrets: - - name: {{ .Values.image.imagepullsecret }} - initContainers: - - name: init-certificates - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.nodeConf.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - - name: MOUNT_PATH - value: "/DATA" - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code and curl_response - $curl_response" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - # setting up env to get secrets from vault - echo "Getting secrets from Vault Server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - echo "logged into vault" - - # Creating dirs for storing the certificate - mkdir -p ${MOUNT_PATH}/trust-stores - - # Fetching network-root-truststore certificates from vault - COUNTER=1 - while [ "$COUNTER" -lt {{ $.Values.vault.retries }} ] - do - # get keystores from vault to see if certificates are created and put in vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/root/certs | jq -r 'if .errors then . else . end') - data_info="$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data["data"]')" - - if [ "$data_info" == "null" ] - then - sleep {{ $.Values.vault.retryInterval }} - else - validateVaultResponse "${CERTS_SECRET_PREFIX}/root/certs" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - network_root_truststore=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["network-root-truststore.jks"]') - echo "${network_root_truststore}" | base64 -d > ${MOUNT_PATH}/trust-stores/network-root-truststore.jks - echo "Successfully got network truststore certifcates" - break - fi - COUNTER=`expr "$COUNTER" + 1` - done - if [ "$COUNTER" -ge {{ $.Values.vault.retries }} ] - then - # printing number of trial done before giving up - echo "$COUNTER" - echo "network truststore certificates might not have been put in vault. Giving up!!!" 
- exit 1 - fi - - mkdir -p ${MOUNT_PATH}/tlscerts - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs/{{ .Values.networkServices.idmanName }} | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/certs/{{ .Values.networkServices.idmanName }}" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - IDMAN_CERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["{{ .Values.networkServices.idmanName }}.crt"]') - echo "${IDMAN_CERT}" | base64 -d > ${MOUNT_PATH}/tlscerts/idman.crt - - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs/{{ .Values.networkServices.networkmapName }} | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/certs/{{ .Values.networkServices.networkmapName }}" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - NETWORKMAP_CERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["{{ .Values.networkServices.networkmapName }}.crt"]') - echo "${NETWORKMAP_CERT}" | base64 -d > ${MOUNT_PATH}/tlscerts/networkmap.crt - - # Fetching node keystore, node truststore and network-root-truststore credentials from the vault - mkdir -p ${MOUNT_PATH}/credentials - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TRUSTSTORE_PASSWORD=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["truststore"]') - echo "${TRUSTSTORE_PASSWORD}" > ${MOUNT_PATH}/credentials/truststorepass - KEYSTORE_PASSWORD=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["keystore"]') - echo "${KEYSTORE_PASSWORD}" > ${MOUNT_PATH}/credentials/keystorepass - KEYSTORE_PASSWORD=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["root"]') - echo "${KEYSTORE_PASSWORD}" > ${MOUNT_PATH}/credentials/rootpass - - echo "Done" - volumeMounts: - - name: certificates - mountPath: /DATA - - name: db-healthcheck - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - COUNTER=1 - FLAG=true - # perform health check if db is up and running before starting corda node - while [ "$COUNTER" -le {{ $.Values.dataSourceProperties.retries }} ] - do - DB_NODE={{ .Values.dataSourceProperties.dataSource.dbUrl }}:{{ .Values.dataSourceProperties.dataSource.dbPort }} - STATUS=$(nc -vz $DB_NODE 2>&1 | grep -c open ) - - if [ "$STATUS" == 0 ] - then - FLAG=false - else - FLAG=true - echo "DB up and running" - fi - - if [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, retrying after {{ $.Values.dataSourceProperties.retryInterval }} seconds" - COUNTER=`expr "$COUNTER" + 1` - sleep {{ $.Values.dataSourceProperties.retryInterval }} - else - echo "SUCCESS!" - echo "DB up and running!" - exit 0 - break - fi - done - - if [ "$COUNTER" -gt {{ $.Values.dataSourceProperties.retries }} ] || [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, no DB up and running. Giving up!" 
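
Editorial aside on the init containers above (not part of the Job template): `init-certificates` expects KV v2 secrets under `CERTS_SECRET_PREFIX`, which in Bevel are normally written by the earlier CENM setup flows. A hedged sketch of the expected layout, seeded with the Vault CLI using the default prefix `secret/organisation-name` and purely illustrative values; note that with a KV v2 engine the HTTP API path the script curls gains a `/data/` segment, so the configured prefix must match the mount version in use:

```bash
# Root network truststore, read from <prefix>/root/certs (key: network-root-truststore.jks, base64).
vault kv put secret/organisation-name/root/certs \
    network-root-truststore.jks="$(base64 -w0 network-root-truststore.jks)"

# Identity Manager and Network Map TLS certs, read from <prefix>/certs/<name> (keys: <name>.crt, base64).
vault kv put secret/organisation-name/certs/idman      idman.crt="$(base64 -w0 idman.crt)"
vault kv put secret/organisation-name/certs/networkmap networkmap.crt="$(base64 -w0 networkmap.crt)"

# Keystore/truststore passwords, read from <prefix>/credentials (keys: truststore, keystore, root).
vault kv put secret/organisation-name/credentials \
    truststore='example-truststore-pass' \
    keystore='example-keystore-pass' \
    root='example-root-pass'
```
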
- exit 1 - break - fi - containers: - - name: registration - image: "{{ .Values.image.nodeContainerName }}" - env: - - name: ACCEPT_LICENSE - value: "{{required "You must accept the license agreement to run this software" .Values.acceptLicense }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: ["/bin/bash", "-c"] - args: - - |- - rm -f certificates/done.txt - echo 'myLegalName: "{{ .Values.nodeConf.legalName }}" - emailAddress: "{{ .Values.nodeConf.emailAddress }}" - p2pAddress: "{{ .Values.service.p2pAddress }}:{{ .Values.service.p2pPort }}" - networkServices: { - doormanURL: "{{ .Values.networkServices.doormanURL }}", - networkMapURL: "{{ .Values.networkServices.networkMapURL }}" - } - crlCheckSoftFail: {{ .Values.nodeConf.crlCheckSoftFail }} - {{- if ne .Values.nodeConf.tlsCertCrlDistPoint "" }} - tlsCertCrlDistPoint: "{{ .Values.nodeConf.tlsCertCrlDistPoint }}", - tlsCertCrlIssuer: "{{ .Values.nodeConf.tlsCertCrlIssuer }}" - {{- end }} - devMode: {{ .Values.nodeConf.devMode }} - {{- if .Values.service.ssh.enabled }} - sshd: { - port: {{ .Values.service.ssh.sshdPort }} - } - {{- end }} - rpcSettings: { - address: "0.0.0.0:{{ .Values.service.rpc.port }}", - adminAddress: "0.0.0.0:{{ .Values.service.rpc.adminPort }}" - } - rpcUsers: [ - {{- range $user := .Values.service.rpc.users }} - { - user: "{{ $user.name }}", - password: "{{ $user.password }}", - permissions: [ - "{{ $user.permissions }}" - ] - } - {{- end }} - ] - {{- if .Values.dataSourceProperties.monitoring.enabled }} - jmxMonitoringHttpPort: {{ .Values.dataSourceProperties.monitoring.port }} - {{- end }} - - trustStorePassword: "TRUSTSTORE_PASSWORD" - keyStorePassword : "KEYSTORE_PASSWORD" - detectPublicIp: false - additionalP2PAddresses: ["{{ .Values.nodeConf.ambassador.p2pAddress }}"] - messagingServerAddress: "0.0.0.0:{{ .Values.service.messagingServerPort }}" - messagingServerExternal: false - enterpriseConfiguration: { - externalBridge: {{ .Values.firewall.enabled }} - } - {{- if .Values.dataSourceProperties.allowDevCorDapps.enabled }} - cordappSignerKeyFingerprintBlacklist: [ - ] - {{- end }} - dataSourceProperties: { - dataSourceClassName: "{{ .Values.dataSourceProperties.dataSource.dataSourceClassName }}", - dataSource.url: "{{ .Values.dataSourceProperties.dataSource.url }}", - dataSource.user: "{{ .Values.dataSourceProperties.dataSource.user }}", - dataSource.password: "{{ .Values.dataSourceProperties.dataSource.password }}" - } - database = { - {{- if eq .Values.dataSourceProperties.dataSource.dataSourceClassName "oracle.jdbc.pool.OracleDataSource" }} - schema = xe - {{- end}} - }' > etc/node.conf - - export TRUSTSTORE_PASSWORD=$(cat DATA/credentials/truststorepass) - sed -i -e "s*TRUSTSTORE_PASSWORD*${TRUSTSTORE_PASSWORD}*g" etc/node.conf - export KEYSTORE_PASSWORD=$(cat DATA/credentials/keystorepass) - sed -i -e "s*KEYSTORE_PASSWORD*${KEYSTORE_PASSWORD}*g" etc/node.conf - - SSL_TRUSTSTORE_PASSWORD=$(tr -dc A-Za-z0-9 >/dev/null >>/dev/tcp/$0/$1; do sleep 1; done' ${server} ${port} - - while true - do - if [ ! -f certificates/nodekeystore.jks ] || [ ! -f certificates/sslkeystore.jks ] || [ ! -f certificates/truststore.jks ] - then - echo - echo "Node: running initial registration ..." 
- echo - pwd - java -Djavax.net.ssl.trustStore=${CORDA_SSL_TRUSTSTORE} \ - -jar {{ .Values.nodeConf.jarPath }}/corda.jar \ - initial-registration \ - --config-file={{ .Values.nodeConf.configPath }}/node.conf \ - --log-to-console \ - --network-root-truststore ${NETWORK_ROOT_TRUSTSTORE} \ - --network-root-truststore-password ${NETWORK_ROOT_TRUSTSTORE_PASSWORD} - EXIT_CODE=${?} - else - echo - echo "Node: already registered to IdMan - skipping initial registration." - echo - EXIT_CODE="0" - break - fi - done - - if [ "${EXIT_CODE}" -ne "0" ] - then - HOW_LONG={{ .Values.sleepTimeAfterError }} - echo - echo "Node initial registration failed - exit code: ${EXIT_CODE} (error)" - echo - echo "Going to sleep for requested ${HOW_LONG} seconds to let you login and investigate." - echo - pwd - ls -al - else - HOW_LONG={{ .Values.sleepTime }} - echo - echo "Node initial registration: no errors - sleeping for requested ${HOW_LONG} seconds before disappearing." - echo - fi - sleep ${HOW_LONG} - - touch certificates/done.txt - volumeMounts: - - name: node-etc - mountPath: /opt/corda/etc - - name: node-nodeinfo - mountPath: /opt/corda/additional-node-infos - - name: node-certificates - mountPath: /opt/corda/certificates - - name: certificates - mountPath: /opt/corda/DATA - resources: - requests: - memory: {{ .Values.nodeConf.pod.resources.requests }} - limits: - memory: {{ .Values.nodeConf.pod.resources.limits }} - - name: store-certs - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: FLOAT_VAULT_ADDR - value: {{ $.Values.vault.floatVaultAddress }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: KUBERNETES_AUTH_PATH_FLOATVAULT - value: {{ $.Values.vault.authpathFloat }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.nodeConf.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - - name: MOUNT_PATH - value: "/DATA" - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - } - - # perform check if certificates are ready or not, and upload certificate into vault when ready - COUNTER=1 - cd ${BASE_DIR}/certificates - while [ "$COUNTER" -lt {{ $.Values.vault.retries }} ] - do - if [ -e done.txt ] - then - echo "found certificates, performing vault put" - # setting up env to get secrets from vault - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . 
else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - (echo '{"data": {"nodekeystore.jks": "'; base64 ${BASE_DIR}/certificates/nodekeystore.jks; echo '"}}') | curl -H "X-Vault-Token: ${VAULT_TOKEN}" -d @- ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs/nodekeystore - (echo '{"data": {"sslkeystore.jks": "'; base64 ${BASE_DIR}/certificates/sslkeystore.jks; echo '"}}') | curl -H "X-Vault-Token: ${VAULT_TOKEN}" -d @- ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs/sslkeystore - (echo '{"data": {"truststore.jks": "'; base64 ${BASE_DIR}/certificates/truststore.jks; echo '"}}') | curl -H "X-Vault-Token: ${VAULT_TOKEN}" -d @- ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs/truststore - if [ "$FLOAT_VAULT_ADDR" != "" ] - then - echo "Getting vault token for float vault and putting certificates in it.." - VAULT_TOKEN_FLOAT=$(curl -sS --request POST ${FLOAT_VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH_FLOATVAULT}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN_FLOAT}" - # Put in the certs in float vault is firewall is enabled - (echo '{"data": {"sslkeystore.jks": "'; base64 ${BASE_DIR}/certificates/sslkeystore.jks; echo '"}}') | curl -H "X-Vault-Token: ${VAULT_TOKEN_FLOAT}" -d @- ${FLOAT_VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs/sslkeystore - (echo '{"data": {"truststore.jks": "'; base64 ${BASE_DIR}/certificates/truststore.jks; echo '"}}') | curl -H "X-Vault-Token: ${VAULT_TOKEN_FLOAT}" -d @- ${FLOAT_VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs/truststore - fi - - # get nodekeystore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ $.Values.nodePath }}/certs/nodekeystore | jq -r 'if .errors then . else . end') - TLS_NODEKEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "nodekeystore.jks" ]' 2>&1) - # get sslkeystore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ $.Values.nodePath }}/certs/sslkeystore | jq -r 'if .errors then . else . end') - TLS_SSLKEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "sslkeystore.jks" ]' 2>&1) - # get truststore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ $.Values.nodePath }}/certs/truststore | jq -r 'if .errors then . else . 
end') - TLS_TRUSTSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "truststore.jks" ]' 2>&1) - - if [ "$TLS_NODEKEYSTORE" == "null" ] || [ "$TLS_SSLKEYSTORE" == "null" ] || [ "$TLS_TRUSTSTORE" == "null" ] || [[ "$TLS_NODEKEYSTORE" == "parse error"* ]] || [[ "$TLS_SSLKEYSTORE" == "parse error"* ]] || [[ "$TLS_TRUSTSTORE" == "parse error"* ]] - then - echo "certificates write or read fail" - sleep {{ $.Values.healthcheck.readinessthreshold }} - if [ "$COUNTER" -ge {{ $.Values.vault.retries }} ] - then - echo "Retry attempted $COUNTER times, certificates have not been saved" - exit 1 - fi - fi - COUNTER=`expr "$COUNTER" + 1` - fi - done - volumeMounts: - - name: node-nodeinfo - mountPath: /opt/corda/additional-node-infos - - name: node-certificates - mountPath: /opt/corda/certificates - - name: certificates - mountPath: /opt/corda/DATA - resources: - requests: - memory: {{ .Values.nodeConf.pod.resources.requests }} - limits: - memory: {{ .Values.nodeConf.pod.resources.limits }} - volumes: - - name: node-etc - emptyDir: - medium: Memory - - name: node-nodeinfo - emptyDir: - medium: Memory - - name: node-certificates - emptyDir: - medium: Memory - - name: certificates - emptyDir: - medium: Memory diff --git a/platforms/r3-corda-ent/charts/corda-ent-node-initial-registration/values.yaml b/platforms/r3-corda-ent/charts/corda-ent-node-initial-registration/values.yaml deleted file mode 100644 index 0dde6cb23b4..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-node-initial-registration/values.yaml +++ /dev/null @@ -1,220 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Default values for Node Registration job. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -# Provide the name of the node -# Eg. nodeName: carrier-registration -nodeName: carrier-registration - -# This section contains the Enterprise-Corda node metadata. -metadata: - # Provide the namespace for the Corda node. - # Eg. namespace: cenm - namespace: cenm - # Provide the labels to the Corda node. - labels: - -# Provide image for the containers -image: - # Provide the alpine utils image. - # NOTE: The alpine image used is the base alpine image with CURL installed. - # Eg. initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - # Enterprise node image - # Eg. adopblockchaincloud0502.azurecr.io/corda_image_ent_4.4:latest - nodeContainerName: adopblockchaincloud0502.azurecr.io/corda_image_ent_4.4:latest - # Provide the docker-registry secret created and stored in kubernetes cluster as a secret. - # Eg. imagePullSecret: regcred - imagepullsecret: "" - # Image pull policy - # Eg. Always - pullPolicy: IfNotPresent - -# required parameter -# Accept Corda Enterprise license should be YES. -acceptLicense: YES - -############################################################################################# -# This section contains the vault related information. # -############################################################################################# -# NOTE: Make sure that the vault is already unsealed, intialized and configured to -# use Kubernetes service account token based authentication. - -vault: - # Provide the vault address - # Eg. 
address: http://vault.example.com:8200 - address: - # Provide the vault role used. - # Eg. role: vault-role - role: vault-role - # Provide the authpath configured to be used. - # Eg. authpath: entcordacenm - authpath: entcordacenm - # Provide the service account name autheticated to vault. - # NOTE: Make sure that the service account is already created and autheticated to use the vault. - # Eg. serviceaccountname: vault-auth - serviceaccountname: vault-auth - # Provide the vault path where the certificates are stored - # Eg. certsecretprefix: secret/organisation-name - certsecretprefix: secret/organisation-name - # Provide the vault orginsation node folder name where the certificates are stored - # Eg. nodePath: carrier - nodePath: carrier - # Number of retries to check contents from vault - retries: - # Interval in seconds between retries - retryInterval: - -############################################################# -# Node Configuration # -############################################################# -nodeConf: - ambassador: - p2pPort: - external_url_suffix: - # Node external address - # Eg. p2pAddress: carrier.rce.blockchainexample.com:15010 - p2pAddress: carrier.rce.blockchainexample.com:15010 - # Node legal name - # Eg. legalName: O=Node,OU=Node,L=London,C=GB - legalName: O=Node,OU=Node,L=London,C=GB - # Email address - # Eg. emailAddress: dev-node@bevel.com - emailAddress: dev-node@bevel.com - # crlCheckSoftFail defines if CRL failure is a critical error or if we can just fail softly (by logging an error) and continuing - crlCheckSoftFail: true - # tlsCertCrlDistPoint defines the endpoint for retrieving the CRL (Certificate Revocation List) of the Corda Network, if empty "", not used - # Example from Corda Network UAT network: http://crl.uat.corda.network/nodetls.crl - # Eg. tlsCertCrlDistPoint: "", empty for current usage - tlsCertCrlDistPoint: "" - # tlsCertCrlIssuer defines the X500 name of the trusted CRL issuer of the Corda Network, example from the Corda Network UAT network - # Eg. tlsCertCrlIssuer: "CN=Test TLS Signer Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - tlsCertCrlIssuer: - devMode: false - # Provide volume related specifications - volume: - # Ex baseDir: /opt/corda - baseDir: - # where is node jar is stored - jarPath: bin - # where service configuration files are stored - configPath: etc - # Specify the maximum size of the memory allocation pool - cordaJar: - # Provide the memory size. - # Eg. memorySize: 4096 (if using kilobytes) - # Eg. memorySize: 512 (if using megabytes) - # Eg. memorySize: 1 (if using gigabytes) - memorySize: - # Provide the unit of greatness for the size, one of three options: - # - k or K for kilobytes - # - m or M for megabytes - # - g or G for gigabytes - # Eg. unit: M - unit: - # Set limits of .jar - pod: - resources: - # Provide the limit memory for node - # Eg. limits: 512M - limits: - # Provide the requests memory for node - # Eg. requests: 550M - requests: - -networkServices: - # Ex. idmanName: idman - idmanName: idman - # doormanURL defines the accesspoint for the Identity Manager server (protocol + domain name + port, eg. http://my-identity-manager:10000) - doormanURL: - idmanDomain: - # Ex. networkmapName: networkmap - networkmapName: networkmap - # networkMapURL defines the accesspoint for the Network Map server (protocol + domain name + port, eg. http://my-network-map:10000) - networkMapURL: - networkMapDomain: - -service: - # p2pPort defines the port number of inbound connections - # Eg. 
p2pPort: 40000 - p2pPort: 40000 - # p2pAddress defines the public facing IP address (domain name recommended) and port number of the Node, in the case of using a Float, this address should correspond to the public IP address of the float - # Eg. float-internal.cenm-ent - p2pAddress: float-internal.cenm-ent - # ssh defines the SSH access options - ssh: - # enabled is a boolean value for the above parameter - enabled: true - # sshdPort is the Node Shell access port. - # Eg. sshdPort: 2222 - sshdPort: 2222 - rpc: - # port is the RPC endpoint that the user interface will access to direct the CorDapp on the Corda Node - # Eg. port: 30000 - port: 30000 - # adminPort is the RPC admin endpoint that can be used to do administrative tasks on the Corda Node. - # Eg. adminPort: 30009 - adminPort: 30009 - # users defines the list of RPC users and the permissions they have. - users: - # name defines the name of the RPC user - # Eg. name: carrier-node - - name: - # password defines the password for the RPC user - # Eg. password: nodeP - password: - # permissions defines the RPC permissions available. - permissions: ALL - -############################################################# -# Database Options and Configuration # -############################################################# -dataSourceProperties: - dataSource: - # Ex user: "sa" - user: - # Ex. password: "ziAscD0MJnj4n4xkFWY6XuMBuw9bvYC7" - password: - # Eg. url: "jdbc:h2:tcp://carrierdb:9101/persistence;DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=10000;WRITE_DELAY=100;AUTO_RECONNECT=TRUE;" - url: - # Eg. dataSourceClassName: "org.h2.jdbcx.JdbcDataSource" - dataSourceClassName: - # dbUrl: notarydb - dbUrl: - # dbPort: 9101 - dbPort: - monitoring: - # enabled is a boolean value for the above parameter - enabled: true - # port defines the port on which the monitoring information will be available - port: 8090 - # allowDevCorDapps defines if CorDapps that are signed with developer keys will be allowed to load or not (it clears the cordappSignerKeyFingerprintBlacklist if enabled) - allowDevCorDapps: - # enabled is a boolean value for the above parameter - enabled: true - # Number of retries to check contents from vault - retries: - # Interval in seconds between retries - retryInterval: - -############################################################# -# Settings # -############################################################# -# sleep time in seconds when error while registration -# Ex. sleepTimeAfterError: 120 -sleepTimeAfterError: 120 -# custom sleep time in seconds -sleepTime: 20 -healthcheck: - #Provide the wait interval in seconds in fetching certificates from vault - #Eg. readinesscheckinterval: 5 - readinesscheckinterval: 5 - #Provide the threshold number of retries in fetching certificates from vault - #Eg. readinessthreshold: 2 - readinessthreshold: 2 diff --git a/platforms/r3-corda-ent/charts/corda-ent-node-pki-gen/Chart.yaml b/platforms/r3-corda-ent/charts/corda-ent-node-pki-gen/Chart.yaml deleted file mode 100644 index a2d9baa23b8..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-node-pki-gen/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-ent: Deploys the pki." 
-name: corda-ent-node-pki-gen -version: 1.0.0 diff --git a/platforms/r3-corda-ent/charts/corda-ent-node-pki-gen/README.md b/platforms/r3-corda-ent/charts/corda-ent-node-pki-gen/README.md deleted file mode 100644 index 77a87834e7a..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-node-pki-gen/README.md +++ /dev/null @@ -1,172 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) -[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# Generate-pki-node Deployment - -- [Generate-pki-node Deployment Helm Chart](#Generate-pki-node-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Contributing](#contributing) -- [License](#license) - - -## Generate-pki-node Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/corda-ent-node-pki-gen) generates the corda complaint certificate hierarchy for the node. - - -## Prerequisites ---- -Before deploying the chart please ensure you have the following prerequisites: - -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm is installed. - - -## Chart Structure ---- -This chart has following structue: -``` - ├── generate-pki-node - │ ├── Chart.yaml - │ ├── templates - │ │ ├── job.yaml - │ │ ├── _helpers.tpl - │ └── values.yaml -``` -Type of files used: - -- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed. -- `_helpers.tpl` : A template file used for defining custom labels in the Helm chart. -- `job.yaml` : Defines a Kubernetes Job resource, which is a Kubernetes controller that creates and manages pods to perform task. It creates a deployment file with a specified number of replicas and defines various settings for the deployment. Including volume mounts, environment variables, and initialization tasks using init containers. -- `chart.yaml` : Provides metadata about the chart, such as its name, version, and description. -- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the metadata, image, Vault, volume, subjects, etc. - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/corda-ent-node-pki-gen/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. 
Here are some important configuration options: - -## Parameters ---- - -### Name - -| Name | Description | Default Value | -| -----------| -------------------------------------------------- | ------------- | -| nodeName | Provide the name of the node | carrier | - -### Metadata - -| Name | Description | Default Value | -| ----------------| -------------------------------------------------------------------- | ------------- | -| namespace | Provide the namespace for the Corda PKI Generator for the node | carrier-ent | -| labels | Provide any additional labels for the Corda PKI Generator for node | "" | - -### Image - -| Name | Description | Default Value | -| ------------------------ | ----------------------------------------------------------------------------- | ---------------------------------- | -| initContainerName | Information about the Docker container used for the init-containers | ghcr.io/hyperledger | -| pkiContainerName | Provide the image for the pki container | corda/enterprise-pki:1.2-zulu-openjdk8u242 | -| ImagePullSecret | Provide the K8s secret that has rights for pulling the image off the registry | "" | -| pullPolicy | Provide the pull policy for Docker images, either Always or IfNotPresent | IfNotPresent | - -### Vault - -| Name | Description | Default Value | -| ------------------------- | --------------------------------------------------------------------------| ------------------- | -| address | Address/URL of the Vault server | "" | -| role | Role used for authentication with Vault | vault-role | -| floatVaultAddress | Provide the float vault address | "" | -| authpath | Authentication path for Vault | cordaentcarrier | -| authpathFloat | Provide the authpath configured to be used | cordaentcarrierfloat | -| serviceAccountName | Provide the already created service account name autheticated to vault | vault-auth | -| certSecretPrefix | Provide the vault path where the certificates are stored | secret/carrier/carrier| -| retries | Amount of times to retry fetching from/writing to Vault before giving up | 10 | -| sleepTimeAfterError | Amount of time in seconds to wait after an error occurs | 15 | - -### Subjects - -| Name | Description | Default Value | -| ------------------------- | ---------------------------------------------------| ---------------------------------------------------------------------- | -| firewallca | Mention the subject for the firewallCA |"CN=Test Firewall CA Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" | -| float | Mention the subject for the float component |"CN=Test Firewall CA Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" | -| bridge | Mention the subject for the bridge component |"CN=Test Bridge Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" | - -### volume - -| Name | Description | Default Value | -| ------------------| ------------------------------------------ | ------------- | -| baseDir | Provide the base directory for the container| /opt/corda | - - - -## Deployment ---- - -To deploy the Generate-pki-node Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/corda-ent-node-pki-gen/values.yaml) file to set the desired configuration values. -2. 
Run the following Helm command to install, upgrade, verify delete the chart: - -To install the chart: -```bash -helm repo add bevel https://hyperledger.github.io/bevel/ -helm install ./corda-ent-node-pki-gen -``` - -To upgrade the chart: -```bash -helm upgrade ./corda-ent-node-pki-gen -``` - -To verify the deployment: -```bash -kubectl get jobs -n -``` -Note : Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. - -To delete the chart: -```bash -helm uninstall -``` -Note : Replace `` with the desired name for the release. - - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Generate-pki-node Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/corda-ent-node-pki-gen), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - -## License - -This chart is licensed under the Apache v2.0 license. - -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -``` diff --git a/platforms/r3-corda-ent/charts/corda-ent-node-pki-gen/templates/_helpers.tpl b/platforms/r3-corda-ent/charts/corda-ent-node-pki-gen/templates/_helpers.tpl deleted file mode 100644 index d43c09d8cef..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-node-pki-gen/templates/_helpers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} diff --git a/platforms/r3-corda-ent/charts/corda-ent-node-pki-gen/templates/job.yaml b/platforms/r3-corda-ent/charts/corda-ent-node-pki-gen/templates/job.yaml deleted file mode 100644 index a67e2a68c2e..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-node-pki-gen/templates/job.yaml +++ /dev/null @@ -1,400 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: batch/v1 -kind: Job -metadata: - name: {{ .Values.nodeName }}-generate-pki - namespace: {{ .Values.metadata.namespace }} - labels: - app: {{ .Values.nodeName }}-generate-pki - app.kubernetes.io/name: {{ .Values.nodeName }}-generate-pki - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . 
| nindent 2 }} -spec: - backoffLimit: 6 - template: - metadata: - labels: - app: {{ .Values.nodeName }}-generate-pki - app.kubernetes.io/name: {{ .Values.nodeName }}-generate-pki - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - restartPolicy: "OnFailure" - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - hostname: {{ .Values.nodeName }} - securityContext: - fsGroup: 1000 - initContainers: - - name: init-check-certificates - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - - name: MOUNT_PATH - value: "/certcheck" - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code and curl_response - $curl_response" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - # Setting up the environment to get secrets/certificates from Vault - echo "Getting secrets/certificates from Vault server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - echo "Logged into Vault" - - mkdir -p ${MOUNT_PATH} - - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs/firewall | jq -r 'if .errors then . else . end') - data_info="$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data["data"]')" - - if [ "$data_info" == "null" ] - then - echo "Certficates absent in vault. 
Ignore error warning" - touch ${MOUNT_PATH}/absent.txt - else - validateVaultResponse "${CERTS_SECRET_PREFIX}/certs/firewall" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - echo "Certificates present in vault" - touch ${MOUNT_PATH}/present.txt - fi - - echo "Done checking for certificates in vault" - volumeMounts: - - name: certcheck - mountPath: /certcheck - - name: init-credentials - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - - name: MOUNT_PATH - value: "/DATA" - - name: NODEINFO_MOUNT_PATH - value: "/notary-nodeinfo" - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code and curl_response - $curl_response" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - if [ -e /certcheck/present.txt ] - then - echo "Certificates already present in the vault. Skipping.." - exit 0 - fi - - # Setting up the environment to get secrets from Vault - echo "Getting secrets from Vault Server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"vault-role","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - echo "logged into vault" - - # Creating dirs for storing certificates - mkdir -p ${MOUNT_PATH}; - - # Fetching credentials for truststores - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - - FIREWALL_CA=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["firewallca"]') - echo "${FIREWALL_CA}"> ${MOUNT_PATH}/firewallca - BRIDGE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["bridge"]') - echo "${BRIDGE}"> ${MOUNT_PATH}/bridge - FLOAT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["float"]') - echo "${FLOAT}"> ${MOUNT_PATH}/float - - touch /DATA/done.txt - echo "Done" - volumeMounts: - - name: credentials - mountPath: /DATA - - name: certcheck - mountPath: /certcheck - containers: - - name: pki - image: "{{ required "pki[main]: missing value for .Values.image.pkiContainerName" .Values.image.pkiContainerName }}" - env: - - name: ACCEPT_LICENSE - value: "{{ .Values.acceptLicense }}" - - name: BASE_DIR - value: "{{ .Values.volume.baseDir }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: ["/bin/bash", "-c"] - args: - - |- - if [ -e /certcheck/present.txt ] - then - echo "Certificates already present in the vault. Skipping.." 
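- # The init-check-certificates container already found firewall certificates in Vault, so there is nothing to generate; exit successfully.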
- exit 0 - fi - rm -r ${BASE_DIR}/DATA/done.txt - - echo 'keyStores = { - "bridge" = { - type = LOCAL - file = "./DATA/node/bridge.jks" - password = "BRIDGE" - }, - "float" = { - type = LOCAL - file = "./DATA/node/float.jks" - password = "FLOAT" - }, - "firewallca" = { - type = LOCAL - file = "./DATA/node/firewallca.jks" - password = "FIREWALL_CA" - } - } - certificates = { - "firewallca" = { - key = { - type = LOCAL - includeIn = ["firewallca"] - algorithm = "ECDSA_SECP256R1_SHA256" - password = "FIREWALL_CA" - } - isSelfSigned = true - keyUsages = [DIGITAL_SIGNATURE, KEY_CERT_SIGN, CRL_SIGN] - keyPurposes = [SERVER_AUTH, CLIENT_AUTH] - validDays = 7300 - issuesCertificates = false - subject = {{ .Values.subjects.firewallca | quote }} - }, - "bridge" = { - key = { - type = LOCAL - includeIn = ["bridge"] - algorithm = "ECDSA_SECP256R1_SHA256" - password = "BRIDGE" - } - - signedBy = "firewallca" - keyUsages = [DIGITAL_SIGNATURE, KEY_CERT_SIGN, CRL_SIGN] - keyPurposes = [SERVER_AUTH, CLIENT_AUTH] - validDays = 7300 - issuesCertificates = false - subject = {{ .Values.subjects.bridge | quote }} - }, - "float" = { - key = { - type = LOCAL - includeIn = ["float"] - algorithm = "ECDSA_SECP256R1_SHA256" - password = "FLOAT" - } - signedBy = "firewallca" - keyUsages = [DIGITAL_SIGNATURE, KEY_CERT_SIGN, CRL_SIGN] - keyPurposes = [SERVER_AUTH, CLIENT_AUTH] - validDays = 7300 - issuesCertificates = false - subject = {{ .Values.subjects.float | quote }} - } - }' >> {{ .Values.configPath }}/pki.conf - - #replacement of the variables in the pki conf file - export FIREWALL_CA=$(cat /opt/corda/credentials/firewallca) - sed -i -e "s*FIREWALL_CA*${FIREWALL_CA}*g" {{ .Values.configPath }}/pki.conf - export FLOAT=$(cat /opt/corda/credentials/float) - sed -i -e "s*FLOAT*${FLOAT}*g" {{ .Values.configPath }}/pki.conf - export BRIDGE=$(cat /opt/corda/credentials/bridge) - sed -i -e "s*BRIDGE*${BRIDGE}*g" {{ .Values.configPath }}/pki.conf - - mkdir -p DATA/node - time java -Xmx{{ .Values.cordaJarMx }}M -jar bin/pkitool.jar --config-file {{ .Values.configPath }}/pki.conf --ignore-missing-crl - keytool -exportcert -rfc -alias firewallca -keystore DATA/node/firewallca.jks -storepass ${FIREWALL_CA} -keypass ${FIREWALL_CA} > DATA/node/root.pem - keytool -importcert -noprompt -file DATA/node/root.pem -alias firewallca -keystore DATA/node/trust.jks -storepass ${FIREWALL_CA} - rm DATA/node/root.pem - - #creating a dummy file to perform check if last line is executed or not. 
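- # The store-certs container polls for this done.txt marker before it uploads the generated keystores to Vault.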
- touch ${BASE_DIR}/DATA/done.txt - volumeMounts: - - name: certcheck - mountPath: /certcheck - - name: credentials - mountPath: /opt/corda/credentials - - name: pkitool-certs-keys - mountPath: /opt/corda/DATA - - name: pkitool-etc - mountPath: /opt/corda/etc - resources: - requests: - memory: {{ .Values.cordaJarMx }}M - limits: - memory: {{ add .Values.cordaJarMx 2 }}M - - name: store-certs - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: FLOAT_VAULT_ADDR - value: {{ $.Values.vault.floatVaultAddress }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: KUBERNETES_AUTH_PATH_FLOATVAULT - value: {{ $.Values.vault.authpathFloat }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - } - - if [ -e /certcheck/present.txt ] - then - echo "Certificates already present in the vault. Skipping.." - exit 0 - fi - - # setting up env to access vault - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - - # setting up env to access vault of float - FLOAT_VAULT_TOKEN=$(curl -sS --request POST ${FLOAT_VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH_FLOATVAULT}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'float vault login token' "${FLOAT_VAULT_TOKEN}" - - # putting certificate for firewall - COUNTER=1 - - cd ${BASE_DIR}/DATA - while [ "$COUNTER" -lt {{ $.Values.vault.retries }} ] - do - if [ -e done.txt ] - then - cd ${BASE_DIR}/DATA/node - echo "found firewall certificates, performing vault put for root path" - (echo '{"data": {' - for FILE in *; - do - echo '"'$FILE'": "'; base64 ${FILE}; echo '",' - done; - ) >> ../temp_root.json - sed -i '$ s/.$//' ../temp_root.json - echo '}}' >> ../temp_root.json - cat ../temp_root.json | tr -d '\n' >> ../root.json - echo "before curl" - curl \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - --request POST \ - --data @../root.json \ - ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs/firewall - - echo "Put firewall certificates in float vault.." - - curl \ - --header "X-Vault-Token: ${FLOAT_VAULT_TOKEN}" \ - --request POST \ - --data @../root.json \ - ${FLOAT_VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs/firewall - echo "after curl" - break; - else - echo "certificates are not ready, sleeping ..." 
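- # done.txt has not been written by the pki container yet; wait before the next retry.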
- sleep {{ $.Values.vault.sleepTimeAfterError }} - COUNTER=`expr "$COUNTER" + 1` - fi - done - echo "completed" - volumeMounts: - - name: certcheck - mountPath: /certcheck - - name: pkitool-certs-keys - mountPath: /opt/corda/DATA - readOnly: false - imagePullSecrets: - - name: {{ .Values.image.imagePullSecret }} - volumes: - - name: certcheck - emptyDir: - medium: Memory - - name: credentials - emptyDir: - medium: Memory - - name: pkitool-signer-etc - emptyDir: - medium: Memory - - name: signer-logs - emptyDir: - medium: Memory - - name: pkitool-certs-keys - emptyDir: - medium: Memory - - name: pkitool-etc - emptyDir: - medium: Memory diff --git a/platforms/r3-corda-ent/charts/corda-ent-node-pki-gen/values.yaml b/platforms/r3-corda-ent/charts/corda-ent-node-pki-gen/values.yaml deleted file mode 100644 index f349cfc16e0..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-node-pki-gen/values.yaml +++ /dev/null @@ -1,114 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Default values for PKI Generator chart. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -############################################################# -# Basic Configuration # -############################################################# -# Provide the name of the node -# Eg. nodeName: carrier -nodeName: carrier - -# This section contains the Corda Node metadata. -metadata: - # Provide the namespace for the Corda PKI Generator for the node. - # Eg. namespace: carrier-ent - namespace: carrier-ent - # Provide any additional labels for the Corda PKI Generator for the node. - labels: - -# Provide information regarding the Docker images used. -image: - # Provide the alpine utils image, which is used for all init-containers of deployments/jobs. - # NOTE: The alpine image used is the base alpine image with CURL installed. - # Eg. initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - # Provide the image for the main pki container. - # Eg. pkiContainerName: corda/enterprise-pki:1.2-zulu-openjdk8u242 - pkiContainerName: corda/enterprise-pki:1.2-zulu-openjdk8u242 - # Provide the docker-registry secret created and stored in kubernetes cluster as a secret. - # Eg. imagePullSecret: regcred - imagePullSecret: "" - # Pull policy to be used for the Docker image - # Eg. pullPolicy: Always - pullPolicy: - -# Required parameter to start any .jar files -# Eg. acceptLicense: YES -acceptLicense: - -############################################################# -# HashiCorp Vault Configuration # -############################################################# -# NOTE: Make sure that the vault is already unsealed, intialized and configured to -# use Kubernetes service account token based authentication. -# For more info, see https://www.vaultproject.io/docs/auth/kubernetes - -vault: - # Provide the vault address - # Eg. address: http://vault.example.com:8200 - address: - # Provide the float vault address - # Eg. address: http://vault.example.com:8200 - floatVaultAddress: - # Provide the vault role used. - # Eg. role: vault-role - role: vault-role - # Provide the authpath configured to be used. - # Eg. 
authpath: cordaentcarrier - authPath: cordaentcarrier - # Provide the authpath configured to be used. - # Eg. authpath: cordaentcarrierfloat - authpathFloat: cordaentcarrierfloat - # Provide the service account name autheticated to vault. - # NOTE: Make sure that the service account is already created and autheticated to use the vault. - # Eg. serviceaccountname: vault-auth - serviceAccountName: vault-auth - # Provide the vault path where the certificates are stored - # Eg. certsecretprefix: secret/carrier/carrier - certSecretPrefix: secret/carrier/carrier - # The amount of times to retry fetching from/writing to Vault before giving up. - # Eg. retries: 10 - retries: 10 - # The amount of time in seconds to wait after an error occurs when fetching from/writing to Vault. - # Eg. sleepTimeAfterError: 15 - sleepTimeAfterError: 15 - -############################################################# -# Corda Firewall Details # -############################################################# -# This section details the corda firewall subjects - -subjects: - # Mention the subject for the firewallCA - # Eg. firewallca: "CN=Test Firewall CA Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - firewallca: "CN=Test Firewall CA Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - # Mention the subject for the float component - # Eg. float: "CN=Test Float Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - float: "CN=Test Firewall CA Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - # Mention the subject for the bridge component - # Eg. bridge: "CN=Test Bridge Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - bridge: "CN=Test Bridge Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - -############################################################# -# Settings # -############################################################# -volume: - # Eg. baseDir: /opt/corda - baseDir: /opt/corda -# Mention the maximum size, in megabytes, of the memory allocation pool -# This is consumed by the pki jar -# Eg. cordaJarMx: 256 -cordaJarMx: 256 -# Provide the path where the Corda node configuration files are stored -# Eg. configPath: etc -configPath: etc -#Provide the replicacount -# Eg. replicas: 1 -replicas: 1 diff --git a/platforms/r3-corda-ent/charts/corda-ent-node/Chart.yaml b/platforms/r3-corda-ent/charts/corda-ent-node/Chart.yaml deleted file mode 100644 index b7654d3ad4f..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-node/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-ent: Deploys the corda node." -name: corda-ent-node -version: 1.0.0 diff --git a/platforms/r3-corda-ent/charts/corda-ent-node/README.md b/platforms/r3-corda-ent/charts/corda-ent-node/README.md deleted file mode 100644 index a266eecbc56..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-node/README.md +++ /dev/null @@ -1,229 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) 
-[//]: # (SPDX-License-Identifier: Apache-2.0)
-[//]: # (##############################################################################################)
-
-
-# Node Deployment
-
-- [Node Deployment Helm Chart](#Node-deployment-helm-chart)
-- [Prerequisites](#prerequisites)
-- [Chart Structure](#chart-structure)
-- [Configuration](#configuration)
-- [Deployment](#deployment)
-- [Contributing](#contributing)
-- [License](#license)
-
-
-## Node Deployment Helm Chart
----
-This [Helm chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/corda-ent-node) helps to deploy the Corda node.
-
-
-## Prerequisites
----
-Before deploying the chart, please ensure you have the following prerequisites:
-
-- Node's database up and running.
-- Kubernetes cluster up and running.
-- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication.
-- The Vault is unsealed and initialized.
-- Helm is installed.
-
-This chart has the following structure:
-```
-  .
-  ├── corda-ent-node
-  │   ├── Chart.yaml
-  │   ├── templates
-  │   │   ├── deployment.yaml
-  │   │   ├── _helpers.tpl
-  │   │   ├── pvc.yaml
-  │   │   └── service.yaml
-  │   └── values.yaml
-```
-
-Types of files used:
-
-- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed.
-- `deployment.yaml`: This file is the deployment configuration for Kubernetes. It creates a StatefulSet with the specified number of replicas and defines various settings for the deployment. It includes init containers that retrieve secrets from Vault and check whether node registration is complete, and a main container that runs the Corda node. It also specifies volume mounts for storing certificates and data.
-- `pvc.yaml` : A PersistentVolumeClaim (PVC) is a request for storage by a user.
-- `service.yaml` : This file defines a Kubernetes Service with multiple ports for protocols and targets, and supports Ambassador proxy annotations for specific configurations when using the "ambassador" proxy provider.
-- `Chart.yaml` : Provides metadata about the chart, such as its name, version, and description.
-- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the image, nodeconfig, credentials, storage, service, vault, etc.
-- `_helpers.tpl` : A template file used for defining custom labels in the Helm chart.
-
-
-## Configuration
----
-The [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/corda-ent-node/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options:
-
-## Parameters
----
-
-### Name
-
-| Name | Description | Default Value |
-| ----------- | --------------------------------------- | ------------- |
-| nodeName | Provide the name of the node | carrier |
-
-### Metadata
-
-| Name | Description | Default Value |
-| ---------------- | ---------------------------------------------- | ------------- |
-| namespace | Provide the namespace for the Corda node | cenm |
-| labels | Provide the labels to the Corda node | "" |
-
-### Image
-
-| Name | Description | Default Value |
-| ------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------ |
-| initContainerName | Information about the Docker container used for the init-containers | ghcr.io/hyperledger |
-| nodeContainerName | Enterprise node image | adopblockchaincloud0502.azurecr.io/corda_image_ent_4.4:latest |
-| ImagePullSecret | Provide the K8s secret that has rights for pulling the image off the registry | "" |
-| pullPolicy | Provide the pull policy for Docker images, either Always or IfNotPresent | IfNotPresent |
-
-### storage
-
-| Name | Description | Default Value |
-| ----- | ------------------------------------------ | ------------- |
-| Name | Provide the name of the storageclass | cenm |
-
-### Vault
-
-| Name | Description | Default Value |
-| ------------------- | ------------------------------------------------------------------------------------ | ------------------------ |
-| address | Address/URL of the Vault server | "" |
-| role | Role used for authentication with Vault | vault-role |
-| authpath | Authentication path for Vault | entcordacenm |
-| serviceAccountName | Provide the already created service account name authenticated to vault | vault-auth |
-| certSecretPrefix | Provide the vault path where the certificates are stored | secret/organisation-name |
-| nodePath | Provide the vault organisation node folder name where the certificates are stored | carrier |
-| retries | Number of times to retry fetching from/writing to Vault before giving up | 20 |
-| retryInterval | Amount of time in seconds to wait after an error occurs | 30 |
-
-### cordapps
-
-| Name | Description | Default Value |
-| ------------ | --------------------------------------------------- | --------------- |
-| getcordapps | Specify whether CorDapp jars should be downloaded | false |
-| repository | Provide the repository of cordapps | "" |
-| jars url | Provide the URL used to download the jar | "" |
-
-### NodeConf
-
-| Name | Description | Default Value |
-| --------------------- | --------------------------------------------------------------------------------------------- | ----------------------------- |
-| ambassador | Specify the ambassador host:port which will be advertised in addition to the p2pAddress | "" |
-| legalName | Provide the legalName for the node | O=Node,OU=Node,L=London,C=GB |
-| emailAddress | Email address | dev-node@bevel.com |
-| crlCheckSoftFail | crlCheckSoftFail defines if a CRL failure is a critical error or if we can just fail softly | "true" |
-| tlsCertCrlDistPoint | tlsCertCrlDistPoint defines the endpoint for retrieving the CRL of the Corda Network | "" |
-| tlsCertCrlIssuer | tlsCertCrlIssuer defines the X500 name of the trusted CRL issuer of the Corda Network | "" |
-| jarPath | Path where the node jar is stored | "bin" |
-| devMode | Provide the devMode for the Corda node | "false" |
-| configPath | Path where the service configuration files are stored | "etc" |
-| replicas | Provide the number of replicas for your pods | 1 |
-
-### networkServices
-
-| Name | Description | Default Value |
-| ------------------ | ------------------------------------------------------------------ | --------------------------------- |
-| idmanName | Name of the idman service | idman |
-| doormanURL | doormanURL defines the accesspoint for the Identity Manager server | http://my-identity-manager:10000 |
-| networkmapName | Name of the networkmap service | networkmap |
-| networkMapURL | networkMapURL defines the accesspoint for the Network Map server | http://my-network-map:10000 |
-| networkMapDomain | Defines the Network Map domain | "" |
-
-### Service
-
-| Name | Description | Default Value |
-| -------------- | -------------------------------------------------------- | ------------- |
-| p2p port | p2pPort defines the port number of inbound connections | 10007 |
-| p2pAddress | p2pAddress defines the public facing IP address | "" |
-| ssh | ssh defines the SSH access options | "" |
-| rpc port | Provide the RPC port for the node | 30000 |
-| rpcadmin port | Provide the RPC admin port for the node | 30009 |
-
-### dataSourceProperties
-
-| Name | Description | Default Value |
-| -------------------- | -------------------------------------------------------------------------------------------------- | ------------- |
-| dataSource | Data source url, user and password | "" |
-| dataSourceClassName | Data source class name (dbUrl and dbPort are used for the DB health check) | "" |
-| monitoring | Defines if JMX monitoring is enabled and the port on which the monitoring information is available | true |
-| allowDevCorDapps | Defines if CorDapps that are signed with developer keys will be allowed to load or not | true |
-| retries | Number of retries for the DB health check | 10 |
-| retryInterval | Interval in seconds between retries | 15 |
-
-### Healthcheck
-
-| Name | Description | Default Value |
-| ------------------------ | --------------------------------------------------------------------------------------- | ------------- |
-| readinesscheckinterval | Provide the wait interval in seconds between checks that the DB is ready | 5 |
-| readinessthreshold | Provide the threshold of retries for checking that the specified DB is up and running | 2 |
-
-
-
-## Deployment
----
-
-To deploy the node Helm chart, follow these steps:
-
-1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/corda-ent-node/values.yaml) file to set the desired configuration values.
-2. Run the following Helm command to install, upgrade, verify, or delete the chart:
-
-To install the chart:
-```bash
-helm repo add bevel https://hyperledger.github.io/bevel/
-helm install <release-name> ./corda-ent-node
-```
-
-To upgrade the chart:
-```bash
-helm upgrade <release-name> ./corda-ent-node
-```
-
-To verify the deployment:
-```bash
-kubectl get statefulsets -n <namespace>
-```
-Note: Replace `<namespace>` with the actual namespace where the StatefulSet was created. This command will display information about the StatefulSet, including the number of ready replicas and the current status of its pods.
-
-To delete the chart:
-```bash
-helm uninstall <release-name>
-```
-Note: Replace `<release-name>` with the name of the release.
-
-
-
-## Contributing
----
-If you encounter any bugs, have suggestions, or would like to contribute to the [node Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/corda-ent-node), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel).
-
-
-
-## License
-
-This chart is licensed under the Apache v2.0 license. 
- -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -``` diff --git a/platforms/r3-corda-ent/charts/corda-ent-node/templates/_helpers.tpl b/platforms/r3-corda-ent/charts/corda-ent-node/templates/_helpers.tpl deleted file mode 100644 index 7f9b0dc6131..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-node/templates/_helpers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{- define "labels.custom" -}} - {{- range $key, $val := .Values.metadata.labels -}} - {{- $key -}}: {{- $val -}} - {{- end -}} -{{- end -}} diff --git a/platforms/r3-corda-ent/charts/corda-ent-node/templates/deployment.yaml b/platforms/r3-corda-ent/charts/corda-ent-node/templates/deployment.yaml deleted file mode 100644 index 758300d5209..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-node/templates/deployment.yaml +++ /dev/null @@ -1,465 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} - {{- if .Values.nodeConf.deployment.annotations }} - annotations: -{{ toYaml .Values.nodeConf.deployment.annotations | indent 8 }} - {{- end }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - serviceName: {{ .Values.nodeName }} - replicas: {{ .Values.nodeConf.replicas }} - selector: - matchLabels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - template: - metadata: - labels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - securityContext: - fsGroup: 1000 - initContainers: - - name: init-check-registration - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: 
${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code and curl_response - $curl_response" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - # setting up env to get secrets from vault - echo "Getting secrets from Vault Server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - echo "logged into vault" - COUNTER=1 - while [ "$COUNTER" -lt {{ $.Values.vault.retries }} ] - do - # get truststore from vault to see if registration is done or not - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs/truststore | jq -r 'if .errors then . else . end') - data_info="$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data["data"]')" - - if [ "$data_info" == "null" ] - then - sleep {{ $.Values.vault.retryInterval }} - else - validateVaultResponse "${CERTS_SECRET_PREFIX}/certs/truststore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - break - fi - COUNTER=`expr "$COUNTER" + 1` - done - - if [ "$COUNTER" -ge {{ $.Values.vault.retries }} ] - then - # printing number of trial done before giving up - echo "$COUNTER" - echo "Node registration might not have been done." - exit 1 - fi - echo "Done" - - name: init-certificates - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.nodeConf.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code and curl_response - $curl_response" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - # setting up env to get secrets from vault - echo "Getting secrets from Vault Server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - - OUTPUT_PATH=${BASE_DIR}/certificates - - # get nodekeystore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs/nodekeystore | jq -r 'if .errors then . else . 
end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/certs/nodekeystore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TLS_NODEKEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["nodekeystore.jks"]') - echo "${TLS_NODEKEYSTORE}" | base64 -d > ${OUTPUT_PATH}/nodekeystore.jks - - # get sslkeystore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs/sslkeystore | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/certs/sslkeystore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TLS_SSLKEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["sslkeystore.jks"]') - echo "${TLS_SSLKEYSTORE}" | base64 -d > ${OUTPUT_PATH}/sslkeystore.jks - - # get truststore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs/truststore | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/certs/truststore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TLS_TRUSTSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["truststore.jks"]') - echo "${TLS_TRUSTSTORE}" | base64 -d > ${OUTPUT_PATH}/truststore.jks - echo "Done" - - mkdir -p ${OUTPUT_PATH}/tlscerts - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs/{{ .Values.networkServices.idmanName }} | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/certs/{{ .Values.networkServices.idmanName }}" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - IDMAN_CERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["{{ .Values.networkServices.idmanName }}.crt"]') - echo "${IDMAN_CERT}" | base64 -d > ${OUTPUT_PATH}/tlscerts/idman.crt - - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs/{{ .Values.networkServices.networkmapName }} | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/certs/{{ .Values.networkServices.networkmapName }}" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - NETWORKMAP_CERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["{{ .Values.networkServices.networkmapName }}.crt"]') - echo "${NETWORKMAP_CERT}" | base64 -d > ${OUTPUT_PATH}/tlscerts/networkmap.crt - - # Fetching the node tlscerts - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/tlscerts | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/tlscerts" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - NODE_CERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["tlscacerts"]') - echo "${NODE_CERT}" | base64 -d > ${OUTPUT_PATH}/tlscerts/node.crt - - # Fetching node keystore, node truststore and network-root-truststore credentials from vault - mkdir -p ${OUTPUT_PATH}/credentials - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials | jq -r 'if .errors then . else . 
end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TRUSTSTORE_PASSWORD=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["truststore"]') - echo "${TRUSTSTORE_PASSWORD}" > ${OUTPUT_PATH}/credentials/truststorepass - KEYSTORE_PASSWORD=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["keystore"]') - echo "${KEYSTORE_PASSWORD}" > ${OUTPUT_PATH}/credentials/keystorepass - KEYSTORE_PASSWORD=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["root"]') - echo "${KEYSTORE_PASSWORD}" > ${OUTPUT_PATH}/credentials/rootpass - echo "Done" - volumeMounts: - - name: node-certificates - mountPath: {{ $.Values.nodeConf.volume.baseDir }}/certificates - - name: db-healthcheck - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - COUNTER=1 - FLAG=true - # perform health check if db is up and running before starting corda node - while [ "$COUNTER" -le {{ $.Values.dataSourceProperties.retries }} ] - do - DB_NODE={{ .Values.dataSourceProperties.dataSource.dbUrl }}:{{ .Values.dataSourceProperties.dataSource.dbPort }} - STATUS=$(nc -vz $DB_NODE 2>&1 | grep -c open ) - if [ "$STATUS" == 0 ] - then - FLAG=false - else - FLAG=true - echo "DB up and running" - fi - if [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, retrying after {{ $.Values.vault.retryInterval }} seconds" - COUNTER=`expr "$COUNTER" + 1` - sleep {{ $.Values.vault.retryInterval }} - else - echo "SUCCESS!" - echo "DB up and running!" - exit 0 - break - fi - done - if [ "$COUNTER" -gt {{ $.Values.dataSourceProperties.retries }} ] || [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, no DB up and running. Giving up!" - exit 1 - break - fi - - name: init-cordapps - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - command: ["sh", "-c"] - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.nodeConf.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - args: - - |- - # crearting cordapps dir in volume to keep jars - {{- if .Values.cordapps.getcordapps }} - mkdir -p ${BASE_DIR}/cordapps - mkdir -p /tmp/downloaded-jars - # setting up env to get secrets from vault - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - echo "Getting secrets from Vault Server" - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - - # save cordapps login password from vault - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials | jq -r 'if .errors then . else . 
end') - REPO_USER_PASS=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["repo_password"]') - REPO_USER=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["repo_username"]') - - # Downloading official corda provided jars using curl - {{- range .Values.cordapps.jars }} - cd /tmp/downloaded-jars && curl -u $REPO_USER:$REPO_USER_PASS -O -L {{ .url }} - {{- end }} - cp -ar /tmp/downloaded-jars/* ${BASE_DIR}/cordapps - {{- end }} - volumeMounts: - - name: {{ .Values.nodeName }}-volume - mountPath: {{ .Values.nodeConf.volume.baseDir }} - containers: - - name: node - image: "{{ .Values.image.nodeContainerName }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - env: - - name: ACCEPT_LICENSE - value: "{{required "You must accept the license agreement to run this software" .Values.acceptLicense }}" - - name: BASE_DIR - value: {{ $.Values.nodeConf.volume.baseDir }} - command: ["/bin/bash", "-c"] - args: - - |- - # Create directory - # mkdir -p ${BASE_DIR}/etc - - # Create node.conf configuration file - echo 'myLegalName: "{{ .Values.nodeConf.legalName }}" - emailAddress: "{{ .Values.nodeConf.emailAddress }}" - p2pAddress: "{{ .Values.service.p2pAddress }}:{{ .Values.service.p2pPort }}" - networkServices: { - doormanURL: "{{ .Values.networkServices.doormanURL }}", - networkMapURL: "{{ .Values.networkServices.networkMapURL }}" - } - crlCheckSoftFail: {{ .Values.nodeConf.crlCheckSoftFail }} - {{- if ne .Values.nodeConf.tlsCertCrlDistPoint "" }} - tlsCertCrlDistPoint: "{{ .Values.nodeConf.tlsCertCrlDistPoint }}", - tlsCertCrlIssuer: "{{ .Values.nodeConf.tlsCertCrlIssuer }}" - {{- end }} - devMode: {{ .Values.nodeConf.devMode }} - {{- if .Values.service.ssh.enabled }} - sshd: { - port: {{ .Values.service.ssh.sshdPort }} - } - {{- end }} - rpcSettings: { - address: "0.0.0.0:{{ .Values.service.rpc.port }}", - adminAddress: "0.0.0.0:{{ .Values.service.rpc.adminPort }}" - } - rpcUsers: [ - {{- range $user := .Values.service.rpc.users }} - { - user: "{{ $user.name }}", - password: "{{ $user.password }}", - permissions: [ - "{{ $user.permissions }}" - ] - } - {{- end }} - ] - {{- if .Values.dataSourceProperties.monitoring.enabled }} - jmxMonitoringHttpPort: {{ .Values.dataSourceProperties.monitoring.port }} - {{- end }} - - trustStorePassword: "TRUSTSTORE_PASSWORD" - keyStorePassword : "KEYSTORE_PASSWORD" - detectPublicIp: false - additionalP2PAddresses: ["{{ .Values.nodeConf.ambassador.p2pAddress }}"] - messagingServerAddress: "0.0.0.0:{{ .Values.service.messagingServerPort }}" - messagingServerExternal: false - enterpriseConfiguration: { - externalBridge: {{ .Values.firewall.enabled }} - } - {{- if .Values.dataSourceProperties.allowDevCorDapps.enabled }} - cordappSignerKeyFingerprintBlacklist: [ - ] - {{- end }} - dataSourceProperties: { - dataSourceClassName: "{{ .Values.dataSourceProperties.dataSource.dataSourceClassName }}", - dataSource.url: "{{ .Values.dataSourceProperties.dataSource.url }}", - dataSource.user: "{{ .Values.dataSourceProperties.dataSource.user }}", - dataSource.password: "{{ .Values.dataSourceProperties.dataSource.password }}" - } - database = { - {{- if eq .Values.dataSourceProperties.dataSource.dataSourceClassName "oracle.jdbc.pool.OracleDataSource" }} - schema = xe - {{- end}} - }' > ${BASE_DIR}/node.conf - - # Replace placeholders in node.conf with actual passwords - export TRUSTSTORE_PASSWORD=$(cat ${BASE_DIR}/certificates/credentials/truststorepass) - sed -i -e "s*TRUSTSTORE_PASSWORD*${TRUSTSTORE_PASSWORD}*g" ${BASE_DIR}/node.conf - export KEYSTORE_PASSWORD=$(cat 
${BASE_DIR}/certificates/credentials/keystorepass) - sed -i -e "s*KEYSTORE_PASSWORD*${KEYSTORE_PASSWORD}*g" ${BASE_DIR}/node.conf - - # Clean or remove network-parameters on every restart - rm -rf ${BASE_DIR}/network-parameters - - # Import certificates into truststore.jks - yes | keytool -importcert -file ${BASE_DIR}/certificates/tlscerts/networkmap.crt -storepass $TRUSTSTORE_PASSWORD -alias {{ .Values.networkServices.networkMapDomain }} -keystore ${BASE_DIR}/certificates/truststore.jks - yes | keytool -importcert -file ${BASE_DIR}/certificates/tlscerts/idman.crt -storepass $TRUSTSTORE_PASSWORD -alias {{ .Values.networkServices.idmanDomain }} -keystore ${BASE_DIR}/certificates/truststore.jks - yes | keytool -importcert -file ${BASE_DIR}/certificates/tlscerts/node.crt -storepass $TRUSTSTORE_PASSWORD -alias {{ .Values.nodeName }} -keystore ${BASE_DIR}/certificates/truststore.jks - - # Start a new shell session - /bin/sh - - # Retrieve keystore password again - KEYSTORE_PASSWORD=$(cat ${BASE_DIR}/certificates/credentials/keystorepass) - - # Check if the 'corda.jar' file exists - if [ -f {{ .Values.nodeConf.jarPath }}/corda.jar ] - then - echo -e "\nStarting Node node ...\n" - - # Run migration scripts for database schema upgradation and then start the Corda-ent 'node' node - java -Djavax.net.ssl.trustStore=${BASE_DIR}/certificates/truststore.jks -Djavax.net.ssl.keyStore=${BASE_DIR}/certificates/sslkeystore.jks -Djavax.net.ssl.keyStorePassword=${KEYSTORE_PASSWORD} -jar {{ .Values.nodeConf.jarPath }}/corda.jar run-migration-scripts --core-schemas --app-schemas -f ${BASE_DIR}/node.conf --base-directory ${BASE_DIR} --log-to-console - # start the Corda-ent 'node' node, setting javax.net.ssl.keyStore as ${BASE_DIR}/certificates/sslkeystore.jks since keystore gets reset when using h2 ssl - java -Djavax.net.ssl.trustStore=${BASE_DIR}/certificates/truststore.jks -Djavax.net.ssl.keyStore=${BASE_DIR}/certificates/sslkeystore.jks -Djavax.net.ssl.keyStorePassword=${KEYSTORE_PASSWORD} -jar {{ .Values.nodeConf.jarPath }}/corda.jar -f ${BASE_DIR}/node.conf --base-directory ${BASE_DIR} --log-to-console - # Capture the exit code of the previous command - EXIT_CODE=${?} - else - echo "Error: 'corda.jar' file is not found in the {{ .Values.nodeConf.jarPath }} folder." 
- # Additionally, manually check the availability of 'corda.jar' file at the same path - ls -al {{ .Values.nodeConf.jarPath }} - # Set to '1' to indicate an error - EXIT_CODE=1 - fi - - # Handle node failure - if [ "${EXIT_CODE}" -ne "0" ] - then - HOW_LONG={{ .Values.sleepTimeAfterError }} - echo "\nNode failed - exit code: ${EXIT_CODE} (error)\n" - echo "Going to sleep for requested ${HOW_LONG} seconds to let you login and investigate.\n" - sleep ${HOW_LONG} - fi - - echo "DONE" - volumeMounts: - - name: {{ .Values.nodeName }}-volume - mountPath: {{ .Values.nodeConf.volume.baseDir }} - - name: node-certificates - mountPath: {{ .Values.nodeConf.volume.baseDir }}/certificates - resources: - requests: - memory: {{ .Values.nodeConf.pod.resources.requests }} - limits: - memory: {{ .Values.nodeConf.pod.resources.limits }} - - name: logs - image: "{{ .Values.image.nodeContainerName }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - env: - - name: ACCEPT_LICENSE - value: "{{required "You must accept the license agreement to run this software" .Values.acceptLicense }}" - command: ["/bin/bash", "-c"] - args: - - |- - # Change directory to the specified base directory for Corda-ent node logs - cd {{ .Values.nodeConf.volume.baseDir }}/ - # Continuously display the content of all log files in the 'logs' directory - tail -f logs/*.log 2>/dev/null - # If the logs are not available, enter an indefinite wait state - tail -f /dev/null - volumeMounts: - - name: {{ .Values.nodeName }}-volume - mountPath: {{ .Values.nodeConf.volume.baseDir }} - imagePullSecrets: - - name: {{ .Values.image.imagepullsecret }} - volumes: - - name: node-certificates - emptyDir: - medium: Memory - volumeClaimTemplates: - - metadata: - name: {{ .Values.nodeName }}-volume -{{- if .Values.nodeConf.pvc.annotations }} - annotations: -{{ toYaml .Values.nodeConf.pvc.annotations | indent 8 }} -{{- end }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }}-volume - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - storageClassName: {{ .Values.storage.name }} - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 64Mi diff --git a/platforms/r3-corda-ent/charts/corda-ent-node/templates/service.yaml b/platforms/r3-corda-ent/charts/corda-ent-node/templates/service.yaml deleted file mode 100644 index c509f58ad51..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-node/templates/service.yaml +++ /dev/null @@ -1,81 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} - labels: - run: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - selector: - app: {{ .Values.nodeName }} - # we need healthCheckNodePort set to get rid of logs pollution - {{- if (.Values.healthCheckNodePort) }} - healthCheckNodePort: {{ .Values.healthCheckNodePort }} - {{- end }} - {{- if (.Values.service.type) }} - type: {{ .Values.service.type }} - {{- end }} - ports: - - port: {{ .Values.service.messagingServerPort }} - targetPort: {{ .Values.service.messagingServerPort }} - protocol: TCP - name: p2p - - port: {{ .Values.service.rpc.port }} - targetPort: {{ .Values.service.rpc.port }} - protocol: TCP - name: rpc - - port: {{ .Values.service.ssh.sshdPort }} - targetPort: {{ .Values.service.ssh.sshdPort }} - protocol: TCP - name: ssh -{{- if and ($.Values.nodeConf.ambassador.p2pAddress) (eq .Values.firewall.enabled false) }} ---- -apiVersion: getambassador.io/v3alpha1 -kind: Host -metadata: - name: {{ .Values.nodeName }}-host -spec: - hostname: {{ .Values.nodeName }}.{{ .Values.nodeConf.ambassador.external_url_suffix }} - acmeProvider: - authority: none - requestPolicy: - insecure: - action: Reject - tlsSecret: - name: {{ .Values.nodeName }}-ambassador-certs - namespace: {{ .Values.metadata.namespace }} - tls: - min_tls_version: v1.2 ---- -apiVersion: getambassador.io/v3alpha1 -kind: TCPMapping -metadata: - name: {{ .Values.nodeName }}-p2p - namespace: {{ .Values.metadata.namespace }} -spec: - host: {{ .Values.nodeName }}.{{ .Values.nodeConf.ambassador.external_url_suffix }} - port: {{ .Values.nodeConf.ambassador.p2pPort }} - service: {{ .Values.nodeName }}.{{ .Values.metadata.namespace }}:{{ .Values.service.p2pPort }} ---- -apiVersion: getambassador.io/v3alpha1 -kind: TLSContext -metadata: - name: {{ .Values.nodeName }}-tlscontext - namespace: {{ .Values.metadata.namespace }} -spec: - hosts: - - {{ .Values.nodeName }}.{{ .Values.nodeConf.ambassador.external_url_suffix }} - secret: {{ .Values.nodeName }}-ambassador-certs.{{ .Values.metadata.namespace }} - secret_namespacing: true - min_tls_version: v1.2 -{{- end }} diff --git a/platforms/r3-corda-ent/charts/corda-ent-node/values.yaml b/platforms/r3-corda-ent/charts/corda-ent-node/values.yaml deleted file mode 100644 index 7f3ce110cd2..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-node/values.yaml +++ /dev/null @@ -1,255 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Default values for Node Setup. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -# Provide the name of the node -# Eg. nodeName: carrier -nodeName: carrier - -# This section contains the Enterprise-Corda node metadata. -metadata: - # Provide the namespace for the Corda node. - # Eg. namespace: cenm - namespace: cenm - # Provide the labels to the Corda node. 
- labels: - -# Provide image for the containers -image: - # Provide the alpine utils image. - # NOTE: The alpine image used is the base alpine image with CURL installed. - # Eg. initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - # Enterprise node image - # Eg. adopblockchaincloud0502.azurecr.io/corda_image_ent_4.4:latest - nodeContainerName: adopblockchaincloud0502.azurecr.io/corda_image_ent_4.4:latest - # Provide the docker-registry secret created and stored in kubernetes cluster as a secret. - # Eg. imagePullSecret: regcred - imagepullsecret: "" - # Image pull policy - # Eg. Always - pullPolicy: IfNotPresent - -# This section contains the storage information. -storage: - # Provide the name of the storageclass. - # NOTE: Make sure that the storageclass exist prior to this deployment as - # this chart doesn't create the storageclass. - # Eg. name: cenm - name: cenm - -# required parameter -# Accept Corda Enterprise license should be YES. -acceptLicense: YES - -############################################################################################# -# This section contains information related to HashiCorp Vault. # -############################################################################################# -# NOTE: Make sure that the vault is already unsealed, intialized and configured to -# use Kubernetes service account token based authentication. - -vault: - # Provide the vault address - # Eg. address: http://vault.example.com:8200 - address: - # Provide the vault role used. - # Eg. role: vault-role - role: vault-role - # Provide the authpath configured to be used. - # Eg. authpath: entcordacenm - authpath: entcordacenm - # Provide the service account name autheticated to vault. - # NOTE: Make sure that the service account is already created and autheticated to use the vault. - # Eg. serviceaccountname: vault-auth - serviceaccountname: vault-auth - # Provide the vault path where the certificates are stored - # Eg. certsecretprefix: secret/organisation-name - certsecretprefix: secret/organisation-name - # Provide the vault orginsation node folder name where the certificates are stored - # Eg. nodePath: carrier - nodePath: carrier - # Number of retries to check contents from vault - retries: 20 - # Interval in seconds between retries - retryInterval: 30 - -############################################################# -# Node and CorDapps Configuration # -############################################################# -cordapps: - #Provide if you want to provide jars in cordapps - #Eg. getcordapps: true or false - getcordapps: - repository: - jars: - #Provide url to download the jar using wget cmd - #Eg. url: https://ci-artifactory.corda.r3cev.com/artifactory/corda-releases/net/corda/corda-finance/3.3-corda/corda-finance-3.3-corda.jar - - url: - - url: - -nodeConf: - ambassador: - p2pPort: - external_url_suffix: - # Node external address - # Eg. p2pAddress: carrier.rce.blockchainexample.com:15010 - p2pAddress: carrier.rce.blockchainexample.com:15010 - # Node legal name - # Eg. legalName: O=Node,OU=Node,L=London,C=GB - legalName: O=Node,OU=Node,L=London,C=GB - # Email address - # Eg. 
emailAddress: dev-node@bevel.com - emailAddress: - # crlCheckSoftFail defines if CRL failure is a critical error or if we can just fail softly (by logging an error) and continuing - crlCheckSoftFail: true - # tlsCertCrlDistPoint defines the endpoint for retrieving the CRL (Certificate Revocation List) of the Corda Network, if empty "", not used - # Example from Corda Network UAT network: http://crl.uat.corda.network/nodetls.crl - # Eg. tlsCertCrlDistPoint: "", empty for current usage - tlsCertCrlDistPoint: "" - # tlsCertCrlIssuer defines the X500 name of the trusted CRL issuer of the Corda Network, example from the Corda Network UAT network - # Eg. tlsCertCrlIssuer: "CN=Test TLS Signer Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - tlsCertCrlIssuer: - devMode: false - # Provide volume related specifications - volume: - # Ex baseDir: /opt/corda - baseDir: - # where is node jar is stored - jarPath: bin - # where service configuration files are stored - configPath: etc - pvc: - # annotations: - # key: "value" - annotations: {} - deployment: - # annotations: - # key: "value" - annotations: {} - # Provide the number of replicas for your pods - # Eg. replicas: 1 - replicas: 1 - # Specify the maximum size of the memory allocation pool - cordaJar: - # Provide the memory size. - # Eg. memorySize: 4096 (if using kilobytes) - # Eg. memorySize: 512 (if using megabytes) - # Eg. memorySize: 1 (if using gigabytes) - memorySize: - # Provide the unit of greatness for the size, one of three options: - # - k or K for kilobytes - # - m or M for megabytes - # - g or G for gigabytes - # Eg. unit: M - unit: - # Set limits of .jar - pod: - resources: - # Provide the limit memory for node - # Eg. limits: 512M - limits: - # Provide the requests memory for node - # Eg. requests: 550M - requests: - -networkServices: - # Ex. idmanName: idman - idmanName: idman - # doormanURL defines the accesspoint for the Identity Manager server (protocol + domain name + port, eg. http://my-identity-manager:10000) - doormanURL: - idmanDomain: - # Ex. networkmapName: networkmap - networkmapName: networkmap - # networkMapURL defines the accesspoint for the Network Map server (protocol + domain name + port, eg. http://my-network-map:10000) - networkMapURL: - networkMapDomain: - -# external firewall (Bridge and Float) -firewall: - # enabled is a boolean value for the above function - enabled: false - -service: - # p2pPort defines the port number of inbound connections - # Eg. p2pPort: 40000 - p2pPort: 40000 - # p2pAddress defines the public facing IP address (domain name recommended) and port number of the Node, in the case of using a Float, this address should correspond to the public IP address of the float - # Eg. float-internal.cenm-ent - p2pAddress: - # ssh defines the SSH access options - ssh: - # enabled is a boolean value for the above parameter - enabled: true - # sshdPort is the Node Shell access port. - # Eg. sshdPort: 2222 - sshdPort: - rpc: - # port is the RPC endpoint that the user interface will access to direct the CorDapp on the Corda Node - # Eg. port: 30000 - port: 30000 - # adminPort is the RPC admin endpoint that can be used to do administrative tasks on the Corda Node. - # Eg. adminPort: 30009 - adminPort: 30009 - # users defines the list of RPC users and the permissions they have. - users: - # name defines the name of the RPC user - # Eg. name: carrier-node - - name: - # password defines the password for the RPC user - # Eg. password: nodeP - password: - # permissions defines the RPC permissions available. 
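- # Eg. permissions: ALL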
- permissions: ALL - -############################################################# -# Database Options and Configuration # -############################################################# -dataSourceProperties: - dataSource: - # Ex user: "sa" - user: - # Ex. password: "ziAscD0MJnj4n4xkFWY6XuMBuw9bvYC7" - password: - # Eg. url: "jdbc:h2:tcp://carrierdb:9101/persistence;DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=10000;WRITE_DELAY=100;AUTO_RECONNECT=TRUE;" - url: - # Eg. dataSourceClassName: "org.h2.jdbcx.JdbcDataSource" - dataSourceClassName: " " - # dbUrl: notarydb - dbUrl: - # dbPort: 9101 - dbPort: - monitoring: - # enabled is a boolean value for the above parameter - enabled: true - # port defines the port on which the monitoring information will be available - port: 8090 - # allowDevCorDapps defines if CorDapps that are signed with developer keys will be allowed to load or not (it clears the cordappSignerKeyFingerprintBlacklist if enabled) - allowDevCorDapps: - # enabled is a boolean value for the above parameter - enabled: true - # Number of retries to check contents from vault - retries: 10 - # Interval in seconds between retries - retryInterval: 15 - -############################################################# -# Settings # -############################################################# -# sleep time in seconds when error while registration -# Ex. sleepTimeAfterError: 120 -sleepTimeAfterError: 120 -# custom sleep time in seconds -sleepTime: 20 -healthcheck: - #Provide the wait interval in seconds in fetching certificates from vault - #Eg. readinesscheckinterval: 5 - readinesscheckinterval: 5 - #Provide the threshold number of retries in fetching certificates from vault - #Eg. readinessthreshold: 2 - readinessthreshold: 2 diff --git a/platforms/r3-corda-ent/charts/corda-ent-notary-initial-registration/Chart.yaml b/platforms/r3-corda-ent/charts/corda-ent-notary-initial-registration/Chart.yaml deleted file mode 100644 index bf51d87e228..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-notary-initial-registration/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-ent: Job for initial notary node registration." -name: corda-ent-notary-initial-registration -version: 1.0.0 diff --git a/platforms/r3-corda-ent/charts/corda-ent-notary-initial-registration/README.md b/platforms/r3-corda-ent/charts/corda-ent-notary-initial-registration/README.md deleted file mode 100644 index 8ff5a80207d..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-notary-initial-registration/README.md +++ /dev/null @@ -1,201 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) 
-[//]: # (SPDX-License-Identifier: Apache-2.0)
-[//]: # (##############################################################################################)
-
-
-# Notary-initial-registration Deployment
-
-- [Notary-initial-registration Deployment Helm Chart](#Notary-initial-registration-deployment-helm-chart)
-- [Prerequisites](#prerequisites)
-- [Chart Structure](#chart-structure)
-- [Configuration](#configuration)
-- [Deployment](#deployment)
-- [Contributing](#contributing)
-- [License](#license)
-
-
-## notary-initial-registration Deployment Helm Chart
----
-This [Helm chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/corda-ent-notary-initial-registration) helps to deploy the job for initial notary node registration.
-
-
-## Prerequisites
----
-Before deploying the chart, please ensure you have the following prerequisites:
-
-- NetworkMap and the node's database up and running.
-- Kubernetes cluster up and running.
-- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication.
-- The Vault is unsealed and initialized.
-- Helm is installed.
-
-This chart has the following structure:
-```
-  .
-  ├── corda-ent-notary-initial-registration
-  │   ├── Chart.yaml
-  │   ├── templates
-  │   │   ├── _helpers.tpl
-  │   │   └── job.yaml
-  │   └── values.yaml
-```
-
-Type of files used:
-
-- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed.
-- `job.yaml` : This file defines the Kubernetes Job for the registration run. Its init containers ensure that the initial node registration process completes successfully before the main container starts, and it also specifies volume mounts for storing certificates and data.
-- `Chart.yaml` : Provides metadata about the chart, such as its name, version, and description.
-- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the image, node configuration, credentials, storage, service, vault, etc.
-- `_helpers.tpl` : A template file used for defining custom labels and ports for the metrics in the Helm chart.
-
-
-
-## Configuration
----
-The [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/corda-ent-notary-initial-registration/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements.
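For instance, a few of the most commonly changed values can be overridden directly at install time. In the sketch below the release name, namespace and URLs are illustrative placeholders; the keys mirror the chart's values.yaml:

```bash
# Illustrative only: release name, namespace and all values shown are placeholders.
helm install notary-registration ./corda-ent-notary-initial-registration \
  --namespace cenm \
  --set acceptLicense=YES \
  --set vault.address=http://vault.example.com:8200 \
  --set networkServices.doormanURL=http://my-identity-manager:10000 \
  --set networkServices.networkMapURL=http://my-network-map:10000
```

The same overrides can instead be collected in a custom values file and passed with `-f`.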
Here are some important configuration options: - -## Parameters ---- - -### Name - -| Name | Description | Default Value | -| -----------| -----------------------------------| ------------- | -| nodeName | Provide the name of the node | notary-registration | - -### Metadata - -| Name | Description | Default Value | -| ----------------| -------------------------------------------------| ------------- | -| namespace | Provide the namespace for the Corda node | cenm | -| labels | Provide the labels to the Corda node | "" | - -### Image - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------------------------------- | ----------------------------------------------| -| initContainerName | Information about the Docker container used for the init-containers | ghcr.io/hyperledger | -| nodeContainerName | Enterprise node image | azurecr.io/corda/notary:1.2-zulu-openjdk8u242 | -| ImagePullSecret | Provide the K8s secret that has rights for pulling the image off the registry | "" | -| pullPolicy | Provide the pull policy for Docker images, either Always or IfNotPresent | IfNotPresent | - -### Vault - -| Name | Description | Default Value | -| ------------------------- | --------------------------------------------------------------------------| ------------------------ | -| address | Address/URL of the Vault server | "" | -| role | Role used for authentication with Vault | vault-role | -| authpath | Authentication path for Vault | entcordacenm | -| serviceAccountName | Provide the already created service account name autheticated to vault | vault-auth | -| certSecretPrefix | Provide the vault path where the certificates are stored | secret/organisation-name | -| nodePath | Provide the vault orginsation node folder name where certificates stored | notary | -| retries | Amount of times to retry fetching from/writing to Vault before giving up | "" | -| retryInterval | Amount of time in seconds to wait after an error occurs | "" | - -### NodeConf - -| Name | Description | Default Value | -| ------------------------ | -------------------------------------------------------------------------------------- | -----------------------------| -| p2p | The host and port on which the node is available for protocol operations over ArtemisMQ | "" | -| ambassador | Specify ambassador host:port which will be advertised in addition to p2paddress | O=Node,OU=Node,L=London,C=GB | -| legalName | Provide the legalName for node | dev-node@bevel.com | -| notary | Notary type Ex cenm or notary | cenm | -| volume | Provide volume related specifications | /opt/corda/base | -| jarPath | where is node jar is stored | bin | -| devMode | Provide the devMode for corda node | "false" | -| configPath | where service configuration files are stored | etc | - -### networkServices - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------------------ | --------------------------------| -| idmanName | Name of the idman | idman | -| doormanURL | doormanURL defines the accesspoint for the Identity Manager server | http://my-identity-manager:1000 | -| networkmapName | name of the networkmapName | networkmap | -| networkMapURL | networkMapURL defines the accesspoint for the Network Map server | http://my-network-map:10000 | -| networkMapDomain | defines the networkMapDomain | "" | - -### Service - -| Name | Description | Default Value | -| --------------------- | 
-------------------------------------------------------- | ------------- | -| p2p port | p2pPort defines the port number of inbound connections | 40000 | -| rpcadmin port | Provide the rpcadmin port for node | "" | - -### dataSourceProperties - -| Name | Description | Default Value | -| --------------------- | ---------------------------------------------------------------------------- | ------------- | -| dataSource | DataSource of the url ,user and passowrd | "" | -| dataSourceClassName | dburl, dbport of the data source class | "" | - -### Healthcheck - -| Name | Description | Default Value | -| ----------------------------| ------------------------------------------------------------------------------| ------------- | -| readinesscheckinterval | Provide the interval in seconds you want to iterate till db to be ready | 5 | -| readinessthreshold | Provide the threshold till you want to check if specified db up and running | 2 | - - - -## Deployment ---- - -To deploy the notary-initial-registration Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/corda-ent-notary-initial-registration/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install, upgrade,verify, delete the chart: - -To install the chart: -```bash -helm repo add bevel https://hyperledger.github.io/bevel/ -helm install ./corda-ent-notary-initial-registration -``` - -To upgrade the chart: -```bash -helm upgrade ./corda-ent-notary-initial-registration -``` - -To verify the deployment: -```bash -kubectl get jobs -n -``` -Note : Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. - -To delete the chart: -```bash -helm uninstall -``` -Note : Replace `` with the desired name for the release. - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [notary-initial-registration Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/corda-ent-notary-initial-registration), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - -## License - -This chart is licensed under the Apache v2.0 license. - -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-``` diff --git a/platforms/r3-corda-ent/charts/corda-ent-notary-initial-registration/templates/_helpers.tpl b/platforms/r3-corda-ent/charts/corda-ent-notary-initial-registration/templates/_helpers.tpl deleted file mode 100644 index 7f9b0dc6131..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-notary-initial-registration/templates/_helpers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{- define "labels.custom" -}} - {{- range $key, $val := .Values.metadata.labels -}} - {{- $key -}}: {{- $val -}} - {{- end -}} -{{- end -}} diff --git a/platforms/r3-corda-ent/charts/corda-ent-notary-initial-registration/templates/job.yaml b/platforms/r3-corda-ent/charts/corda-ent-notary-initial-registration/templates/job.yaml deleted file mode 100644 index dceca226302..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-notary-initial-registration/templates/job.yaml +++ /dev/null @@ -1,410 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: batch/v1 -kind: Job -metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} - labels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} -spec: - backoffLimit: 6 - template: - metadata: - labels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - restartPolicy: "OnFailure" - serviceAccountName: {{ $.Values.vault.serviceAccountName }} - hostname: {{ .Values.nodeName }} - securityContext: - fsGroup: 1000 - initContainers: - - name: init-certificates - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authPath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.nodeConf.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certSecretPrefix }} - - name: MOUNT_PATH - value: "/DATA" - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code and curl_response - $curl_response" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - # setting up env to get secrets from vault - echo "Getting secrets from Vault Server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . 
else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - - echo "logged into vault" - # Creating dirs for storing the certificate - mkdir -p ${MOUNT_PATH}/trust-stores - # Fetching network-root-truststore certificates from vault - COUNTER=1 - while [ "$COUNTER" -lt {{ $.Values.vault.retries }} ] - do - # get keystores from vault to see if certificates are created and put in vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/root/certs | jq -r 'if .errors then . else . end') - data_info="$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data["data"]')" - - if [ "$data_info" == "null" ] - then - sleep {{ $.Values.vault.retryInterval }} - else - validateVaultResponse "${CERTS_SECRET_PREFIX}/root/certs" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - network_root_truststore=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["network-root-truststore.jks"]') - echo "${network_root_truststore}" | base64 -d > ${MOUNT_PATH}/trust-stores/network-root-truststore.jks - # ssl trust store - corda_ssl_trust_store=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["corda-ssl-trust-store.jks"]') - echo "${corda_ssl_trust_store}" | base64 -d > ${MOUNT_PATH}/trust-stores/corda-ssl-trust-store.jks - echo "Successfully got network truststore certifcates" - break - fi - COUNTER=`expr "$COUNTER" + 1` - done - if [ "$COUNTER" -ge {{ $.Values.vault.retries }} ] - then - # printing number of trial done before giving up - echo "$COUNTER" - echo "Network truststore certificates might not have been put in vault ! Giving up..." - exit 1 - fi - - mkdir -p ${MOUNT_PATH}/tlscerts - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ .Values.networkServices.idmanName }}/tlscerts | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/{{ .Values.networkServices.idmanName }}/tlscerts" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - IDMAN_CERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["tlscacerts"]') - echo "${IDMAN_CERT}" | base64 -d > ${MOUNT_PATH}/tlscerts/idman.crt - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ .Values.networkServices.networkmapName }}/tlscerts | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/{{ .Values.networkServices.networkmapName }}/tlscerts" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - NETWORKMAP_CERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["tlscacerts"]') - echo "${NETWORKMAP_CERT}" | base64 -d > ${MOUNT_PATH}/tlscerts/networkmap.crt - #Fetching truststore credentials from vault - mkdir -p ${MOUNT_PATH}/truststore - OUTPUT_PATH=${MOUNT_PATH}/truststore; - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials/truststore | jq -r 'if .errors then . else . 
end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials/truststore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - ROOTCA_TRUSTSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["rootca"]') - echo "${ROOTCA_TRUSTSTORE}"> ${OUTPUT_PATH}/rootcats - TRUSTSTORE_PASSWORD=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["trustStorePassword"]') - echo "${TRUSTSTORE_PASSWORD}"> ${OUTPUT_PATH}/ts - #Fetching keystore credentials from vault - mkdir -p ${MOUNT_PATH}/keystore - OUTPUT_PATH=${MOUNT_PATH}/keystore; - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials/keystore | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials/keystore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - KEYSTORE_PASSWORD=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["keyStorePassword"]') - echo "${KEYSTORE_PASSWORD}"> ${OUTPUT_PATH}/ks - - echo "Done with getting certifcates and credentials from vault" - volumeMounts: - - name: certificates - mountPath: /DATA - - name: db-healthcheck - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - COUNTER=1 - FLAG=true - # perform health check if db is up and running before starting corda node - while [ "$COUNTER" -le {{ $.Values.healthcheck.readinessthreshold }} ] - do - DB_NODE={{ .Values.dataSourceProperties.dbUrl }}:{{ .Values.dataSourceProperties.dbPort }} - STATUS=$(nc -vz $DB_NODE 2>&1 | grep -c open ) - if [ "$STATUS" == 0 ] - then - FLAG=false - else - FLAG=true - echo "DB up and running" - fi - if [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, retrying after {{ $.Values.healthcheck.readinesscheckinterval }} seconds" - COUNTER=`expr "$COUNTER" + 1` - sleep {{ $.Values.healthcheck.readinesscheckinterval }} - else - echo "SUCCESS!" - echo "DB up and running!" - exit 0 - break - fi - done - if [ "$COUNTER" -gt {{ $.Values.healthcheck.readinessthreshold }} ] || [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, no DB up and running. Giving up!" 
- exit 1 - break - fi - containers: - - name: registration - image: "{{ .Values.image.nodeContainerName }}" - env: - - name: ACCEPT_LICENSE - value: "{{required "You must accept the license agreement to run this software" .Values.acceptLicense }}" - - name: BASE_DIR - value: {{ $.Values.nodeConf.volume.baseDir }} - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: ["/bin/bash", "-c"] - args: - - |- - rm -f certificates/done.txt - echo 'networkServices { - doormanURL="{{ .Values.networkServices.doormanURL }}" - networkMapURL="{{ .Values.networkServices.networkMapURL }}" - } - dataSourceProperties { - dataSource { - password = "{{ .Values.dataSourceProperties.dataSource.password }}" - url = "{{ .Values.dataSourceProperties.dataSource.url }}" - user = "{{ .Values.dataSourceProperties.dataSource.user }}" - } - dataSourceClassName = "{{ .Values.dataSourceProperties.dataSourceClassName }}" - } - notary { - serviceLegalName : "{{ .Values.nodeConf.notary.serviceLegalName }}" - validating = "{{ .Values.nodeConf.notary.validating }}" - } - devMode = {{ .Values.nodeConf.devMode }} - emailAddress : "{{ .Values.nodeConf.emailAddress }}" - myLegalName : "{{ .Values.nodeConf.legalName }}" - p2pAddress : "{{ .Values.nodeConf.p2p.url }}:{{ .Values.service.p2pPort }}" - trustStorePassword: "TRUSTSTORE_PASSWORD" - keyStorePassword: "KEYSTORE_PASSWORD" - additionalP2PAddresses : ["{{ .Values.nodeConf.ambassador.p2pAddress }}"] - detectPublicIp : false - rpcSettings { - address="{{ .Values.service.rpc.address }}:{{ .Values.service.rpc.addressPort }}" - adminAddress="{{ .Values.service.rpc.admin.address }}:{{ .Values.service.rpc.admin.addressPort }}" - standAloneBroker="{{ .Values.service.rpc.standAloneBroker }}" - useSsl="{{ .Values.service.rpc.useSSL }}" - } - rpcUsers=[ - { - username="{{ .Values.service.rpc.users.username }}" - password="{{ .Values.service.rpc.users.password }}" - permissions=[ - ALL - ] - } - ]' > etc/notary.conf - export TRUSTSTORE_PASSWORD=$(cat {{ $.Values.nodeConf.volume.baseDir }}/DATA/truststore/ts) - sed -i -e "s*TRUSTSTORE_PASSWORD*${TRUSTSTORE_PASSWORD}*g" etc/notary.conf - export KEYSTORE_PASSWORD=$(cat {{ $.Values.nodeConf.volume.baseDir }}/DATA/keystore/ks) - sed -i -e "s*KEYSTORE_PASSWORD*${KEYSTORE_PASSWORD}*g" etc/notary.conf - rm -r ${BASE_DIR}/certificates/done.txt - - {{- if eq .Values.nodeConf.notary.type "cenm" }} - CORDA_SSL_TRUSTSTORE={{ $.Values.nodeConf.volume.baseDir }}/DATA/trust-stores/corda-ssl-trust-store.jks - {{- else }} - SSL_TRUSTSTORE_PASSWORD=$(tr -dc A-Za-z0-9 &1) - # get sslkeystore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ $.Values.nodePath }}/certs/sslkeystore | jq -r 'if .errors then . else . end') - TLS_SSLKEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "sslkeystore.jks" ]' 2>&1) - # get truststore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ $.Values.nodePath }}/certs/truststore | jq -r 'if .errors then . else . 
end') - TLS_TRUSTSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "truststore.jks" ]' 2>&1) - - if [ "$TLS_NODEKEYSTORE" == "null" ] || [ "$TLS_SSLKEYSTORE" == "null" ] || [ "$TLS_TRUSTSTORE" == "null" ] || [[ "$TLS_NODEKEYSTORE" == "parse error"* ]] || [[ "$TLS_SSLKEYSTORE" == "parse error"* ]] || [[ "$TLS_TRUSTSTORE" == "parse error"* ]] - then - echo "certificates write or read fail" - sleep {{ $.Values.healthcheck.readinessthreshold }} - if [ "$COUNTER" -ge {{ $.Values.vault.retries }} ] - then - echo "Retry attempted $COUNTER times, certificates have not been saved" - exit 1 - fi - fi - COUNTER=`expr "$COUNTER" + 1` - fi - done - # setting up env to get secrets from vault - cd ${BASE_DIR}/additional-node-infos - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - # getting the nodeInfo name - nodeInfoName=$(basename $(ls nodeInfo*)) - # Encoding binary file and putting it in file, to make sure binary gets correctly put in Vault - base64 ${BASE_DIR}/additional-node-infos/nodeInfo* > ${BASE_DIR}/file - (echo '{"data": {"nodeInfoFile": "'; cat ${BASE_DIR}/file; echo '","nodeInfoName": "'; echo $nodeInfoName; echo '","validating": "'; echo {{ .Values.nodeConf.notary.validating }}; echo '"}}') | curl -H "X-Vault-Token: ${VAULT_TOKEN}" -d @- ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ $.Values.nodePath }}/nodeInfo - volumeMounts: - - name: notary-nodeinfo - mountPath: {{ $.Values.nodeConf.volume.baseDir }}/additional-node-infos - - name: notary-certificates - mountPath: {{ $.Values.nodeConf.volume.baseDir }}/certificates - - name: certificates - mountPath: {{ $.Values.nodeConf.volume.baseDir }}/DATA - imagePullSecrets: - - name: {{ .Values.image.imagePullSecret }} - volumes: - - name: notary-etc - emptyDir: - medium: Memory - - name: notary-nodeinfo - emptyDir: - medium: Memory - - name: notary-certificates - emptyDir: - medium: Memory - - name: certificates - emptyDir: - medium: Memory diff --git a/platforms/r3-corda-ent/charts/corda-ent-notary-initial-registration/values.yaml b/platforms/r3-corda-ent/charts/corda-ent-notary-initial-registration/values.yaml deleted file mode 100644 index a43c278d0ff..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-notary-initial-registration/values.yaml +++ /dev/null @@ -1,191 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Default values for Notary Registration job. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -# Provide the name of the node -# Eg. nodeName: notary-registration -nodeName: notary-registration - -# This section contains the Enterprise-Corda node metadata. -metadata: - # Provide the namespace for the Corda node. - # Eg. namespace: cenm - namespace: cenm - # Provide the labels to the Corda node. - labels: - -# Provide image for the containers -image: - # Provide the alpine utils image. - # NOTE: The alpine image used is the base alpine image with CURL installed. - # Eg. 
initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - # Enterprise node image - # Eg. adopblockchaincloud0502.azurecr.io/corda/notary:1.2-zulu-openjdk8u242 - nodeContainerName: adopblockchaincloud0502.azurecr.io/corda/notary:1.2-zulu-openjdk8u242 - # Provide the docker-registry secret created and stored in kubernetes cluster as a secret. - # Eg. imagePullSecret: regcred - imagePullSecret: "" - # Image pull policy - # Eg. Always - pullPolicy: - -# required parameter -# Accept Corda Enterprise license should be YES. -acceptLicense: YES - -############################################################################################# -# This section contains the vault related information. # -############################################################################################# -# NOTE: Make sure that the vault is already unsealed, intialized and configured to -# use Kubernetes service account token based authentication. - -vault: - # Provide the vault address - # Eg. address: http://vault.example.com:8200 - address: - # Provide the vault role used. - # Eg. role: vault-role - role: vault-role - # Provide the authpath configured to be used. - # Eg. authpath: entcordacenm - authpath: entcordacenm - # Provide the service account name autheticated to vault. - # NOTE: Make sure that the service account is already created and autheticated to use the vault. - # Eg. serviceaccountname: vault-auth - serviceaccountname: vault-auth - # Provide the vault path where the certificates are stored - # Eg. certsecretprefix: secret/organisation-name - certsecretprefix: secret/organisation-name - # Provide the vault orginsation node folder name where the certificates are stored - # Eg. nodePath: notary - nodePath: notary - # Number of retries to check contents from vault - retries: - # Interval in seconds between retries - retryInterval: - -############################################################# -# Notary Configuration # -############################################################# -nodeConf: - p2p: - url: - ambassador: - p2pPort: - external_url_suffix: - p2pAddress: - # Notary legal name - # Eg. legalName: O=Notary,OU=Notary,L=London,C=GB - legalName: - # Email address - # Eg. emailAddress: dev-node@bevel.com - emailAddress: - notary: - serviceLegalName: - # Notary is validator Ex : isValidating: true - validating: - # Notary type Ex cenm or notary - # Eg. type: cenm - type: - # Dev Mode - devMode: false - # Provide volume related specifications - volume: - # Ex baseDir: /opt/corda - baseDir: /opt/corda - # where is node jar is stored - jarPath: - # where service configuration files are stored - configPath: - # Specify the maximum size of the memory allocation pool - cordaJar: - # Provide the memory size. - # Eg. memorySize: 4096 (if using kilobytes) - # Eg. memorySize: 512 (if using megabytes) - # Eg. memorySize: 1 (if using gigabytes) - memorySize: - # Provide the unit of greatness for the size, one of three options: - # - k or K for kilobytes - # - m or M for megabytes - # - g or G for gigabytes - # Eg. unit: M - unit: - # Set limits of .jar - pod: - resources: - # Provide the limit memory for node - # Eg. limits: 512M - limits: - # Provide the requests memory for node - # Eg. requests: 550M - requests: - -networkServices: - # Ex. idmanName: idman - idmanName: idman - # doormanURL defines the accesspoint for the Identity Manager server (protocol + domain name + port, eg. 
http://my-identity-manager:1000) - doormanURL: - idmanDomain: - # Ex. networkmapName: networkmap - networkmapName: networkmap - # networkMapURL defines the accesspoint for the Network Map server (protocol + domain name + port, eg. http://my-network-map:10000) - networkMapURL: - networkMapDomain: - -service: - # p2pPort defines the port number of inbound connections - # Eg. p2pPort: 40000 - p2pPort: - notaryPublicIP: - rpc: - address: - addressPort: - admin: - address: - addressPort: - standAloneBroker: - useSSL: - users: - username: - password: - -############################################################# -# Database Options and Configuration # -############################################################# -dataSourceProperties: - dataSource: - # Ex. password: "ziAscD0MJnj4n4xkFWY6XuMBuw9bvYC7" - password: - # Ex. url: "jdbc:h2:tcp://notarydb:9101/persistence;DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=10000;WRITE_DELAY=100;AUTO_RECONNECT=TRUE;" - url: - # Ex user: "sa" - user: - # Ex data source class name: "org.h2.jdbcx.JdbcDataSource" - dataSourceClassName: - # dbUrl: notarydb - dbUrl: - # dbPort: 9101 - dbPort: - -############################################################# -# Settings # -############################################################# -# sleep time in seconds when error while registration -# Ex. sleepTimeAfterError: 120 -sleepTimeAfterError: -# custom sleep time in seconds -sleepTime: -healthcheck: - #Provide the wait interval in seconds for any readiness check - #Eg. readinesscheckinterval: 5 - readinesscheckinterval: 5 - #Provide the threshold number of retries - #Eg. readinessthreshold: 2 - readinessthreshold: 2 diff --git a/platforms/r3-corda-ent/charts/corda-ent-notary/Chart.yaml b/platforms/r3-corda-ent/charts/corda-ent-notary/Chart.yaml deleted file mode 100644 index 592048d243e..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-notary/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-ent: Deploys the notary node." -name: corda-ent-notary -version: 1.0.0 diff --git a/platforms/r3-corda-ent/charts/corda-ent-notary/README.md b/platforms/r3-corda-ent/charts/corda-ent-notary/README.md deleted file mode 100644 index e7d58cffcca..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-notary/README.md +++ /dev/null @@ -1,212 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) -[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# Notary Deployment - -- [Notary Deployment Helm Chart](#Notary-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Contributing](#contributing) -- [License](#license) - - -## Notary Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/corda-ent-notary) helps to deploy the corda notory node. 
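The chart's init containers authenticate to Vault with the Kubernetes auth method and then pull the node keystores from the configured secret path. A minimal standalone sketch of that flow is shown below; the Vault address, auth path, role and secret path are illustrative stand-ins for `vault.address`, `vault.authpath`, `vault.role` and `vault.certsecretprefix`/node path in values.yaml:

```bash
#!/usr/bin/env sh
# Standalone sketch of the Vault flow used by the init containers.
# The address, auth path, role and secret path below are illustrative.
VAULT_ADDR=http://vault.example.com:8200
KUBERNETES_AUTH_PATH=entcordacenm
VAULT_APP_ROLE=vault-role

# Exchange the pod's service account token for a Vault client token.
KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
VAULT_TOKEN=$(curl -sS --request POST \
  "${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login" \
  -H "Content-Type: application/json" \
  -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' \
  | jq -r '.auth.client_token')

# Read one keystore from the secret path and decode it to a file.
curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" \
  "${VAULT_ADDR}/v1/secret/organisation-name/notary/certs/nodekeystore" \
  | jq -r '.data.data["nodekeystore.jks"]' \
  | base64 -d > nodekeystore.jks
```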
-
-
-## Prerequisites
----
-Before deploying the chart, please ensure you have the following prerequisites:
-
-- NetworkMap and the node's database up and running.
-- Kubernetes cluster up and running.
-- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication.
-- The Vault is unsealed and initialized.
-- Helm is installed.
-
-
-This chart has the following structure:
-```
-
-  ├── corda-ent-notary
-  │   ├── Chart.yaml
-  │   ├── templates
-  │   │   ├── deployment.yaml
-  │   │   ├── _helpers.tpl
-  │   │   ├── pvc.yaml
-  │   │   └── service.yaml
-  │   └── values.yaml
-```
-
-- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed.
-- `deployment.yaml` : This file defines the notary workload as a Kubernetes StatefulSet with a specified number of replicas. Its init containers ensure that the node registration process has completed successfully before the main containers start, and it also specifies volume mounts for storing certificates and data.
-- `pvc.yaml` : Defines the PersistentVolumeClaim (PVC), i.e. the request for persistent storage used by the node.
-- `service.yaml` : This file defines a Kubernetes Service with multiple ports for protocols and targets, and supports Ambassador proxy annotations for specific configurations when using the "ambassador" proxy provider.
-- `Chart.yaml` : Provides metadata about the chart, such as its name, version, and description.
-- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the image, node configuration, credentials, storage, service, vault, etc.
-- `_helpers.tpl` : A template file used for defining custom labels and ports for the metrics in the Helm chart.
-
-
-
-## Configuration
----
-The [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/corda-ent-notary/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements.
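Among these, `dataSourceProperties.dbUrl`/`dbPort` and the `healthcheck` settings documented below drive a simple TCP probe that the db-healthcheck init container runs before the notary starts. A standalone sketch of that loop, simplified to rely on `nc`'s exit status and using illustrative host, port and threshold values:

```bash
#!/usr/bin/env sh
# Sketch of the db-healthcheck loop; host, port, interval and threshold are
# illustrative stand-ins for the corresponding values.yaml settings.
DB_HOST=notarydb
DB_PORT=9101
READINESS_CHECK_INTERVAL=5
READINESS_THRESHOLD=2

COUNTER=1
while [ "$COUNTER" -le "$READINESS_THRESHOLD" ]
do
  if nc -z "$DB_HOST" "$DB_PORT"
  then
    echo "DB up and running"
    exit 0
  fi
  echo "Retry attempted $COUNTER times, retrying after $READINESS_CHECK_INTERVAL seconds"
  COUNTER=$(expr "$COUNTER" + 1)
  sleep "$READINESS_CHECK_INTERVAL"
done
echo "No DB up and running. Giving up!"
exit 1
```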
Here are some important configuration options: - -## Parameters ---- - -### Name - -| Name | Description | Default Value | -| -----------| -------------------------------------| ------------- | -| nodeName | Provide the name of the node | notary | - -### Metadata - -| Name | Description | Default Value | -| ----------------| -------------------------------------------------| ------------- | -| namespace | Provide the namespace for the Corda node | cenm | -| labels | Provide the labels to the Corda node | "" | - -### Image - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------------------------------- | ----------------------------------------------| -| initContainerName | Information about the Docker container used for the init-containers | ghcr.io/hyperledger | -| nodeContainerName | Enterprise node image | azurecr.io/corda/notary:1.2-zulu-openjdk8u242 | -| ImagePullSecret | Provide the K8s secret that has rights for pulling the image off the registry | "" | -| pullPolicy | Provide the pull policy for Docker images, either Always or IfNotPresent | IfNotPresent | - -### Vault - -| Name | Description | Default Value | -| ------------------------- | --------------------------------------------------------------------------| -------------------------| -| address | Address/URL of the Vault server | "" | -| role | Role used for authentication with Vault | vault-role | -| authpath | Authentication path for Vault | entcordacenm | -| serviceAccountName | Provide the already created service account name autheticated to vault | vault-auth | -| certSecretPrefix | Provide the vault path where the certificates are stored | secret/organisation-name | -| retries | Amount of times to retry fetching from/writing to Vault before giving up | 10 | -| retryInterval | Amount of time in seconds to wait after an error occurs | 15 | - -### cordapps - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------- | --------------- | -| getcordapps | Provide if you want to provide jars in cordapps | true | -| repository | Provide the repository of cordapps | "" | -| jars url | Provide url to download the jar using wget cmd | "" | - -### NodeConf - -| Name | Description | Default Value | -| ------------------------ | -------------------------------------------------------------------------------------------| -----------------------------| -| p2p | The host and port on which the node is available for protocol operations over ArtemisMQ | "" | -| ambassador | Specify ambassador host:port which will be advertised in addition to p2paddress | O=Node,OU=Node,L=London,C=GB | -| legalName | Provide the legalName for node | dev-node@bevel.com | -| notary | Notary type Ex cenm or notary | cenm | -| volume | Provide volume related specifications | /opt/corda/base | -| jarPath | where is node jar is stored | bin | -| devMode | Provide the devMode for corda node | "false" | -| configPath | where service configuration files are stored | "etc" | -| replicas | Provide the number of replicas for your pods | 1 | - -### networkServices - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------------------ | --------------------------------| -| idmanName | Name of the idman | idman | -| doormanURL | doormanURL defines the accesspoint for the Identity Manager server | http://my-identity-manager:1000 | -| networkmapName | name of the 
networkmapName | networkmap | -| networkMapURL | networkMapURL defines the accesspoint for the Network Map server | http://my-network-map:10000 | -| networkMapDomain | defines the networkMapDomain | "" | - -### Service - -| Name | Description | Default Value | -| --------------------- | -------------------------------------------------------- | ------------- | -| p2p port | p2pPort defines the port number of inbound connections | 4000 | -| rpcadmin port | Provide the rpcadmin port for node | "" | - -### dataSourceProperties - -| Name | Description | Default Value | -| --------------------- | -------------------------------------------------| ------------- | -| dataSource | DataSource of the url ,user and passowrd | "" | -| dataSourceClassName | dburl, dbport of the data source class | "" | - -### Healthcheck - -| Name | Description | Default Value | -| ----------------------------| ------------------------------------------------------------------------------| ------------- | -| readinesscheckinterval | Provide the interval in seconds you want to iterate till db to be ready | 5 | -| readinessthreshold | Provide the threshold till you want to check if specified db up and running | 2 | - - -## Deployment ---- - -To deploy the notary Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/main/platforms/r3-corda-ent/charts/corda-ent-notary/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install, upgrade,verify, delete the chart: - -To install the chart: -```bash -helm repo add bevel https://hyperledger.github.io/bevel/ -helm install ./corda-ent-notary -``` - -To upgrade the chart: -```bash -helm upgrade ./corda-ent-notary -``` - -To verify the deployment: -```bash -kubectl get jobs -n -``` -Note : Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. - -To delete the chart: -```bash -helm uninstall -``` -Note : Replace `` with the desired name for the release. - - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [notary Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/main/platforms/r3-corda-ent/charts/corda-ent-notary), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - -## License - -This chart is licensed under the Apache v2.0 license. - -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-``` diff --git a/platforms/r3-corda-ent/charts/corda-ent-notary/templates/_helpers.tpl b/platforms/r3-corda-ent/charts/corda-ent-notary/templates/_helpers.tpl deleted file mode 100644 index 7f9b0dc6131..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-notary/templates/_helpers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{- define "labels.custom" -}} - {{- range $key, $val := .Values.metadata.labels -}} - {{- $key -}}: {{- $val -}} - {{- end -}} -{{- end -}} diff --git a/platforms/r3-corda-ent/charts/corda-ent-notary/templates/deployment.yaml b/platforms/r3-corda-ent/charts/corda-ent-notary/templates/deployment.yaml deleted file mode 100644 index ef73033c034..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-notary/templates/deployment.yaml +++ /dev/null @@ -1,465 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} - {{- if .Values.nodeConf.deployment.annotations }} - annotations: -{{ toYaml .Values.nodeConf.deployment.annotations | indent 8 }} - {{- end }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - serviceName: {{ .Values.nodeName }} - replicas: {{ .Values.replicas }} - selector: - matchLabels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - template: - metadata: - labels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - serviceAccountName: {{ $.Values.vault.serviceAccountName }} - securityContext: - fsGroup: 1000 - initContainers: - - name: init-check-registration - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authPath }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certSecretPrefix }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code and curl_response - $curl_response" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - # setting up env to get secrets from vault - echo "Getting secrets from Vault Server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . 
else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - echo "logged into vault" - COUNTER=1 - while [ "$COUNTER" -lt {{ $.Values.vault.retries }} ] - do - # get truststore from vault to see if registration is done or not - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ $.Values.nodeName }}/certs/truststore | jq -r 'if .errors then . else . end') - data_info="$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data["data"]')" - - if [ "$data_info" == "null" ] - then - sleep {{ $.Values.vault.retryInterval }} - else - validateVaultResponse "${CERTS_SECRET_PREFIX}/{{ $.Values.nodeName }}/certs/truststore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - break - fi - COUNTER=`expr "$COUNTER" + 1` - done - - if [ "$COUNTER" -ge {{ $.Values.vault.retries }} ] - then - # printing number of trial done before giving up - echo "$COUNTER" - echo "Node registration might not have been done." - exit 1 - fi - echo "Done" - - name: init-certificates - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authPath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.nodeConf.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certSecretPrefix }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code and curl_response - $curl_response" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - # setting up env to get secrets from vault - echo "Getting secrets from Vault Server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - OUTPUT_PATH=${BASE_DIR}/certificates - # Getting intitial network parameters from Vault - - # get nodekeystore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ $.Values.nodeName }}/certs/nodekeystore | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/{{ $.Values.nodeName }}/certs/nodekeystore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TLS_NODEKEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["nodekeystore.jks"]') - echo "${TLS_NODEKEYSTORE}" | base64 -d > ${OUTPUT_PATH}/nodekeystore.jks - - # get sslkeystore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ $.Values.nodeName }}/certs/sslkeystore | jq -r 'if .errors then . else . 
end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/{{ $.Values.nodeName }}/certs/sslkeystore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TLS_SSLKEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["sslkeystore.jks"]') - echo "${TLS_SSLKEYSTORE}" | base64 -d > ${OUTPUT_PATH}/sslkeystore.jks - - # get truststore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ $.Values.nodeName }}/certs/truststore | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/{{ $.Values.nodeName }}/certs/truststore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TLS_TRUSTSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["truststore.jks"]') - echo "${TLS_TRUSTSTORE}" | base64 -d > ${OUTPUT_PATH}/truststore.jks - - {{- if eq .Values.nodeConf.notary.type "cenm" }} - # get ca ssl-truststore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/root/certs | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/root/certs" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TLS_TRUSTSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["corda-ssl-trust-store.jks"]') - echo "${TLS_TRUSTSTORE}" | base64 -d > ${OUTPUT_PATH}/corda-ssl-trust-store.jks - {{- else }} - # get idman and networkmap certs - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ .Values.networkServices.idmanName }}/tlscerts | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/{{ .Values.networkServices.idmanName }}/tlscerts" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - IDMAN_CERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["tlscacerts"]') - echo "${IDMAN_CERT}" | base64 -d > ${OUTPUT_PATH}/idman.crt - - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/{{ .Values.networkServices.networkmapName }}/tlscerts | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/{{ .Values.networkServices.networkmapName }}/tlscerts" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - NETWORKMAP_CERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["tlscacerts"]') - echo "${NETWORKMAP_CERT}" | base64 -d > ${OUTPUT_PATH}/networkmap.crt - {{- end }} - echo "Done fetching all certificates from vault" - - # Fetching keystore credentials from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials/keystore | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials/keystore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - KEYSTORE_PASSWORD=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["keyStorePassword"]') - echo "${KEYSTORE_PASSWORD}"> ${OUTPUT_PATH}/kspass - - # Fetching truststore credentials from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials/truststore | jq -r 'if .errors then . else . 
end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials/truststore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - KEYSTORE_PASSWORD=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["trustStorePassword"]') - echo "${KEYSTORE_PASSWORD}"> ${OUTPUT_PATH}/tspass - - # Fetching ssl truststore credentials from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials/truststore | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials/truststore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - KEYSTORE_PASSWORD=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["ssl"]') - echo "${KEYSTORE_PASSWORD}"> ${OUTPUT_PATH}/sslpass - echo "Fetched credentials from vault" - volumeMounts: - - name: notary-certificates - mountPath: {{ $.Values.nodeConf.volume.baseDir }}/certificates - - name: db-healthcheck - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - COUNTER=1 - FLAG=true - # perform health check if db is up and running before starting corda node - while [ "$COUNTER" -le {{ $.Values.healthcheck.readinessthreshold }} ] - do - DB_NODE={{ .Values.dataSourceProperties.dbUrl }}:{{ .Values.dataSourceProperties.dbPort }} - STATUS=$(nc -vz $DB_NODE 2>&1 | grep -c open ) - if [ "$STATUS" == 0 ] - then - FLAG=false - else - FLAG=true - echo "DB up and running" - fi - if [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, retrying after {{ $.Values.healthcheck.readinesscheckinterval }} seconds" - COUNTER=`expr "$COUNTER" + 1` - sleep {{ $.Values.healthcheck.readinesscheckinterval }} - else - echo "SUCCESS!" - echo "DB up and running!" - exit 0 - break - fi - done - if [ "$COUNTER" -gt {{ $.Values.healthcheck.readinessthreshold }} ] || [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, no DB up and running. Giving up!" - exit 1 - break - fi - - name: init-cordapps - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - command: ["sh", "-c"] - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authPath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.nodeConf.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certSecretPrefix }} - args: - - |- - # creating cordapps dir in volume to keep jars - {{- if .Values.cordapps.getcordapps }} - # Created cordapps directory using permission 'm755' to provide access to user - mkdir -p -m775 ${BASE_DIR}/cordapps - mkdir -p /tmp/downloaded-jars - # setting up env to get secrets from vault - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - echo "Getting secrets from Vault Server" - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - - # save cordapps login password from vault - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials/cordapps | jq -r 'if .errors then . else . 
end') - REPO_USER_PASS=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["repo_password"]') - REPO_USER=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["repo_username"]') - - # Downloading official corda provided jars using curl - {{- range .Values.cordapps.jars }} - cd /tmp/downloaded-jars && curl -u $REPO_USER:$REPO_USER_PASS -O -L {{ .url }} - {{- end }} - cp -ar /tmp/downloaded-jars/* ${BASE_DIR}/cordapps - {{- end }} - volumeMounts: - - name: {{ .Values.nodeName }}-volume - mountPath: {{ .Values.nodeConf.volume.baseDir }} - containers: - - name: notary - image: "{{ .Values.image.nodeContainerName }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - env: - - name: ACCEPT_LICENSE - value: "{{required "You must accept the license agreement to run this software" .Values.acceptLicense }}" - - name: BASE_DIR - value: {{ $.Values.nodeConf.volume.baseDir }} - command: ["/bin/bash", "-c"] - args: - - |- - # Create directory - mkdir -p ${BASE_DIR}/etc - - # Create notary.conf configuration file - echo 'networkServices { - doormanURL="{{ .Values.networkServices.doormanURL }}" - networkMapURL="{{ .Values.networkServices.networkMapURL }}" - } - dataSourceProperties { - dataSource { - password = "{{ .Values.dataSourceProperties.dataSource.password }}" - url = "{{ .Values.dataSourceProperties.dataSource.url }}" - user = "{{ .Values.dataSourceProperties.dataSource.user }}" - } - dataSourceClassName = "{{ .Values.dataSourceProperties.dataSourceClassName }}" - } - notary { - serviceLegalName : "{{ .Values.nodeConf.notary.serviceLegalName }}" - validating = "{{ .Values.nodeConf.notary.validating }}" - } - - devMode = {{ .Values.nodeConf.devMode }} - emailAddress : "{{ .Values.nodeConf.emailAddress }}" - myLegalName : "{{ .Values.nodeConf.legalName }}" - p2pAddress : "{{ .Values.nodeConf.p2p.url }}:{{ .Values.service.p2pPort }}" - trustStorePassword: "TRUSTSTORE_PASSWORD" - keyStorePassword: "KEYSTORE_PASSWORD" - additionalP2PAddresses : ["{{ .Values.nodeConf.ambassador.p2pAddress }}"] - detectPublicIp : false - rpcSettings { - address="{{ .Values.service.rpc.address }}:{{ .Values.service.rpc.addressPort }}" - adminAddress="{{ .Values.service.rpc.admin.address }}:{{ .Values.service.rpc.admin.addressPort }}" - standAloneBroker="{{ .Values.service.rpc.standAloneBroker }}" - useSsl="{{ .Values.service.rpc.useSSL }}" - } - - rpcUsers=[ - { - username="{{ .Values.service.rpc.users.username }}" - password="{{ .Values.service.rpc.users.password }}" - permissions=[ - ALL - ] - } - ] - - sshd { - port={{ .Values.service.sshdPort }} - }' > ${BASE_DIR}/etc/notary.conf - - # Replace placeholders in notary.conf with actual passwords - export TRUSTSTORE_PASSWORD=$(cat ${BASE_DIR}/certificates/tspass) - sed -i -e "s*TRUSTSTORE_PASSWORD*${TRUSTSTORE_PASSWORD}*g" ${BASE_DIR}/etc/notary.conf - export KEYSTORE_PASSWORD=$(cat ${BASE_DIR}/certificates/kspass) - sed -i -e "s*KEYSTORE_PASSWORD*${KEYSTORE_PASSWORD}*g" ${BASE_DIR}/etc/notary.conf - - - # Clean or remove network-parameters on every restart - rm -rf ${BASE_DIR}/network-parameters - - {{- if eq .Values.nodeConf.notary.type "cenm" }} - # Add ssl-truststore to truststore - export SSLTRUSTSTORE_PASSWORD=$(cat ${BASE_DIR}/certificates/sslpass) - keytool -importkeystore -srckeystore ${BASE_DIR}/certificates/corda-ssl-trust-store.jks -srcstorepass $SSLTRUSTSTORE_PASSWORD -destkeystore ${BASE_DIR}/certificates/truststore.jks -deststorepass $TRUSTSTORE_PASSWORD -srcalias cordasslrootca -destalias cordasslrootca - {{- else }} - # Add idman and networkmap certificates 
to truststore - yes | keytool -importcert -file ${BASE_DIR}/certificates/networkmap.crt -storepass $TRUSTSTORE_PASSWORD -alias {{ .Values.networkServices.networkMapDomain }} -keystore ${BASE_DIR}/certificates/truststore.jks - yes | keytool -importcert -file ${BASE_DIR}/certificates/idman.crt -storepass $TRUSTSTORE_PASSWORD -alias {{ .Values.networkServices.idmanDomain }} -keystore ${BASE_DIR}/certificates/truststore.jks - {{- end }} - - # Start a new shell session - /bin/sh - - # Retrieve keystore password again - KEYSTORE_PASSWORD=$(cat ${BASE_DIR}/certificates/kspass) - - # Check if the 'corda.jar' file exists - if [ -f {{ .Values.nodeConf.jarPath }}/corda.jar ] - then - echo "\nCENM: starting Notary node ...\n" - - # Run migration scripts for database schema upgradation and then start the Corda-ent 'notary' node - java -Djavax.net.ssl.trustStore=${BASE_DIR}/certificates/truststore.jks -Djavax.net.ssl.trustStorePassword=$TRUSTSTORE_PASSWORD -Djavax.net.ssl.keyStore=${BASE_DIR}/certificates/sslkeystore.jks -Djavax.net.ssl.keyStorePassword=${KEYSTORE_PASSWORD} -jar {{ .Values.nodeConf.jarPath }}/corda.jar run-migration-scripts --core-schemas --app-schemas -f ${BASE_DIR}/etc/notary.conf --base-directory=${BASE_DIR} -v --logging-level=DEBUG - # start the Corda-ent 'notary' node, setting javax.net.ssl.keyStore as ${BASE_DIR}/certificates/sslkeystore.jks since keystore gets reset when using h2 ssl - java -Djavax.net.ssl.trustStore=${BASE_DIR}/certificates/truststore.jks -Djavax.net.ssl.trustStorePassword=$TRUSTSTORE_PASSWORD -Djavax.net.ssl.keyStore=${BASE_DIR}/certificates/sslkeystore.jks -Djavax.net.ssl.keyStorePassword=${KEYSTORE_PASSWORD} -jar {{ .Values.nodeConf.jarPath }}/corda.jar -f ${BASE_DIR}/etc/notary.conf --base-directory=${BASE_DIR} -v --logging-level=DEBUG - # Capture the exit code of the previous command - EXIT_CODE=${?} - else - echo "Error: 'corda.jar' file is not found in the {{ .Values.nodeConf.jarPath }} folder." 
- # Additionally, manually check the availability of 'corda.jar' file at the same path - ls -al {{ .Values.nodeConf.jarPath }} - # Set to '1' to indicate an error - EXIT_CODE=1 - fi - - # Handle node failure - if [ "${EXIT_CODE}" -ne "0" ] - then - HOW_LONG={{ .Values.sleepTimeAfterError }} - echo "\nNotary failed - exit code: ${EXIT_CODE} (error).\n" - echo "Going to sleep for requested ${HOW_LONG} seconds to let you login and investigate.\n" - sleep ${HOW_LONG} - fi - - echo "DONE" - volumeMounts: - - name: notary-certificates - mountPath: {{ $.Values.nodeConf.volume.baseDir }}/certificates - readOnly: false - - name: {{ .Values.nodeName }}-volume - mountPath: {{ $.Values.nodeConf.volume.baseDir }} - readOnly: false - resources: - requests: - memory: {{ .Values.nodeConf.pod.resources.requests }} - limits: - memory: {{ .Values.nodeConf.pod.resources.limits }} - - name: logs - image: "{{ .Values.image.nodeContainerName }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - env: - - name: ACCEPT_LICENSE - value: "{{required "You must accept the license agreement to run this software" .Values.acceptLicense }}" - command: ["/bin/bash", "-c"] - args: - - |- - # Change directory to the specified base directory for Corda-ent notary logs - cd {{ $.Values.nodeConf.volume.baseDir }}/ - # Continuously display the content of all log files in the 'logs' directory - tail -f logs/*.log 2>/dev/null - # If the logs are not available, enter an indefinite wait state - tail -f /dev/null - volumeMounts: - - name: {{ .Values.nodeName }}-volume - mountPath: {{ $.Values.nodeConf.volume.baseDir }} - imagePullSecrets: - - name: {{ .Values.image.imagePullSecret }} - volumes: - - name: notary-certificates - emptyDir: - medium: Memory - volumeClaimTemplates: - - metadata: - name: {{ .Values.nodeName }}-volume -{{- if .Values.nodeConf.pvc.annotations }} - annotations: -{{ toYaml .Values.nodeConf.pvc.annotations | indent 8 }} -{{- end }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }}-volume - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - storageClassName: {{ .Values.nodeConf.storage.name }} - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: {{ .Values.nodeConf.storage.memory }} diff --git a/platforms/r3-corda-ent/charts/corda-ent-notary/templates/service.yaml b/platforms/r3-corda-ent/charts/corda-ent-notary/templates/service.yaml deleted file mode 100644 index d4caabccfb2..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-notary/templates/service.yaml +++ /dev/null @@ -1,81 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} - labels: - run: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - selector: - app: {{ .Values.nodeName }} - # we need healthCheckNodePort set to get rid of logs pollution - {{- if (.Values.healthCheckNodePort) }} - healthCheckNodePort: {{ .Values.healthCheckNodePort }} - {{- end }} - {{- if (.Values.service.type) }} - type: {{ .Values.service.type }} - {{- end }} - ports: - - port: {{ .Values.service.p2pPort }} - targetPort: {{ .Values.service.p2pPort }} - protocol: TCP - name: http - - port: {{ .Values.service.rpc.addressPort }} - targetPort: {{ .Values.service.rpc.addressPort }} - protocol: TCP - name: rpc - - port: {{ .Values.service.sshdPort }} - targetPort: {{ .Values.service.sshdPort }} - protocol: TCP - name: ssh -{{- if $.Values.nodeConf.ambassador }} ---- -apiVersion: getambassador.io/v3alpha1 -kind: Host -metadata: - name: {{ .Values.nodeName }}-host -spec: - hostname: {{ .Values.nodeName }}.{{ .Values.nodeConf.ambassador.external_url_suffix }} - acmeProvider: - authority: none - requestPolicy: - insecure: - action: Reject - tlsSecret: - name: {{ .Values.nodeName }}-ambassador-certs - namespace: {{ .Values.metadata.namespace }} - tls: - min_tls_version: v1.2 ---- -apiVersion: getambassador.io/v3alpha1 -kind: TCPMapping -metadata: - name: {{ .Values.nodeName }}-p2p - namespace: {{ .Values.metadata.namespace }} -spec: - host: {{ .Values.nodeName }}.{{ .Values.nodeConf.ambassador.external_url_suffix }} - port: {{ .Values.nodeConf.ambassador.p2pPort }} - service: {{ .Values.nodeName }}.{{ .Values.metadata.namespace }}:{{ .Values.service.p2pPort }} ---- -apiVersion: getambassador.io/v3alpha1 -kind: TLSContext -metadata: - name: {{ .Values.nodeName }}-tlscontext - namespace: {{ .Values.metadata.namespace }} -spec: - hosts: - - {{ .Values.nodeName }}.{{ .Values.nodeConf.ambassador.external_url_suffix }} - secret: {{ .Values.nodeName }}-ambassador-certs.{{ .Values.metadata.namespace }} - secret_namespacing: true - min_tls_version: v1.2 -{{- end }} diff --git a/platforms/r3-corda-ent/charts/corda-ent-notary/values.yaml b/platforms/r3-corda-ent/charts/corda-ent-notary/values.yaml deleted file mode 100644 index 4a1d86a2b38..00000000000 --- a/platforms/r3-corda-ent/charts/corda-ent-notary/values.yaml +++ /dev/null @@ -1,211 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Default values for Notary. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -# Provide the name of the node -# Eg. nodeName: notary -nodeName: notary - -# This section contains the Enterprise-Corda node metadata. -metadata: - # Provide the namespace for the Corda node. - # Eg. namespace: cenm - namespace: cenm - # Provide the labels to the Corda node. - labels: - -# Provide image for the containers -image: - # Provide the alpine utils image. 
- # NOTE: The alpine image used is the base alpine image with CURL installed. - # Eg. initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - # Enterprise node image - # Eg. adopblockchaincloud0502.azurecr.io/corda/notary:1.2-zulu-openjdk8u242 - nodeContainerName: adopblockchaincloud0502.azurecr.io/corda/notary:1.2-zulu-openjdk8u242 - # Provide the docker-registry secret created and stored in kubernetes cluster as a secret. - # Eg. imagePullSecret: regcred - imagePullSecret: "" - # Image pull policy - # Eg. Always - pullPolicy: IfNotPresent - -# required parameter -# Accept Corda Enterprise license should be YES. -acceptLicense: YES - -############################################################################################# -# This section contains the vault related information. # -############################################################################################# -# NOTE: Make sure that the vault is already unsealed, intialized and configured to -# use Kubernetes service account token based authentication. - -vault: - # Provide the vault address - # Eg. address: http://vault.example.com:8200 - address: - # Provide the vault role used. - # Eg. role: vault-role - role: vault-role - # Provide the authpath configured to be used. - # Eg. authpath: entcordacenm - authPath: entcordacenm - # Provide the service account name autheticated to vault. - # NOTE: Make sure that the service account is already created and autheticated to use the vault. - # Eg. serviceaccountname: vault-auth - serviceAccountName: vault-auth - # Provide the vault path where the certificates are stored - # Eg. certsecretprefix: secret/organisation-name - certSecretPrefix: secret/organisation-name - # Number of retries to check contents from vault - retries: - # Interval in seconds between retries - retryInterval: - -############################################################# -# Notary and CorDapps Configuration # -############################################################# -cordapps: - #Provide if you want to provide jars in cordapps - #Eg. getcordapps: true or false - getcordapps: true - repository: - jars: - #Provide url to download the jar using wget cmd - #Eg. url: https://ci-artifactory.corda.r3cev.com/artifactory/corda-releases/net/corda/corda-finance/3.3-corda/corda-finance-3.3-corda.jar - - url: - - url: - -nodeConf: - p2p: - url: - ambassador: - p2pPort: - external_url_suffix: - p2pAddress: - # Notary legal name - # Eg. legalName: O=Notary,OU=Notary,L=London,C=GB - legalName: - # Email address - # Eg. emailAddress: dev-node@bevel.com - emailAddress: - notary: - serviceLegalName: - # Notary is validator Ex : isValidating: true - validating: - # Notary type Ex cenm or notary - # Eg. type: cenm - type: - # Dev Mode - devMode: false - # Provide volume related specifications - volume: - # Ex baseDir: /opt/corda/base - baseDir: /opt/corda/base - # where is node jar is stored - jarPath: - # where service configuration files are stored - configPath: - # Specify the maximum size of the memory allocation pool - cordaJar: - # Provide the memory size. - # Eg. memorySize: 4096 (if using kilobytes) - # Eg. memorySize: 512 (if using megabytes) - # Eg. memorySize: 1 (if using gigabytes) - memorySize: - # Provide the unit of greatness for the size, one of three options: - # - k or K for kilobytes - # - m or M for megabytes - # - g or G for gigabytes - # Eg. 
unit: M - unit: - # Set limits of .jar - pod: - resources: - # Provide the limit memory for node - # Eg. limits: 512M - limits: - # Provide the requests memory for node - # Eg. requests: 550M - requests: - pvc: - # annotations: - # key: "value" - annotations: {} - deployment: - # annotations: - # key: "value" - annotations: {} - # Provide the number of replicas for your pods - # Eg. replicas: 1 - replicas: 1 - -networkServices: - # Ex. idmanName: idman - idmanName: idman - # doormanURL defines the accesspoint for the Identity Manager server (protocol + domain name + port, eg. http://my-identity-manager:1000) - doormanURL: - idmanDomain: - # Ex. networkmapName: networkmap - networkmapName: - # networkMapURL defines the accesspoint for the Network Map server (protocol + domain name + port, eg. http://my-network-map:10000) - networkMapURL: - networkMapDomain: - -service: - # p2pPort defines the port number of inbound connections - # Eg. p2pPort: 40000 - p2pPort: - sshdPort: - notaryPublicIP: - rpc: - address: - addressPort: - admin: - address: - addressPort: - standAloneBroker: - useSSL: - users: - username: - password: - -############################################################# -# Database Options and Configuration # -############################################################# -dataSourceProperties: - dataSource: - # Ex. password: "ziAscD0MJnj4n4xkFWY6XuMBuw9bvYC7" - password: - # Ex. url: "jdbc:h2:tcp://notarydb:9101/persistence;DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=10000;WRITE_DELAY=100;AUTO_RECONNECT=TRUE;" - url: - # Ex user: "sa" - user: - # Ex data source class name: "org.h2.jdbcx.JdbcDataSource" - dataSourceClassName: - # dbUrl: notarydb - dbUrl: - # dbPort: 9101 - dbPort: - -############################################################# -# Settings # -############################################################# -# sleep time in seconds when error while registration -# Ex. sleepTimeAfterError: 120 -sleepTimeAfterError: -# custom sleep time in seconds -sleepTime: -healthcheck: - #Provide the wait interval in seconds for any readiness check - #Eg. readinesscheckinterval: 5 - readinesscheckinterval: 5 - #Provide the threshold number of retries - #Eg. readinessthreshold: 2 - readinessthreshold: 2 diff --git a/platforms/r3-corda-ent/charts/enterprise-init/Chart.yaml b/platforms/r3-corda-ent/charts/enterprise-init/Chart.yaml new file mode 100644 index 00000000000..8dbd3105356 --- /dev/null +++ b/platforms/r3-corda-ent/charts/enterprise-init/Chart.yaml @@ -0,0 +1,25 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +apiVersion: v1 +name: enterprise-init +description: "R3 Corda: Initializes Corda Enterprise Network." 
+version: 1.0.0
+appVersion: "latest"
+keywords:
+  - bevel
+  - corda
+  - hyperledger
+  - enterprise
+  - blockchain
+  - deployment
+  - accenture
+home: https://hyperledger-bevel.readthedocs.io/en/latest/
+sources:
+  - https://github.com/hyperledger/bevel
+maintainers:
+  - name: Hyperledger Bevel maintainers
+    email: bevel@lists.hyperledger.org
diff --git a/platforms/r3-corda-ent/charts/enterprise-init/README.md b/platforms/r3-corda-ent/charts/enterprise-init/README.md
new file mode 100644
index 00000000000..bd59aa13c4e
--- /dev/null
+++ b/platforms/r3-corda-ent/charts/enterprise-init/README.md
@@ -0,0 +1,96 @@
+[//]: # (##############################################################################################)
+[//]: # (Copyright Accenture. All Rights Reserved.)
+[//]: # (SPDX-License-Identifier: Apache-2.0)
+[//]: # (##############################################################################################)
+
+# enterprise-init
+
+This chart is a component of Hyperledger Bevel. The enterprise-init chart initializes a Kubernetes namespace for a Corda Enterprise network. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details.
+
+## TL;DR
+
+```bash
+helm repo add bevel https://hyperledger.github.io/bevel
+helm install init bevel/enterprise-init
+```
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+
+If Hashicorp Vault is used, then
+- HashiCorp Vault Server 1.13.1+
+
+> **Important**: Also check the dependent charts.
+
+## Installing the Chart
+
+To install the chart with the release name `init`:
+
+```bash
+helm repo add bevel https://hyperledger.github.io/bevel
+helm install init bevel/enterprise-init
+```
+
+The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `init` deployment:
+
+```bash
+helm uninstall init
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Parameters
+
+### Global parameters
+These parameters are referred to in the same way in each parent or child chart
+| Name | Description | Default Value |
+|--------|---------|-------------|
+|`global.serviceAccountName` | The service account name that will be created for Vault Auth and K8s Secret management| `vault-auth` |
+| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. Currently only `aws` and `minikube` are tested | `aws` |
+| `global.cluster.cloudNativeServices` | only `false` is implemented; `true` (using Cloud Native Services: SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) is reserved for the future | `false` |
+| `global.cluster.kubernetesUrl` | URL of the Kubernetes Cluster | `""` |
+| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` and `kubernetes` are supported. | `hashicorp` |
+| `global.vault.role` | Role used for authentication with Vault | `vault-role` |
+| `global.vault.network` | Network type that is being deployed | `corda` |
+| `global.vault.address`| URL of the Vault server.
| `""` | +| `global.vault.authPath` | Authentication path for Vault | `supplychain` | +| `global.vault.secretEngine` | The value for vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | The value for vault secret prefix which must start with `data/` | `data/supplychain` | + +### Settings + +| Name | Description | Default Value | +|--------|---------|-------------| +| `settings.secondaryInit` | Flag to identitymanager and networkmap certs from `files` for additional nodes, true only when tls: true | `false` | + +## License + +This chart is licensed under the Apache v2.0 license. + +Copyright © 2024 Accenture + +### Attribution + +This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: + +``` +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +``` diff --git a/platforms/r3-corda/charts/corda-certs-gen/files/openssl.conf b/platforms/r3-corda-ent/charts/enterprise-init/files/openssl.conf similarity index 100% rename from platforms/r3-corda/charts/corda-certs-gen/files/openssl.conf rename to platforms/r3-corda-ent/charts/enterprise-init/files/openssl.conf diff --git a/platforms/r3-corda-ent/charts/enterprise-init/requirements.yaml b/platforms/r3-corda-ent/charts/enterprise-init/requirements.yaml new file mode 100644 index 00000000000..b1195396c5f --- /dev/null +++ b/platforms/r3-corda-ent/charts/enterprise-init/requirements.yaml @@ -0,0 +1,11 @@ +dependencies: + - name: bevel-vault-mgmt + repository: "file://../../../shared/charts/bevel-vault-mgmt" + tags: + - bevel + version: ~1.0.0 + - name: bevel-scripts + repository: "file://../../../shared/charts/bevel-scripts" + tags: + - bevel + version: ~1.0.0 diff --git a/platforms/r3-corda-ent/charts/enterprise-init/templates/_helpers.tpl b/platforms/r3-corda-ent/charts/enterprise-init/templates/_helpers.tpl new file mode 100644 index 00000000000..b4f20e9205d --- /dev/null +++ b/platforms/r3-corda-ent/charts/enterprise-init/templates/_helpers.tpl @@ -0,0 +1,29 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "enterprise-init.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "enterprise-init.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "enterprise-init.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/platforms/r3-corda-ent/charts/enterprise-init/templates/configmap.yaml b/platforms/r3-corda-ent/charts/enterprise-init/templates/configmap.yaml new file mode 100644 index 00000000000..81cdd52c0b4 --- /dev/null +++ b/platforms/r3-corda-ent/charts/enterprise-init/templates/configmap.yaml @@ -0,0 +1,53 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: openssl-conf + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: "openssl-config" + app.kubernetes.io/part-of: {{ include "enterprise-init.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} +data: + openssl.conf: |+ +{{ .Files.Get "files/openssl.conf" | indent 4 }} +{{- if .Values.settings.secondaryInit }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: idman-tls-certs + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: "idman-tls-certs" + app.kubernetes.io/part-of: {{ include "enterprise-init.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} +data: + tls.crt: |+ +{{ .Files.Get "files/idman.crt" | indent 4 }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: nms-tls-certs + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: "nms-tls-certs" + app.kubernetes.io/part-of: {{ include "enterprise-init.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} +data: + tls.crt: |+ +{{ .Files.Get "files/nms.crt" | indent 4 }} +{{- end }} diff --git a/platforms/r3-corda-ent/charts/enterprise-init/values.yaml b/platforms/r3-corda-ent/charts/enterprise-init/values.yaml new file mode 100644 index 00000000000..1c97e0c9af1 --- /dev/null +++ b/platforms/r3-corda-ent/charts/enterprise-init/values.yaml @@ -0,0 +1,35 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +global: + #Provide the service account name which will be created. + serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # only 'false' is implemented + #Provide the kubernetes host url + #Eg. kubernetesUrl: https://10.3.8.5:8443 + kubernetesUrl: + vault: + #Provide the type of vault + type: hashicorp + #Provide the vault role used. + role: vault-role + #Provide the network type + network: corda-ent + #Provide the vault server address + address: + #Provide the vault authPath configured to be used. + authPath: supplychain + #Provide the secret engine. 
+    secretEngine: secretsv2
+    #Provide the vault path where the secrets will be stored
+    secretPrefix: "data/supplychain"
+
+settings:
+  # Flag to copy identity manager and networkmap certs, true only when tls: true
+  secondaryInit: false
diff --git a/platforms/r3-corda-ent/charts/enterprise-node/Chart.yaml b/platforms/r3-corda-ent/charts/enterprise-node/Chart.yaml
new file mode 100644
index 00000000000..de42b8d993b
--- /dev/null
+++ b/platforms/r3-corda-ent/charts/enterprise-node/Chart.yaml
@@ -0,0 +1,25 @@
+##############################################################################################
+# Copyright Accenture. All Rights Reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+##############################################################################################
+
+apiVersion: v1
+name: enterprise-node
+description: "R3 Corda Enterprise Node/Notary Service"
+version: 1.0.0
+appVersion: "latest"
+keywords:
+  - bevel
+  - corda
+  - hyperledger
+  - enterprise
+  - blockchain
+  - deployment
+  - accenture
+home: https://hyperledger-bevel.readthedocs.io/en/latest/
+sources:
+  - https://github.com/hyperledger/bevel
+maintainers:
+  - name: Hyperledger Bevel maintainers
+    email: bevel@lists.hyperledger.org
diff --git a/platforms/r3-corda-ent/charts/enterprise-node/README.md b/platforms/r3-corda-ent/charts/enterprise-node/README.md
new file mode 100644
index 00000000000..7340e8ddbfe
--- /dev/null
+++ b/platforms/r3-corda-ent/charts/enterprise-node/README.md
@@ -0,0 +1,130 @@
+[//]: # (##############################################################################################)
+[//]: # (Copyright Accenture. All Rights Reserved.)
+[//]: # (SPDX-License-Identifier: Apache-2.0)
+[//]: # (##############################################################################################)
+
+# corda enterprise node-service
+
+This chart is a component of Hyperledger Bevel. The enterprise-node chart deploys an R3 Corda Enterprise node. If enabled, the keys are stored in the configured Vault as well as in Kubernetes secrets. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details.
+
+## TL;DR
+
+```bash
+helm repo add bevel https://hyperledger.github.io/bevel
+helm install node bevel/enterprise-node
+```
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+
+If Hashicorp Vault is used, then
+- HashiCorp Vault Server 1.13.1+
+
+> **Important**: Ensure the `enterprise-init` and `cenm` charts have been installed before installing this. Also check the dependent charts.
+
+## Installing the Chart
+
+To install the chart with the release name `node`:
+
+```bash
+helm repo add bevel https://hyperledger.github.io/bevel
+helm install node bevel/enterprise-node
+```
+
+The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `node` deployment:
+
+```bash
+helm uninstall node
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
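For orientation before the parameter reference that follows, a minimal values override for this chart might look like the sketch below. This is illustrative only: the file name `values-node.yaml` and every concrete value are placeholders rather than chart defaults, and the keys simply mirror the `global.*` and `nodeConf.*` parameters documented in the tables after this note.

```yaml
# values-node.yaml -- illustrative override only; adjust every value to your environment
global:
  serviceAccountName: vault-auth
  vault:
    type: hashicorp
    role: vault-role
    address: http://vault.example.com:8200        # placeholder Vault endpoint
    authPath: supplychain
    secretEngine: secretsv2
    secretPrefix: data/supplychain
  proxy:
    provider: ambassador
    externalUrlSuffix: test.blockchaincloudpoc.com

nodeConf:
  legalName: "O=Node1,OU=Node,L=London,C=GB"
  doormanURL: http://idman.example.com:10000       # placeholder Identity Manager endpoint
  networkMapURL: http://networkmap.example.com:10000  # placeholder Network Map endpoint
  rpc:
    users:
      - name: node1operations
        password: node1operationspassword
        permissions: ALL
```

Such a file could then be supplied at install time, for example with `helm install node bevel/enterprise-node -f values-node.yaml`.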
+
+## Parameters
+
+### Global parameters
+These parameters are referred to in the same way in each parent or child chart
+| Name | Description | Default Value |
+|--------|---------|-------------|
+|`global.serviceAccountName` | The service account name that will be used for Vault Auth management| `vault-auth` |
+| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. Currently only `aws` and `minikube` are tested | `aws` |
+| `global.cluster.cloudNativeServices` | only `false` is implemented; `true` (using Cloud Native Services: SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) is reserved for the future | `false` |
+| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` is supported. | `hashicorp` |
+| `global.vault.role` | Role used for authentication with Vault | `vault-role` |
+| `global.vault.address`| URL of the Vault server. | `""` |
+| `global.vault.authPath` | Authentication path for Vault | `supplychain` |
+| `global.vault.secretEngine` | The value for vault secret engine name | `secretsv2` |
+| `global.vault.secretPrefix` | The value for vault secret prefix which must start with `data/` | `data/supplychain` |
+| `global.proxy.provider` | The proxy or Ingress provider. Can be `none` or `ambassador` | `ambassador` |
+| `global.proxy.externalUrlSuffix` | The External URL suffix at which the Corda node P2P and RPC service will be available | `test.blockchaincloudpoc.com` |
+
+### Storage
+
+| Name | Description | Default Value |
+|--------|---------|-------------|
+| `storage.size` | Size of the Volume needed for the node service | `1Gi` |
+| `storage.dbSize` | Size of the Volume needed for h2 | `5Gi` |
+| `storage.allowedTopologies.enabled` | Check [bevel-storageclass](../../../shared/charts/bevel-storageclass/README.md) for details | `false` |
+
+
+### Image
+| Name | Description | Default Value |
+| -------------| ---------- | --------- |
+| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` |
+| `image.pullPolicy` | Pull policy to be used for the Docker images | `IfNotPresent` |
+| `image.bevelAlpine.repository` | Bevel alpine image repository | `ghcr.io/hyperledger/bevel-alpine`|
+| `image.bevelAlpine.tag` | Bevel alpine image tag | `latest`|
+| `image.node.repository` | node image repository | `corda/corda-enterprise` |
+| `image.node.tag` | node image tag | `4.10.3-zulu-openjdk8-alpine` |
+
+### node nodeConf
+
+| Name | Description | Default Value |
+| ----------------| ----------- | ------------- |
+| `nodeConf.creds.truststore` | Truststore password | `trustpass` |
+| `nodeConf.creds.keystore` | Keystore password | `cordacadevpass` |
+| `nodeConf.crlCheckSoftFail` | Allow the certificate revocation list (CRL) check to fail silently | `true` |
+| `nodeConf.tlsCertCrlDistPoint` | TLS certificate CRL distribution point | `""` |
+| `nodeConf.tlsCertCrlIssuer` | TLS certificate CRL issuer | `""` |
+| `nodeConf.devMode` | Flag for node dev mode | `false` |
+| `nodeConf.allowDevCorDapps.enabled` | Flag to allow dev corDapps | `true` |
+| `nodeConf.p2pPort` | node p2p port | `10002` |
+| `nodeConf.rpc.port` | node rpc port | `10003` |
+| `nodeConf.rpc.adminPort` | node rpc admin port | `10005` |
+| `nodeConf.rpc.users` | List of node RPC users | `list` |
+| `nodeConf.ssh.enabled` | Enable node ssh service | `true` |
+| `nodeConf.ssh.sshdPort` | node ssh port | `2222` |
+| `nodeConf.removeKeysOnDelete` | Remove node keys on helm uninstallation | `false` |
+| `nodeConf.legalName` |
node legal name | `O=node1,OU=node,L=London,C=GB` | +| `nodeConf.firewall.enabled` | Corda Enterprise node firewall enabled | `false` | + +## License + +This chart is licensed under the Apache v2.0 license. + +Copyright © 2024 Accenture + +### Attribution + +This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: + +``` +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +``` diff --git a/platforms/r3-corda-ent/charts/enterprise-node/files/node-initial-registration.sh b/platforms/r3-corda-ent/charts/enterprise-node/files/node-initial-registration.sh new file mode 100644 index 00000000000..b62d92cbdd4 --- /dev/null +++ b/platforms/r3-corda-ent/charts/enterprise-node/files/node-initial-registration.sh @@ -0,0 +1,48 @@ +#!/bin/sh +NETWORK_ROOT_TRUSTSTORE=/certs/network-root-truststore.jks +JAVA_ARGS="-Dcapsule.jvm.args='-Xmx3G'" + +{{- if eq .Values.global.proxy.provider "ambassador" }} +CUSTOM_SSL_TRUSTSTORE=/opt/corda/certificates/corda-ssl-custom-trust-store.jks +JAVA_ARGS="-Dcapsule.jvm.args='-Xmx3G' -Djavax.net.ssl.trustStore=${CUSTOM_SSL_TRUSTSTORE}" +yes | keytool -importcert -file /certs/doorman/tls.crt -storepass {{ .Values.nodeConf.creds.truststore }} -alias {{ .Values.nodeConf.doormanDomain }} -keystore $CUSTOM_SSL_TRUSTSTORE +yes | keytool -importcert -file /certs/nms/tls.crt -storepass {{ .Values.nodeConf.creds.truststore }} -alias {{ .Values.nodeConf.networkMapDomain }} -keystore $CUSTOM_SSL_TRUSTSTORE +{{- end }} + +while true +do + if [ ! -f certificates/nodekeystore.jks ] || [ ! -f certificates/sslkeystore.jks ] || [ ! -f certificates/truststore.jks ] + then + echo + echo "Node: running initial registration ..." + echo + java $JAVA_ARGS \ + -jar bin/corda.jar \ + initial-registration \ + --config-file=etc/node.conf \ + --log-to-console \ + --network-root-truststore ${NETWORK_ROOT_TRUSTSTORE} \ + --network-root-truststore-password {{ .Values.network.creds.truststore }} + EXIT_CODE=${?} + echo + echo "Initial registration exit code: ${EXIT_CODE}" + echo + else + echo + echo "Node: already registered to Identity Manager - skipping initial registration." + echo + EXIT_CODE="0" + break + fi +done + +if [ "${EXIT_CODE}" -ne "0" ] +then + echo + echo "Node initial registration failed - exit code: ${EXIT_CODE} (error)" + echo + echo "Going to sleep for the requested {{ .Values.sleepTimeAfterError }} seconds to let you log in and investigate." 
+ echo + sleep {{ .Values.sleepTimeAfterError }} +fi +echo diff --git a/platforms/r3-corda-ent/charts/enterprise-node/files/node.conf b/platforms/r3-corda-ent/charts/enterprise-node/files/node.conf new file mode 100644 index 00000000000..03813b75e2f --- /dev/null +++ b/platforms/r3-corda-ent/charts/enterprise-node/files/node.conf @@ -0,0 +1,67 @@ +emailAddress : "dev-node@bevel.com" +myLegalName : "{{ .Values.nodeConf.legalName }}" +p2pAddress: "{{ .Release.Name }}.{{ .Release.Namespace }}:{{ .Values.nodeConf.p2pPort }}" +networkServices: { + doormanURL="{{ .Values.nodeConf.doormanURL }}" + networkMapURL="{{ .Values.nodeConf.networkMapURL }}" +} +crlCheckSoftFail: {{ .Values.nodeConf.crlCheckSoftFail }} +{{- if ne .Values.nodeConf.tlsCertCrlDistPoint "" }} +tlsCertCrlDistPoint: "{{ .Values.nodeConf.tlsCertCrlDistPoint }}", +tlsCertCrlIssuer: "{{ .Values.nodeConf.tlsCertCrlIssuer }}" +{{- end }} +devMode: {{ .Values.nodeConf.devMode }} +{{- if .Values.nodeConf.ssh.enabled }} +sshd: { + port: {{ .Values.nodeConf.ssh.sshdPort }} +} +{{- end }} +rpcSettings: { + address: "0.0.0.0:{{ .Values.nodeConf.rpc.port }}", + adminAddress: "0.0.0.0:{{ .Values.nodeConf.rpc.adminPort }}" +} +rpcUsers: [ +{{- range $user := .Values.nodeConf.rpc.users }} + { + user: "{{ $user.name }}", + password: "{{ $user.password }}", + permissions: [ + "{{ $user.permissions }}" + ] + } +{{- end }} +] +{{- if .Values.nodeConf.monitoring.enabled }} +jmxMonitoringHttpPort: {{ .Values.nodeConf.monitoring.port }} +{{- end }} + +trustStorePassword: {{ .Values.nodeConf.creds.truststore }} +keyStorePassword : {{ .Values.nodeConf.creds.keystore }} +detectPublicIp: false +additionalP2PAddresses: ["{{ .Release.Name }}.{{ .Values.global.proxy.externalUrlSuffix }}:443"] +messagingServerAddress: "0.0.0.0:{{ .Values.nodeConf.p2pPort }}" +messagingServerExternal: false +enterpriseConfiguration: { + externalBridge: {{ .Values.firewall.enabled }} +} +{{- if .Values.nodeConf.allowDevCorDapps.enabled }} +cordappSignerKeyFingerprintBlacklist: [ +] +{{- end }} +dataSourceProperties: { + dataSourceClassName: "{{ .Values.dataSourceProperties.dataSource.dataSourceClassName }}", + dataSource.url: "{{ .Values.dataSourceProperties.dataSource.url }}", + dataSource.user: "{{ .Values.dataSourceProperties.dataSource.user }}", + dataSource.password: "{{ .Values.dataSourceProperties.dataSource.password }}" +} +database = { +{{- if eq .Values.dataSourceProperties.dataSource.dataSourceClassName "oracle.jdbc.pool.OracleDataSource" }} + schema = xe +{{- end}} +} +{{- if .Values.nodeConf.notary }} +notary { + serviceLegalName = "{{ .Values.nodeConf.notary.serviceLegalName }}" + validating = {{ .Values.nodeConf.notary.validating }} +} +{{- end }} diff --git a/platforms/r3-corda-ent/charts/enterprise-node/files/run.sh b/platforms/r3-corda-ent/charts/enterprise-node/files/run.sh new file mode 100644 index 00000000000..0b144bd264f --- /dev/null +++ b/platforms/r3-corda-ent/charts/enterprise-node/files/run.sh @@ -0,0 +1,50 @@ +#!/bin/sh + +NODE_SSL_TRUSTSTORE=/opt/corda/certificates/truststore.jks + +{{- if eq .Values.global.proxy.provider "ambassador" }} +yes | keytool -importcert -file /certs/doorman/tls.crt -storepass {{ .Values.nodeConf.creds.truststore }} -alias {{ .Values.nodeConf.doormanDomain }} -keystore $NODE_SSL_TRUSTSTORE +yes | keytool -importcert -file /certs/nms/tls.crt -storepass {{ .Values.nodeConf.creds.truststore }} -alias {{ .Values.nodeConf.networkMapDomain }} -keystore $NODE_SSL_TRUSTSTORE +{{- end }} + + +# +# main run +# +if [ -f 
bin/corda.jar ]
+then
+  echo "running DB migration.."
+  echo
+  java -Djavax.net.ssl.trustStore=$NODE_SSL_TRUSTSTORE \
+    -Djavax.net.ssl.keyStore=/opt/corda/certificates/sslkeystore.jks \
+    -Djavax.net.ssl.keyStorePassword={{ .Values.nodeConf.creds.keystore }} \
+    -jar bin/corda.jar run-migration-scripts --core-schemas --app-schemas \
+    -f etc/node.conf
+  echo
+  echo "Corda: starting node ..."
+  echo
+  java -Djavax.net.ssl.trustStore=$NODE_SSL_TRUSTSTORE \
+    -Djavax.net.ssl.trustStorePassword={{ .Values.nodeConf.creds.truststore }} \
+    -Djavax.net.ssl.keyStore=/opt/corda/certificates/sslkeystore.jks \
+    -Djavax.net.ssl.keyStorePassword={{ .Values.nodeConf.creds.keystore }} \
+    -jar bin/corda.jar \
+    -f etc/node.conf
+  # Capture the node's exit code immediately, before another command overwrites ${?}
+  EXIT_CODE=${?}
+  echo
+else
+  echo "Missing node jar file in bin directory:"
+  ls -al bin
+  EXIT_CODE=110
+fi
+
+if [ "${EXIT_CODE}" -ne "0" ]
+then
+  HOW_LONG={{ .Values.sleepTimeAfterError }}
+  echo
+  echo "Node failed - exit code: ${EXIT_CODE} (error)"
+  echo
+  echo "Going to sleep for the requested ${HOW_LONG} seconds to let you log in and investigate."
+  echo
+  sleep ${HOW_LONG}
+fi
+echo
diff --git a/platforms/r3-corda-ent/charts/enterprise-node/requirements.yaml b/platforms/r3-corda-ent/charts/enterprise-node/requirements.yaml
new file mode 100644
index 00000000000..b79c80ac4db
--- /dev/null
+++ b/platforms/r3-corda-ent/charts/enterprise-node/requirements.yaml
@@ -0,0 +1,14 @@
+dependencies:
+  - name: bevel-storageclass
+    alias: storage
+    repository: "file://../../../shared/charts/bevel-storageclass"
+    tags:
+      - storage
+    version: ~1.0.0
+  - name: corda-certs-gen
+    alias: tls
+    repository: "file://../../../r3-corda/charts/corda-certs-gen"
+    tags:
+      - bevel
+    version: ~1.0.0
+    condition: tls.enabled
diff --git a/platforms/r3-corda-ent/charts/enterprise-node/templates/_helpers.tpl b/platforms/r3-corda-ent/charts/enterprise-node/templates/_helpers.tpl
new file mode 100644
index 00000000000..8e8767744c3
--- /dev/null
+++ b/platforms/r3-corda-ent/charts/enterprise-node/templates/_helpers.tpl
@@ -0,0 +1,43 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "node.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "node.fullname" -}}
+{{- $name := default .Chart.Name -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "node.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create node url depending on proxy mode
+*/}}
+{{- define "node.URL" -}}
+{{- $port := .Values.port | int -}}
+{{- $extport := 443 | int -}}
+{{- $protocol := "https://" -}}
+{{- if eq .Values.global.proxy.provider "ambassador" -}}
+  {{- printf "https://%s.%s:%d" .Release.Name .Values.global.proxy.externalUrlSuffix $extport }}
+{{- else -}}
+  {{- printf "http://%s.%s:%d" .Release.Name .Release.Namespace $port }}
+{{- end -}}
+{{- end -}}
diff --git a/platforms/r3-corda-ent/charts/enterprise-node/templates/configmap.yaml b/platforms/r3-corda-ent/charts/enterprise-node/templates/configmap.yaml
new file mode 100644
index 00000000000..9be47c01c7f
--- /dev/null
+++ b/platforms/r3-corda-ent/charts/enterprise-node/templates/configmap.yaml
@@ -0,0 +1,28 @@
+##############################################################################################
+# Copyright Accenture. All Rights Reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+##############################################################################################
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "node.fullname" . }}-conf
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app.kubernetes.io/name: {{ include "node.fullname" . }}
+    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    app.kubernetes.io/part-of: {{ include "node.fullname" . }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/namespace: {{ .Release.Namespace }}
+    app.kubernetes.io/release: {{ .Release.Name }}
+data:
+  node.conf: |+
+{{ tpl (.Files.Get "files/node.conf") . | indent 4 }}
+
+  node-initial-registration.sh: |+
+{{ tpl (.Files.Get "files/node-initial-registration.sh") . | nindent 4 }}
+
+  run.sh: |+
+{{ tpl (.Files.Get "files/run.sh") . | nindent 4 }}
diff --git a/platforms/r3-corda-ent/charts/enterprise-node/templates/service.yaml b/platforms/r3-corda-ent/charts/enterprise-node/templates/service.yaml
new file mode 100644
index 00000000000..2c7d8895b75
--- /dev/null
+++ b/platforms/r3-corda-ent/charts/enterprise-node/templates/service.yaml
@@ -0,0 +1,72 @@
+##############################################################################################
+# Copyright Accenture. All Rights Reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+##############################################################################################
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ .Release.Name }}
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app.kubernetes.io/name: node-service
+    app.kubernetes.io/component: node
+    app.kubernetes.io/part-of: {{ include "node.fullname" . }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/namespace: {{ .Release.Namespace }}
+    app.kubernetes.io/release: {{ .Release.Name }}
+spec:
+  type: ClusterIP
+  selector:
+    app.kubernetes.io/name: node-statefulset
+    app.kubernetes.io/component: node
+    app.kubernetes.io/part-of: {{ include "node.fullname" .
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + ports: + - port: {{ .Values.nodeConf.p2pPort }} + targetPort: {{ .Values.nodeConf.p2pPort }} + protocol: TCP + name: p2p + - port: {{ .Values.nodeConf.rpc.port }} + targetPort: {{ .Values.nodeConf.rpc.port }} + protocol: TCP + name: rpc +{{- if .Values.nodeConf.ssh.enabled }} + - port: {{ .Values.nodeConf.ssh.sshdPort }} + targetPort: {{ .Values.nodeConf.ssh.sshdPort }} + protocol: TCP + name: ssh +{{- end }} +{{- if eq .Values.global.proxy.provider "ambassador" }} +--- +## Host for doorman +apiVersion: getambassador.io/v3alpha1 +kind: Host +metadata: + name: {{ .Release.Name }}-node +spec: + hostname: {{ .Release.Name }}.{{ .Values.global.proxy.externalUrlSuffix }} + acmeProvider: + authority: none + requestPolicy: + insecure: + action: Reject + tlsSecret: + name: {{ .Release.Name }}-tls-certs + namespace: {{ .Release.Namespace }} +--- +## Mapping for nms port +apiVersion: getambassador.io/v3alpha1 +kind: Mapping +metadata: + name: {{ .Release.Name }}-mapping + namespace: {{ .Release.Namespace }} +spec: + host: {{ .Release.Name }}.{{ .Values.global.proxy.externalUrlSuffix }} + prefix: / + service: {{ .Release.Name }}.{{ .Release.Namespace }}:{{ .Values.nodeConf.p2pPort }} +{{- end }} diff --git a/platforms/r3-corda-ent/charts/enterprise-node/templates/statefulset.yaml b/platforms/r3-corda-ent/charts/enterprise-node/templates/statefulset.yaml new file mode 100644 index 00000000000..35d44ce3a10 --- /dev/null +++ b/platforms/r3-corda-ent/charts/enterprise-node/templates/statefulset.yaml @@ -0,0 +1,424 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "node.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ include "node.fullname" . }} + app.kubernetes.io/name: node-statefulset + app.kubernetes.io/component: node + app.kubernetes.io/part-of: {{ include "node.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} +spec: + replicas: 1 + podManagementPolicy: OrderedReady + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ include "node.fullname" . }} + app.kubernetes.io/name: node-statefulset + app.kubernetes.io/component: node + app.kubernetes.io/part-of: {{ include "node.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + serviceName: {{ include "node.fullname" . 
}} + volumeClaimTemplates: + - metadata: + name: node-h2 + spec: + storageClassName: storage-{{ .Release.Name }} + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storage.dbSize }} + {{- if .Values.nodeConf.notary }} + - metadata: + name: notary-nodeinfo + spec: + storageClassName: storage-{{ .Release.Name }} + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storage.size }} + {{- end }} + - metadata: + name: node-cordapps + spec: + storageClassName: storage-{{ .Release.Name }} + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storage.size }} + - metadata: + name: node-logs + spec: + storageClassName: storage-{{ .Release.Name }} + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storage.size }} + template: + metadata: + labels: + app: {{ include "node.fullname" . }} + app.kubernetes.io/name: node-statefulset + app.kubernetes.io/component: node + app.kubernetes.io/part-of: {{ include "node.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + hostname: {{ .Release.Name }} + imagePullSecrets: + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} + initContainers: + - name: init-certs + image: {{ .Values.image.bevelAlpine.repository }}:{{ .Values.image.bevelAlpine.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: ["sh", "-c"] + args: + - |- + {{- if (eq .Values.global.vault.type "hashicorp") }} + echo "Implement hashicorp releayed method" + . /scripts/bevel-vault.sh + echo "Getting vault Token..." + vaultBevelFunc "init" + # Read if secret exists in Vault + vaultBevelFunc 'readJson' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/{{ .Release.Name }}-certs" + if [ "$SECRETS_AVAILABLE" == "yes" ] + then + NODE_KEYSTORE=$(echo ${VAULT_SECRET} | jq -r '.["nodekeystore_base64"]') + SSL_KEYSTORE=$(echo ${VAULT_SECRET} | jq -r '.["sslkeystore_base64"]') + TRUSTSTORE=$(echo ${VAULT_SECRET} | jq -r '.["truststore_base64"]') + echo "saving the cert contents to file" + echo $NODE_KEYSTORE | base64 -d > /opt/corda/certificates/nodekeystore.jks + echo $SSL_KEYSTORE | base64 -d > /opt/corda/certificates/sslkeystore.jks + echo $TRUSTSTORE | base64 -d > /opt/corda/certificates/truststore.jks + fi + {{- else }} + kubectl get secret {{ .Release.Name }}-certs --namespace {{ .Release.Namespace }} -o json > /opt/corda/certificates/tmp 2>&1 + if [ $? -eq 0 ] + then + NODE_KEYSTORE=$(cat /opt/corda/certificates/tmp | jq -r '.data."nodekeystore.jks"') + SSL_KEYSTORE=$(cat /opt/corda/certificates/tmp | jq -r '.data."sslkeystore.jks"') + TRUSTSTORE=$(cat /opt/corda/certificates/tmp | jq -r '.data."truststore.jks"') + rm -f /opt/corda/certificates/tmp + echo "saving the cert contents to file" + echo $NODE_KEYSTORE | base64 -d > /opt/corda/certificates/nodekeystore.jks + echo $SSL_KEYSTORE | base64 -d > /opt/corda/certificates/sslkeystore.jks + echo $TRUSTSTORE | base64 -d > /opt/corda/certificates/truststore.jks + fi + {{- end }} + echo "Completed ..." 
+ {{- if (eq .Values.global.vault.type "hashicorp") }} + env: + - name: VAULT_ADDR + value: "{{ .Values.global.vault.address }}" + - name: VAULT_SECRET_ENGINE + value: "{{ .Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ .Values.global.vault.secretPrefix }}" + - name: KUBERNETES_AUTH_PATH + value: "{{ .Values.global.vault.authPath }}" + - name: VAULT_APP_ROLE + value: "{{ .Values.global.vault.role }}" + - name: VAULT_TYPE + value: "{{ .Values.global.vault.type }}" + {{- end }} + volumeMounts: + - name: scripts-volume + mountPath: /scripts/bevel-vault.sh + subPath: bevel-vault.sh + - name: node-certificates + mountPath: /opt/corda/certificates + - name: init-registration + image: {{ .Values.image.node.repository }}:{{ .Values.image.node.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: ["/bin/bash", "-c"] + args: + - |- + bin/node-initial-registration.sh + volumeMounts: + - name: node-conf + mountPath: /opt/corda/etc/node.conf + subPath: node.conf + - name: node-conf + mountPath: /opt/corda/bin/node-initial-registration.sh + subPath: node-initial-registration.sh + - name: node-certificates + mountPath: /opt/corda/certificates + {{- if .Values.tls.enabled }} + - name: nms-certs + mountPath: "/certs/nms" + - name: doorman-certs + mountPath: "/certs/doorman" + {{- end }} + {{- if .Values.nodeConf.notary }} + - name: notary-nodeinfo + mountPath: /opt/corda/additional-node-infos + {{- end }} + - name: node-logs + mountPath: /opt/corda/logs + - name: node-h2 + mountPath: /opt/corda/h2 + - name: node-cordapps + mountPath: /opt/corda/cordapps + - name: cenm-certs + mountPath: /certs + - name: store-certs + image: {{ .Values.image.bevelAlpine.repository }}:{{ .Values.image.bevelAlpine.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: ["sh", "-c"] + args: + - |- + {{- if (eq .Values.global.vault.type "hashicorp") }} + echo "Implement hashicorp releayed method" + . /scripts/bevel-vault.sh + echo "Getting vault Token..." + vaultBevelFunc "init" + # Read if secret exists in Vault + vaultBevelFunc 'readJson' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/{{ .Release.Name }}-certs" + function safeWriteSecret { + key=$1 + fpath=$2 + if [ "$SECRETS_AVAILABLE" == "yes" ] + then + kubectl get secret ${key}-certs --namespace {{ .Release.Namespace }} -o json > /dev/null 2>&1 + if [ $? 
-ne 0 ]; then + NODE_KEYSTORE=$(echo ${VAULT_SECRET} | jq -r '.["nodekeystore_base64"]') + SSL_KEYSTORE=$(echo ${VAULT_SECRET} | jq -r '.["sslkeystore_base64"]') + TRUSTSTORE=$(echo ${VAULT_SECRET} | jq -r '.["truststore_base64"]') + + echo "creating tmp files for the cert" + echo $NODE_KEYSTORE | base64 -d > ${fpath}/nodekeystore.jks + echo $SSL_KEYSTORE | base64 -d > ${fpath}/sslkeystore.jks + echo $TRUSTSTORE | base64 -d > ${fpath}/truststore.jks + + kubectl create secret generic ${key}-certs --namespace {{ .Release.Namespace }} \ + --from-file=nodekeystore.jks=${fpath}/nodekeystore.jks \ + --from-file=sslkeystore.jks=${fpath}/sslkeystore.jks \ + --from-file=truststore.jks=${fpath}/truststore.jks + fi + else + # Save Certs to Vault + # Use -w0 to get single line base64 -w0 + NODE_KEYSTORE=$(cat ${fpath}/nodekeystore.jks | base64 -w0) + SSL_KEYSTORE=$(cat ${fpath}/sslkeystore.jks | base64 -w0) + TRUSTSTORE=$(cat ${fpath}/truststore.jks | base64 -w0) + + # create a JSON file for the data related to node crypto + echo " + { + \"data\": + { + \"nodekeystore_base64\": \"${NODE_KEYSTORE}\", + \"sslkeystore_base64\": \"${SSL_KEYSTORE}\", + \"truststore_base64\": \"${TRUSTSTORE}\" + } + }" > /tmp/payload.json + vaultBevelFunc 'write' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/${key}-certs" '/tmp/payload.json' + rm /tmp/payload.json + kubectl create secret generic ${key}-certs --namespace {{ .Release.Namespace }} \ + --from-file=nodekeystore.jks=${fpath}/nodekeystore.jks \ + --from-file=sslkeystore.jks=${fpath}/sslkeystore.jks \ + --from-file=truststore.jks=${fpath}/truststore.jks + fi + } + {{- else }} + function safeWriteSecret { + key=$1 + fpath=$2 + kubectl get secret ${key}-certs --namespace {{ .Release.Namespace }} -o json > /dev/null 2>&1 + if [ $? -ne 0 ]; then + kubectl create secret generic ${key}-certs --namespace {{ .Release.Namespace }} \ + --from-file=nodekeystore.jks=${fpath}/nodekeystore.jks \ + --from-file=sslkeystore.jks=${fpath}/sslkeystore.jks \ + --from-file=truststore.jks=${fpath}/truststore.jks + fi + } + {{- end }} + {{- if .Values.nodeConf.notary }} + function safeWriteNotaryNodeInfo { + key=$1 + fpath=$2 + + validating={{ .Values.nodeConf.notary.validating }} + nodeInfoFile=$(basename $(ls ${fpath}/nodeInfo*)) + + kubectl get secret ${key}-info --namespace {{ .Release.Namespace }} -o json > /dev/null 2>&1 + if [ $? 
-ne 0 ] + then + kubectl create secret generic ${key}-info --namespace {{ .Release.Namespace }} \ + --from-file=${nodeInfoFile}=${fpath}/${nodeInfoFile} \ + --from-literal=isValidating_${nodeInfoFile}=${validating} --save-config + else + ENCODED_NODEINFO_FILE=$(cat ${fpath}/${nodeInfoFile} | base64) + # Retrieve the current secret, add the new key-value pair, and update the secret + kubectl get secret ${key}-info -n {{ .Release.Namespace }} -o json | \ + jq --arg nodeinfo_key "${nodeInfoFile}" --arg value "$ENCODED_NODEINFO_FILE" \ + '.data[$nodeinfo_key]=$value' | kubectl apply -f - + + ENCODED_IS_VALIDATING=$(echo ${validating} | xargs | base64) + + kubectl get secret ${key}-info -n {{ .Release.Namespace }} -o json | \ + jq --arg isValidating_key "isValidating_${nodeInfoFile}" --arg value "$ENCODED_IS_VALIDATING" \ + '.data[$isValidating_key]=$value' | kubectl apply -f - + fi + } + {{- end }} + {{- if and (ne .Values.global.cluster.provider "minikube") (.Values.global.cluster.cloudNativeServices) }} + echo "Implement Cloud Native method" + {{- else }} + safeWriteSecret {{ .Release.Name }} /opt/corda/certificates + {{- if .Values.nodeConf.notary }} + safeWriteNotaryNodeInfo notary /opt/corda/additional-node-infos + {{- end }} + {{- end }} + echo "Completed ..." + {{- if (eq .Values.global.vault.type "hashicorp") }} + env: + - name: VAULT_ADDR + value: "{{ .Values.global.vault.address }}" + - name: VAULT_SECRET_ENGINE + value: "{{ .Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ .Values.global.vault.secretPrefix }}" + - name: KUBERNETES_AUTH_PATH + value: "{{ .Values.global.vault.authPath }}" + - name: VAULT_APP_ROLE + value: "{{ .Values.global.vault.role }}" + - name: VAULT_TYPE + value: "{{ .Values.global.vault.type }}" + {{- end }} + volumeMounts: + - name: scripts-volume + mountPath: /scripts/bevel-vault.sh + subPath: bevel-vault.sh + {{- if .Values.nodeConf.notary }} + - name: notary-nodeinfo + mountPath: /opt/corda/additional-node-infos + {{- end }} + - name: node-certificates + mountPath: /opt/corda/certificates + - name: init-cordapps + image: {{ .Values.image.bevelAlpine.repository }}:{{ .Values.image.bevelAlpine.tag }} + imagePullPolicy: Always + command: ["sh", "-c"] + args: + - |- + {{- if .Values.cordApps.getCordApps }} + mkdir -p /tmp/downloaded-jars + REPO_USER=$(cat /secret/username) + REPO_USER_PASS=$(cat /secret/password) + # Downloading official corda provided jars using curl + {{- range .Values.cordApps.jars }} + cd /tmp/downloaded-jars && curl -u $REPO_USER:$REPO_USER_PASS -O -L {{ .url }} + {{- end }} + cp -ar /tmp/downloaded-jars/* /opt/corda/cordapps + {{- end }} + volumeMounts: + - name: node-cordapps + mountPath: /opt/corda/cordapps + {{- if .Values.cordApps.mavenSecret }} + - name: maven-secrets + mountPath: "/secret" + {{- end }} + containers: + - name: node + image: {{ .Values.image.node.repository }}:{{ .Values.image.node.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: ["/bin/bash", "-c"] + args: + - |- + # running the node service + bin/run.sh + resources: + limits: + memory: 2G + requests: + memory: 1G + volumeMounts: + - name: node-cordapps + mountPath: /opt/corda/cordapps + - name: cenm-certs + mountPath: /certs + - name: node-conf + mountPath: /opt/corda/etc/node.conf + subPath: node.conf + - name: node-conf + mountPath: /opt/corda/bin/run.sh + subPath: run.sh + {{- if .Values.tls.enabled }} + - name: nms-certs + mountPath: "/certs/nms" + - name: doorman-certs + mountPath: "/certs/doorman" + {{- end }} + 
{{- if .Values.nodeConf.notary }} + - name: notary-nodeinfo + mountPath: /opt/corda/additional-node-infos + {{- end }} + - name: node-certificates + mountPath: /opt/corda/certificates + - name: node-logs + mountPath: /opt/corda/logs + - name: node-h2 + mountPath: /opt/corda/h2 + - name: logs + image: {{ .Values.image.node.repository }}:{{ .Values.image.node.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: ["/bin/bash", "-c"] + args: + - |- + cd /opt/corda + while true; do tail -f logs/*.log 2>/dev/null; sleep 5; done + # in case something goes wrong, just wait indefinitely ... + tail -f /dev/null + volumeMounts: + - name: node-logs + mountPath: /opt/corda/logs + volumes: + - name: node-conf + configMap: + name: {{ include "node.fullname" . }}-conf + defaultMode: 0777 + - name: cenm-certs + secret: + secretName: cenm-certs + - name: scripts-volume + configMap: + name: bevel-vault-script + defaultMode: 0777 + - name: node-certificates + emptyDir: + medium: Memory + {{- if .Values.cordApps.mavenSecret }} + - name: maven-secrets + secret: + secretName: {{ .Values.cordApps.mavenSecret }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: doorman-certs + secret: + secretName: doorman-tls-certs + - name: nms-certs + secret: + secretName: nms-tls-certs + {{- end }} diff --git a/platforms/r3-corda-ent/charts/enterprise-node/values.yaml b/platforms/r3-corda-ent/charts/enterprise-node/values.yaml new file mode 100644 index 00000000000..f3ab1fbccdc --- /dev/null +++ b/platforms/r3-corda-ent/charts/enterprise-node/values.yaml @@ -0,0 +1,105 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Default values for enterprise-node chart. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +global: + serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # set to true to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) + vault: + type: hashicorp + role: vault-role + address: + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + proxy: + #This will be the proxy/ingress provider. Can have values "ambassador" or "none" + #Eg. provider: "ambassador" + provider: "ambassador" + #This field contains the external URL of the node + #Eg. externalUrlSuffix: test.blockchaincloudpoc.com + externalUrlSuffix: test.blockchaincloudpoc.com + +storage: + size: 1Gi + dbSize: 5Gi + allowedTopologies: + enabled: false + +image: + #Provide the docker secret name in the namespace + #Eg. pullSecret: regcred + pullSecret: + #Pull policy to be used for the Docker image + #Eg.
pullPolicy: IfNotPresent + pullPolicy: IfNotPresent + #Provide a valid image and version for enterprise-node service + node: + repository: corda/corda-enterprise + tag: 4.10.3-zulu-openjdk8-alpine + #Provide a valid image and version for the bevel-alpine utility service + bevelAlpine: + repository: ghcr.io/hyperledger/bevel-alpine + tag: latest + +network: + creds: + truststore: password + +dataSourceProperties: + dataSource: + user: node-db-user + password: node-db-password + url: "jdbc:h2:file:./h2/node-persistence;DB_CLOSE_ON_EXIT=FALSE;WRITE_DELAY=0;LOCK_TIMEOUT=10000" + dataSourceClassName: org.h2.jdbcx.JdbcDataSource + +nodeConf: + creds: + truststore: cordacadevpass + keystore: trustpass + crlCheckSoftFail: true + tlsCertCrlDistPoint: "" + tlsCertCrlIssuer: "" + devMode: false + monitoring: + enabled: true + port: 8090 + allowDevCorDapps: + enabled: true + p2pPort: 10002 + rpc: + port: 10003 + adminPort: 10005 + users: + - name: node + password: nodeP + permissions: ALL + ssh: + enabled: true + sshdPort: 2222 + removeKeysOnDelete: false + legalName: "O=Node,OU=Node,L=London,C=GB" +firewall: + enabled: false + +cordApps: + #Set to true if the CorDapp jars listed below should be downloaded + #Eg. getCordApps: true or false + getCordApps: false + jars: + #Provide the url to download the jar using the curl cmd + #Eg. url: https://ci-artifactory.corda.r3cev.com/artifactory/corda-releases/net/corda/corda-finance/3.3-corda/corda-finance-3.3-corda.jar + - url: https://maven.pkg.github.com/hyperledger/bevel/com.supplychain.bcc.cordapp-supply-chain/4.9/cordapp-supply-chain-4.9.jar + - url: https://maven.pkg.github.com/hyperledger/bevel/com.supplychain.bcc.cordapp-contracts-states/4.9/cordapp-contracts-states-4.9.jar + +# Sleep time (in seconds) after an error occurred +sleepTimeAfterError: 180 +# Path to the base directory +baseDir: /opt/corda diff --git a/platforms/r3-corda-ent/charts/values/noproxy-and-novault/cenm.yaml b/platforms/r3-corda-ent/charts/values/noproxy-and-novault/cenm.yaml new file mode 100644 index 00000000000..851bcd65761 --- /dev/null +++ b/platforms/r3-corda-ent/charts/values/noproxy-and-novault/cenm.yaml @@ -0,0 +1,24 @@ +--- +#helm install supplychain -f values/noproxy-and-novault/network-service.yaml -n supplychain-ns corda-network-service +#helm upgrade supplychain -f values/noproxy-and-novault/network-service.yaml -n supplychain-ns corda-network-service +global: + serviceAccountName: bevel-auth + cluster: + provider: azure + cloudNativeServices: false + vault: + type: kubernetes + proxy: + provider: none + externalUrlSuffix: svc.cluster.local +storage: + size: 1Gi + dbSize: 5Gi + allowedTopologies: + enabled: false + +settings: + removeKeysOnDelete: true # this will erase keys + +tls: + enabled: false diff --git a/platforms/r3-corda-ent/charts/values/noproxy-and-novault/init.yaml b/platforms/r3-corda-ent/charts/values/noproxy-and-novault/init.yaml new file mode 100644 index 00000000000..ed1eac6fa94 --- /dev/null +++ b/platforms/r3-corda-ent/charts/values/noproxy-and-novault/init.yaml @@ -0,0 +1,9 @@ +#helm install init -f values/noproxy-and-novault/init.yaml -n supplychain-ns corda-init +global: + serviceAccountName: bevel-auth + vault: + type: kubernetes + network: corda-ent + cluster: + provider: azure + cloudNativeServices: false diff --git a/platforms/r3-corda-ent/charts/values/noproxy-and-novault/node.yaml b/platforms/r3-corda-ent/charts/values/noproxy-and-novault/node.yaml new file mode 100644 index 00000000000..e112872f3f1 --- /dev/null +++
b/platforms/r3-corda-ent/charts/values/noproxy-and-novault/node.yaml @@ -0,0 +1,37 @@ +--- +#helm install notary -f values/noproxy-and-novault/notary.yaml -n supplychain-ns corda-node +#helm upgrade notary -f values/noproxy-and-novault/notary.yaml -n supplychain-ns corda-node +global: + serviceAccountName: bevel-auth + cluster: + provider: azure + cloudNativeServices: false + vault: + type: kubernetes + proxy: + provider: none + externalUrlSuffix: svc.cluster.local + +storage: + size: "1Gi" + dbSize: 1Gi + +image: + node: + repository: corda/corda-enterprise + tag: 4.10.3-zulu-openjdk8-alpine + +network: + creds: + truststore: password + +tls: + enabled: false + +nodeConf: + legalName: "O=Node,OU=Node,L=London,C=GB" + networkMapPort: 10000 + doormanDomain: idman.supplychain-ent + networkMapDomain: cenm-networkmap.supplychain-ent + networkMapURL: http://cenm-networkmap.supplychain-ent:10000 + doormanURL: http://idman.supplychain-ent:10000 diff --git a/platforms/r3-corda-ent/charts/values/noproxy-and-novault/notary.yaml b/platforms/r3-corda-ent/charts/values/noproxy-and-novault/notary.yaml new file mode 100644 index 00000000000..6adccf84205 --- /dev/null +++ b/platforms/r3-corda-ent/charts/values/noproxy-and-novault/notary.yaml @@ -0,0 +1,37 @@ +--- +#helm install notary -f values/noproxy-and-novault/notary.yaml -n supplychain-ns corda-node +#helm upgrade notary -f values/noproxy-and-novault/notary.yaml -n supplychain-ns corda-node +global: + serviceAccountName: bevel-auth + cluster: + provider: azure + cloudNativeServices: false + vault: + type: kubernetes + proxy: + provider: none + externalUrlSuffix: svc.cluster.local + +image: + notary: + repository: corda/corda-enterprise + tag: 4.10.3-zulu-openjdk8-alpine + +network: + creds: + truststore: password + +tls: + enabled: false + +nodeConf: + legalName: "O=Notary,OU=Notary,L=London,C=GB" + notary: + validating: true + serviceLegalName: "O=Notary,OU=Notary Service,L=London,C=GB" + doormanPort: 10000 + networkMapPort: 10000 + doormanDomain: idman.supplychain-ent + networkMapDomain: cenm-networkmap.supplychain-ent + networkMapURL: http://cenm-networkmap.supplychain-ent:10000 + doormanURL: http://idman.supplychain-ent:10000 diff --git a/platforms/r3-corda-ent/charts/values/proxy-and-vault/cenm.yaml b/platforms/r3-corda-ent/charts/values/proxy-and-vault/cenm.yaml new file mode 100644 index 00000000000..db8ae38faee --- /dev/null +++ b/platforms/r3-corda-ent/charts/values/proxy-and-vault/cenm.yaml @@ -0,0 +1,32 @@ +--- +#helm install supplychain -f values/noproxy-and-novault/network-service.yaml -n supplychain-ns corda-network-service +#helm upgrade supplychain -f values/noproxy-and-novault/network-service.yaml -n supplychain-ns corda-network-service +global: + serviceAccountName: bevel-auth + cluster: + provider: aws + cloudNativeServices: false + vault: + type: hashicorp + role: vault-role + address: http://vault.url:8200 + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + network: corda-enterprise + proxy: + provider: "ambassador" + externalUrlSuffix: test.blockchaincloud.com +storage: + size: 1Gi + dbSize: 5Gi + allowedTopologies: + enabled: false + +settings: + removeKeysOnDelete: true # this will erase keys + +tls: + enabled: true + settings: + networkServices: true diff --git a/platforms/r3-corda-ent/charts/values/proxy-and-vault/init.yaml b/platforms/r3-corda-ent/charts/values/proxy-and-vault/init.yaml new file mode 100644 index 00000000000..d5bc2ec3d23 --- /dev/null +++ 
b/platforms/r3-corda-ent/charts/values/proxy-and-vault/init.yaml @@ -0,0 +1,15 @@ +#helm install init -f values/noproxy-and-novault/init.yaml -n supplychain-ns corda-init +global: + serviceAccountName: bevel-auth + vault: + type: hashicorp + role: vault-role + address: http://vault.url:8200 + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + network: corda-enterprise + cluster: + provider: aws + cloudNativeServices: false + kubernetesUrl: "https://yourkubernetes.com" diff --git a/platforms/r3-corda-ent/charts/values/proxy-and-vault/node.yaml b/platforms/r3-corda-ent/charts/values/proxy-and-vault/node.yaml new file mode 100644 index 00000000000..4f32b1f759d --- /dev/null +++ b/platforms/r3-corda-ent/charts/values/proxy-and-vault/node.yaml @@ -0,0 +1,45 @@ +--- +#helm install notary -f values/noproxy-and-novault/notary.yaml -n supplychain-ns corda-node +#helm upgrade notary -f values/noproxy-and-novault/notary.yaml -n supplychain-ns corda-node +global: + serviceAccountName: bevel-auth + cluster: + provider: aws + cloudNativeServices: false + vault: + type: hashicorp + role: vault-role + address: http://vault.url:8200 + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + network: corda-enterprise + proxy: + provider: "ambassador" + externalUrlSuffix: test.blockchaincloud.com + +storage: + size: "1Gi" + dbSize: 1Gi + +image: + node: + repository: corda/corda-enterprise + tag: 4.10.3-zulu-openjdk8-alpine + +network: + creds: + truststore: password + +tls: + nameOverride: node # should match the release name + enabled: true + +nodeConf: + legalName: "O=Node,OU=Node,L=London,C=GB" + doormanPort: 443 + networkMapPort: 443 + doormanDomain: cenm-doorman.test.blockchaincloud.com + networkMapDomain: cenm-nms.test.blockchaincloud.com + networkMapURL: https://cenm-nms.test.blockchaincloud.com + doormanURL: https://cenm-doorman.test.blockchaincloud.com diff --git a/platforms/r3-corda-ent/charts/values/proxy-and-vault/notary.yaml b/platforms/r3-corda-ent/charts/values/proxy-and-vault/notary.yaml new file mode 100644 index 00000000000..3cfce36eb8a --- /dev/null +++ b/platforms/r3-corda-ent/charts/values/proxy-and-vault/notary.yaml @@ -0,0 +1,42 @@ +--- +#helm install notary -f values/noproxy-and-novault/notary.yaml -n supplychain-ns corda-node +#helm upgrade notary -f values/noproxy-and-novault/notary.yaml -n supplychain-ns corda-node +global: + serviceAccountName: bevel-auth + cluster: + provider: azure + cloudNativeServices: false + vault: + type: hashicorp + role: vault-role + address: http://vault.url:8200 + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + proxy: + provider: "ambassador" + externalUrlSuffix: test.blockchaincloud.com +image: + notary: + repository: corda/corda-enterprise + tag: 4.10.3-zulu-openjdk8-alpine + +network: + creds: + truststore: password + +tls: + nameOverride: notary # should match the release name + enabled: true + +nodeConf: + legalName: "O=Notary,OU=Notary,L=London,C=GB" + notary: + validating: true + serviceLegalName: "O=Notary,OU=Notary Service,L=London,C=GB" + doormanPort: 443 + networkMapPort: 443 + doormanDomain: cenm-doorman.test.blockchaincloud.com + networkMapDomain: cenm-nms.test.blockchaincloud.com + networkMapURL: https://cenm-nms.test.blockchaincloud.com + doormanURL: https://cenm-doorman.test.blockchaincloud.com diff --git a/platforms/r3-corda-ent/configuration/deploy-network.yaml b/platforms/r3-corda-ent/configuration/deploy-network.yaml index 
b27be004b49..0d803aa585b 100644 --- a/platforms/r3-corda-ent/configuration/deploy-network.yaml +++ b/platforms/r3-corda-ent/configuration/deploy-network.yaml @@ -20,98 +20,103 @@ path: "./build" state: absent - # create namespace, service account and clusterrolebinding - - name: "Create namespace, service accounts and clusterrolebinding" - include_role: - name: create/namespace_serviceaccount + # Create namespaces for organizations + - name: "Create namespace" + include_role: + name: create/namespace + vars: + component_name: "{{ org.name | lower }}-ent" + component_type_name: "{{ org.type | lower }}" + kubernetes: "{{ org.k8s }}" + release_dir: "{{ playbook_dir }}/../../../{{ org.gitops.release_dir }}/{{ org.name | lower }}" + loop: "{{ network['organizations'] }}" + loop_control: + loop_var: org + + # Create necessary Kubernetes secrets for each organization + - name: "Create k8s secrets" + include_role: + name: create/secrets vars: component_ns: "{{ org.name | lower }}-ent" - organisation: "{{ org.name | lower }}" kubernetes: "{{ org.k8s }}" - gitops: "{{ org.gitops }}" + vault: "{{ org.vault }}" loop: "{{ network['organizations'] }}" loop_control: loop_var: org - # Create Storageclass that will be used for this deployment - - name: Create Storage Class + # # Setup Init + - name: "Setup Init" include_role: - name: "{{ playbook_dir }}/../../../platforms/shared/configuration/roles/setup/storageclass" + name: setup/init vars: org_name: "{{ org.name | lower }}" - sc_name: "{{ org_name }}-bevel-storageclass" - region: "{{ org.k8s.region | default('eu-west-1') }}" + component_name: "{{ org_name }}-init" + component_ns: "{{ org_name }}-ent" + kubernetes: "{{ org.k8s }}" + vault: "{{ org.vault }}" + values_dir: "{{ playbook_dir }}/../../../{{ org.gitops.release_dir }}/{{ org_name }}/build" + charts_dir: "{{ org.gitops.chart_source }}" loop: "{{ network['organizations'] }}" loop_control: loop_var: org - # Setup CENM services for the network + # Setup CENM - name: "Setup cenm" include_role: name: "setup/cenm" vars: - services: "{{ org.services }}" - organisation: "{{ org.name | lower }}" - name: "{{ org.name | lower }}" - sc_name: "{{ name }}-bevel-storageclass" - component_ns: "{{ org.name | lower }}-ent" + org: "{{ network['organizations'] | first }}" + org_name: "{{ org.name | lower }}" + org_services: "{{ org.services }}" + component_name: "{{ org_name }}-cenm" + component_ns: "{{ org_name }}-ent" kubernetes: "{{ org.k8s }}" vault: "{{ org.vault }}" - gitops: "{{ org.gitops }}" - policy_type: "r3-corda-ent" - loop: "{{ network['organizations'] }}" - loop_control: - loop_var: org - when: org.type == 'cenm' - - # Wait for cenm services to respond - - name: Check that CENM uri are reachable - uri: - url: "{{ item.uri }}/status" - validate_certs: no - register: this - until: this.status == 200 - loop: "{{ network['network_services'] }}" - retries: "{{ network.env.retry_count}}" - delay: 50 + external_url_suffix: "{{ org.external_url_suffix }}" + charts_dir: "{{ org.gitops.chart_source }}" + values_dir: "{{ playbook_dir }}/../../../{{ org.gitops.release_dir }}/{{ org_name }}/build" - # Setup Corda Node services for the network - - name: Setup Corda Node services + # Deploy notaries + - name: Deploy notary service include_role: - name: setup/node + name: setup/notary vars: - services: "{{ org.services }}" - organisation: "{{ org.name | lower }}" - name: "{{ org.name | lower }}" - sc_name: "{{ name }}-bevel-storageclass" - component_ns: "{{ org.name | lower }}-ent" + org_name: "{{ org.name | lower 
}}" + component_name: "{{ org_name }}-notary" + component_ns: "{{ org_name }}-ent" + cloud_provider: "{{ org.cloud_provider }}" + external_url_suffix: "{{ org.external_url_suffix }}" + node: "{{ org.services.notaries }}" kubernetes: "{{ org.k8s }}" vault: "{{ org.vault }}" gitops: "{{ org.gitops }}" - policy_type: "r3-corda-ent" + charts_dir: "{{ org.gitops.chart_source }}" + values_dir: "{{ playbook_dir }}/../../../{{ org.gitops.release_dir }}/{{ org_name }}" loop: "{{ network['organizations'] }}" loop_control: loop_var: org - when: org.type == 'node' + when: + - org.services.notaries is defined - # Setup Additional Notary services for the network - - name: Setup Additional Notary services - include_role: - name: setup/notary + # Setup NETWORK_MAP + - name: "Setup network_map" + include_role: + name: "setup/network_map" vars: - services: "{{ org.services }}" - organisation: "{{ org.name | lower }}" - name: "{{ org.name | lower }}" - sc_name: "{{ name }}-bevel-storageclass" - component_ns: "{{ org.name | lower }}-ent" + org: "{{ network['organizations'] | first }}" + org_name: "{{ org.name | lower }}" + cloud_provider: "{{ org.cloud_provider | lower }}" + org_services: "{{ org.services }}" + component_name: "{{ org_name }}-nm" + component_ns: "{{ org_name }}-ent" kubernetes: "{{ org.k8s }}" vault: "{{ org.vault }}" gitops: "{{ org.gitops }}" - policy_type: "r3-corda-ent" - loop: "{{ network['organizations'] }}" - loop_control: - loop_var: org - when: org.type == 'notary' + external_url_suffix: "{{ org.external_url_suffix }}" + charts_dir: "{{ org.gitops.chart_source }}" + values_dir: "{{ playbook_dir }}/../../../{{ org.gitops.release_dir }}/{{ org_name }}" + # These variables can be overriden from the command line vars: diff --git a/platforms/r3-corda-ent/configuration/roles/create/k8_component/tasks/main.yaml b/platforms/r3-corda-ent/configuration/roles/create/k8_component/tasks/main.yaml index 9924d86642c..63491dc0ef2 100644 --- a/platforms/r3-corda-ent/configuration/roles/create/k8_component/tasks/main.yaml +++ b/platforms/r3-corda-ent/configuration/roles/create/k8_component/tasks/main.yaml @@ -13,29 +13,18 @@ ################################################################################################ --- -# Create and/or check if the target directory exists -- name: Ensures {{ release_dir }}/{{ component_name }} dir exists - file: - path: "{{ release_dir }}/{{ component_name }}" - state: directory +# Ensure that the directory exists for each entity, if not, it creates them +- name: Ensure {{ component_type_name }} dir exists + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/directory" + vars: + path: "{{ release_dir }}/{{ component_type_name }}" -# Create deployment file from a template -- name: Create {{ component_type }} file for {{ component_name }} +# Create the value file for the k8 components +- name: "Create {{ component_type }} file for {{ component_type_name }}" template: - src: "{{ dlt_templates[component_type] }}" + src: "{{ k8_templates[type] | default('default.tpl') }}" dest: "{{ values_file }}" vars: - values_file: "{{ release_dir }}/{{ component_name }}/{{ component_type }}.yaml" - -################################################################################################ -# Test the value file for syntax errors/ missing values -# This is done by calling the helm_lint role and passing the value file parameter -# When a new k8_component is added, changes should be made in helm_lint role as well -- name: Helm lint - include_role: 
- name: "{{ playbook_dir }}/../../shared/configuration/roles/helm_lint" - vars: - helmtemplate_type: "{{ component_type }}" - chart_path: "{{ charts_dir }}" - value_file: "{{ release_dir }}/{{ component_name }}/{{ helmtemplate_type }}.yaml" - when: helm_lint=="true" + values_file: "{{ release_dir }}/{{ component_type_name }}/{{ component_type }}.yaml" + type: "{{ component_type }}" diff --git a/platforms/r3-corda-ent/configuration/roles/create/k8_component/vars/main.yaml b/platforms/r3-corda-ent/configuration/roles/create/k8_component/vars/main.yaml index 346de7c7532..a9c2e610722 100644 --- a/platforms/r3-corda-ent/configuration/roles/create/k8_component/vars/main.yaml +++ b/platforms/r3-corda-ent/configuration/roles/create/k8_component/vars/main.yaml @@ -4,7 +4,7 @@ # SPDX-License-Identifier: Apache-2.0 ############################################################################################## -dlt_templates: +k8_templates: namespace: namespace.tpl reviewer_rbac: reviewer_rbac.tpl vault-reviewer: reviewer.tpl diff --git a/platforms/r3-corda-ent/configuration/roles/create/namespace/tasks/main.yaml b/platforms/r3-corda-ent/configuration/roles/create/namespace/tasks/main.yaml new file mode 100644 index 00000000000..19ecefdf744 --- /dev/null +++ b/platforms/r3-corda-ent/configuration/roles/create/namespace/tasks/main.yaml @@ -0,0 +1,39 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +############################################################################################# +# This role creates value files for namespaces of organizations +############################################################################################# + +# Check if namespace created +- name: Check namespace is created + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/k8_component" + vars: + component_type: "Namespace" + type: "no_retry" + +# Assign the result of check namespace task to a local variable +- name: "Set Variable" + set_fact: + get_namespace: "{{ result }}" + +# Create the value file of Namespace for Organizations +- name: Create namespaces + include_role: + name: create/k8_component + vars: + component_type: "namespace" + when: get_namespace.resources|length == 0 + +# Git Push : Push the above generated files to git directory +- name: Git Push + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" + vars: + GIT_DIR: "{{ playbook_dir }}/../../../" + gitops: "{{ org.gitops }}" + msg: "[ci skip] Pushing deployment files for namespace" diff --git a/platforms/r3-corda-ent/configuration/roles/create/secrets/tasks/main.yaml b/platforms/r3-corda-ent/configuration/roles/create/secrets/tasks/main.yaml new file mode 100644 index 00000000000..cc31dd73c32 --- /dev/null +++ b/platforms/r3-corda-ent/configuration/roles/create/secrets/tasks/main.yaml @@ -0,0 +1,32 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Wait for namespace to be created by flux +- name: "Wait for the namespace {{ component_ns }} to be created" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/k8_component" + vars: + component_type: "Namespace" + component_name: "{{ component_ns }}" + type: "retry" + +# Create the vault roottoken secret +- name: "Create vault token secret" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/shared_k8s_secrets" + vars: + namespace: "{{ component_ns }}" + check: "token_secret" + +# Create the docker pull credentials for image registry +- name: "Create docker credentials secret" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/shared_k8s_secrets" + vars: + namespace: "{{ component_ns }}" + check: "docker_credentials" + when: + - network.docker.username is defined diff --git a/platforms/r3-corda-ent/configuration/roles/helm_component/tasks/main.yaml b/platforms/r3-corda-ent/configuration/roles/helm_component/tasks/main.yaml index 2d9a4f45a3d..df2b22182c9 100644 --- a/platforms/r3-corda-ent/configuration/roles/helm_component/tasks/main.yaml +++ b/platforms/r3-corda-ent/configuration/roles/helm_component/tasks/main.yaml @@ -9,11 +9,11 @@ ############################################################################################# # Ensure that the directory exists, and creates it, if it does not exist -- name: "Ensures {{ values_dir }}/{{ name }} dir exists" +- name: "Ensures {{ values_dir }}/{{ org_name }} dir exists" include_role: name: "{{ playbook_dir }}/../../shared/configuration/roles/check/directory" vars: - path: "{{ values_dir }}/{{ name }}" + path: "{{ values_dir }}/{{ org_name }}" ############################################################################################ # Create the value file for the helm release @@ -22,7 +22,7 @@ - name: "create value file for {{ component_name }}" template: src: "{{ helm_templates[type] | default('helm_component.tpl') }}" - dest: "{{ values_dir }}/{{ name }}/{{ component_name }}.yaml" + dest: "{{ values_dir }}/{{ org_name }}/{{ component_name }}.yaml" vars: main_container_image: "{{ docker_images.cenm[corda_service_version] }}" init_container_image: "{{ docker_images.init_container }}" @@ -37,4 +37,4 @@ vars: helmtemplate_type: "{{ type }}" chart_path: "{{ charts_dir }}" - value_file: "{{ values_dir }}/{{ name }}/{{ component_name }}.yaml" + value_file: "{{ values_dir }}/{{ org_name }}/{{ component_name }}.yaml" diff --git a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/auth.tpl b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/auth.tpl index 3600507a829..3a72523c1e5 100644 --- a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/auth.tpl +++ b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/auth.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ org.services.auth.name }} diff --git a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/bridge.tpl b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/bridge.tpl index 2bbf477fd92..59ef32e0495 100644 --- a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/bridge.tpl +++ 
b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/bridge.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/corda_ent_network_map.tpl b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/corda_ent_network_map.tpl new file mode 100644 index 00000000000..1c355e144a2 --- /dev/null +++ b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/corda_ent_network_map.tpl @@ -0,0 +1,75 @@ +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: {{ component_name }} + namespace: {{ component_ns }} + annotations: + fluxcd.io/automated: "false" +spec: + releaseName: {{ component_name }} + interval: 1m + chart: + spec: + chart: {{ charts_dir }}/cenm-networkmap + sourceRef: + kind: GitRepository + name: flux-{{ network.env.type }} + namespace: flux-{{ network.env.type }} + values: + global: + serviceAccountName: vault-auth + cluster: + provider: "{{ cloud_provider }}" + cloudNativeServices: false + vault: + type: hashicorp + role: vault-role + address: + authPath: "{{ org_name }}" + secretEngine: secretsv2 + secretPrefix: "data/{{ org_name }}" + proxy: + provider: "ambassador" + externalUrlSuffix: "{{ external_url_suffix }}" + cenm: + prefix: "{{ org_name }}-cenm" + sharedCreds: + truststore: {{ cred_truststore }} + keystore: {{ cred_keystore}} + identityManager: + internal: + port: {{ idman_int_port }} + port: {{ idman_ext_port }} + revocation: + port: {{ idman_rev_port }} + auth: + port: {{ auth_port }} + gateway: + port: {{ gateway_port }} + zone: + enmPort: {{ zone_enm_port }} + networkmap: + internal: + port: {{ network_map_int_port }} + port: {{ network_map_ext_port }} + + storage: + size: 1Gi + dbSize: 5Gi + allowedTopologies: + enabled: false + + database: + driverClassName: "org.h2.Driver" + jdbcDriver: "" + url: "jdbc:h2:file:./h2/networkmap-manager-persistence;DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=10000;WRITE_DELAY=0;AUTO_SERVER_PORT=0" + user: "networkmap-db-user" + password: "networkmap-db-password" + runMigration: true + + nmapUpdate: false + sleepTimeAfterError: 120 + baseDir: /opt/cenm + + adminListener: + port: {{ network_map_admin_listener_port }} diff --git a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/corda_ent_notary.tpl b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/corda_ent_notary.tpl new file mode 100644 index 00000000000..48303af3847 --- /dev/null +++ b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/corda_ent_notary.tpl @@ -0,0 +1,106 @@ +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: {{ component_name }} + namespace: {{ component_ns }} + annotations: + fluxcd.io/automated: "false" +spec: + releaseName: {{ component_name }} + interval: 1m + chart: + spec: + chart: {{ charts_dir }}/enterprise-node + sourceRef: + kind: GitRepository + name: flux-{{ network.env.type }} + namespace: flux-{{ network.env.type }} + values: + global: + serviceAccountName: vault-auth + cluster: + provider: "{{ cloud_provider }}" + cloudNativeServices: false + kubernetesUrl: "{{ kubernetes_server }}" + vault: + type: hashicorp + role: vault-role + network: corda-enterprise + address: "{{ vault.url }}" + authPath: "{{ org_name }}" + secretEngine: secretsv2 + secretPrefix: "data/{{ org_name }}" + proxy: + provider: ambassador + externalUrlSuffix: {{ 
external_url_suffix }} + image: + notary: + repository: corda/corda-enterprise + tag: 4.10.3-zulu-openjdk8-alpine + network: + creds: + truststore: password + tls: + nameOverride: {{ node_name }} # should match the release name + enabled: true + sleepTimeAfterError: 180 + storage: + size: 1Gi + dbSize: 5Gi + allowedTopologies: + enabled: false + baseDir: /opt/corda + dataSourceProperties: + dataSource: + user: node-db-user + password: node-db-password + url: "jdbc:h2:file:./h2/node-persistence;DB_CLOSE_ON_EXIT=FALSE;WRITE_DELAY=0;LOCK_TIMEOUT=10000" + dataSourceClassName: org.h2.jdbcx.JdbcDataSource + nodeConf: + legalName: {{ subject }} + devMode: false + creds: + truststore: cordacadevpass + keystore: trustpass + crlCheckSoftFail: true + tlsCertCrlDistPoint: "" + tlsCertCrlIssuer: "" + monitoring: + enabled: true + port: 8090 + allowDevCorDapps: + enabled: true + p2pPort: {{ p2p_port }} + rpc: + port: {{ rpc_port }} + adminPort: {{ rpc_admin_port }} + users: + - name: node + password: nodeP + permissions: ALL + ssh: + enabled: true + sshdPort: 2222 + removeKeysOnDelete: false + firewall: + enabled: false + notary: + serviceLegalName: {{ service_name }} + validating: {{ validating }} + doormanPort: 443 + networkMapPort: 443 + doormanDomain: {{ org_name }}-cenm-doorman.{{ external_url_suffix }} + networkMapDomain: {{ org_name }}-cenm-nms.{{ external_url_suffix }} + doormanURL: https://{{ org_name }}-cenm-doorman.{{ external_url_suffix }} + networkMapURL: https://{{ org_name }}-cenm-nms.{{ external_url_suffix }} +{% if (org.cordapps is defined) and (org.cordapps|length > 0) %} + cordApps: + getCordApps: true + jars: +{% for jars in org.cordapps.jars %} + - url: {{ jars.jar.url }} +{% endfor %} +{% else %} + cordApps: + getCordApps: false +{% endif %} diff --git a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/db.tpl b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/db.tpl index a84000e6b6b..bae15cc7075 100644 --- a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/db.tpl +++ b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/db.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/float.tpl b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/float.tpl index 1daa810e798..7f79a939447 100644 --- a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/float.tpl +++ b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/float.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/gateway.tpl b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/gateway.tpl index a042d383130..67123bc3b24 100644 --- a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/gateway.tpl +++ b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/gateway.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/idman.tpl b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/idman.tpl index
b9165c9114c..f93ce6eb18b 100644 --- a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/idman.tpl +++ b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/idman.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/nmap.tpl b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/nmap.tpl index 63713c7ada1..eb837a438df 100644 --- a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/nmap.tpl +++ b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/nmap.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/node.tpl b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/node.tpl index 20b64dfc05d..a6089b64569 100644 --- a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/node.tpl +++ b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/node.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/node_registration.tpl b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/node_registration.tpl index c58a9c9df54..03365d194e7 100644 --- a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/node_registration.tpl +++ b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/node_registration.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/notary.tpl b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/notary.tpl index 3eb9405bc39..84c6f8f1a73 100644 --- a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/notary.tpl +++ b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/notary.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/notary_initial_registration.tpl b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/notary_initial_registration.tpl index 88f84751b10..34577386c5a 100644 --- a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/notary_initial_registration.tpl +++ b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/notary_initial_registration.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/pki-generator-node.tpl b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/pki-generator-node.tpl index 5ecb2b41cd1..341df40dde6 100644 --- a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/pki-generator-node.tpl +++ 
b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/pki-generator-node.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/pki-generator.tpl b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/pki-generator.tpl index 30fbff5fbe9..89c59b01602 100644 --- a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/pki-generator.tpl +++ b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/pki-generator.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/signer.tpl b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/signer.tpl index 778c230f9bc..4fbd7eb699f 100644 --- a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/signer.tpl +++ b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/signer.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ org.services.signer.name }} diff --git a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/zone.tpl b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/zone.tpl index 2a60f2fc7b3..73ae7482937 100644 --- a/platforms/r3-corda-ent/configuration/roles/helm_component/templates/zone.tpl +++ b/platforms/r3-corda-ent/configuration/roles/helm_component/templates/zone.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ org.services.zone.name }} diff --git a/platforms/r3-corda-ent/configuration/roles/helm_component/vars/main.yaml b/platforms/r3-corda-ent/configuration/roles/helm_component/vars/main.yaml index e9fc7d6f2fc..2deb67e6a29 100644 --- a/platforms/r3-corda-ent/configuration/roles/helm_component/vars/main.yaml +++ b/platforms/r3-corda-ent/configuration/roles/helm_component/vars/main.yaml @@ -15,11 +15,12 @@ helm_templates: zone: zone.tpl notary-initial-registration: notary_initial_registration.tpl db: db.tpl - notary: notary.tpl bridge: bridge.tpl float: float.tpl node_registration: node_registration.tpl node: node.tpl + corda_ent_notary: corda_ent_notary.tpl + corda_ent_network_map: corda_ent_network_map.tpl docker_images: cenm: # list of various nodes supporting version 1.2 diff --git a/platforms/r3-corda-ent/configuration/roles/setup/cenm/tasks/main.yaml b/platforms/r3-corda-ent/configuration/roles/setup/cenm/tasks/main.yaml index 83a23d5860b..f3bf4fad139 100644 --- a/platforms/r3-corda-ent/configuration/roles/setup/cenm/tasks/main.yaml +++ b/platforms/r3-corda-ent/configuration/roles/setup/cenm/tasks/main.yaml @@ -3,88 +3,37 @@ # # SPDX-License-Identifier: Apache-2.0 ############################################################################################## - -############################################################################################## -# This role sets up all cenm services -############################################################################################## - -# Wait for namespace creation -- name: "Wait for namespace creation for {{ organisation }}" - include_role: - name: "{{ playbook_dir 
}}/../../shared/configuration/roles/check/k8_component" +# Gather Kubernetes cluster information +- name: Gather Kubernetes cluster information + community.kubernetes.k8s_cluster_info: + kubeconfig: "{{ kubernetes.config_file }}" + register: cluster_info + +# Set the Kubernetes server URL fact +- name: Set kubernetes_server_url fact + set_fact: + kubernetes_server: "{{ cluster_info.connection.host }}" + +# Install init node +- name: "Install init node" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/job_component" + vars: + type: "corda_ent_cenm" + cloud_provider: "{{ org.cloud_provider | lower }}" + root_ca: "{{ org.subject }}" + subordinate_ca: "{{ org.subordinate_ca_subject }}" + auth_subject: "{{ org_services.auth.subject }}" + signer_subject: "{{ org_services.signer.subject }}" + idman_subject: "{{ org_services.idman.subject }}" + idman_crlissuer_subject: "{{ org_services.idman.crlissuer_subject }}" + networkmap_subject: "{{ org_services.networkmap.subject }}" + secondaryInit: false + +# Check if the job is completed +- name: "Check if {{ component_name }} job is completed in the {{ org_name }} organization" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" vars: - component_type: "Namespace" - component_name: "{{ component_ns }}" - type: "retry" - -# Wait for vault-reviewer creation -- name: "Wait for vault-reviewer creation for {{ organisation }}" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/k8_component" - vars: - component_type: "ServiceAccount" - component_name: "vault-reviewer" - type: "retry" - tags: - - notest - -# Create vault access policies -- name: "Setup vault access for cenm" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/setup/vault_kubernetes" - vars: - name: "{{ org.name | lower }}" - component_name: "{{ org.name | lower }}-vaultk8s-job" - component_auth: "{{ network.env.type }}{{ name }}" - component_type: "{{ org.type | lower }}" - -# Check if the certs are already created -- name: Check if the root certs are already created - shell: | - vault kv get -field=corda-ssl-root-keys.jks {{ vault.secret_path | default('secretsv2') }}/{{ org.name | lower }}/root/certs - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: root_certs - ignore_errors: yes - -# Generate crypto using pki-generator -- name: "Generate crypto using pki-generator" - include_role: - name: "setup/pki-generator" - when: root_certs.failed - -# Deploy Auth Service -- name: "Deploy Auth Service" - include_role: - name: "setup/auth" - -# Deploy gateway service -- name: Deploy gateway service - include_role: - name: setup/gateway - -# Deploy Zone service -- name: Deploy Zone service - include_role: - name: setup/zone - -# Deploy Signer node -- name: Deploy Signer service - include_role: - name: setup/signer - -# Deploy Idman service -- name: Deploy Idman service - include_role: - name: setup/idman - -# Deploy networkmap service -- name: Deploy networkmap service - include_role: - name: setup/nmap - -# Deploy notary service -- name: Deploy notary service - include_role: - name: setup/notary + component_type: Job + namespace: "{{ component_ns }}" diff --git a/platforms/r3-corda-ent/configuration/roles/setup/init/tasks/main.yaml b/platforms/r3-corda-ent/configuration/roles/setup/init/tasks/main.yaml new file mode 100644 index 00000000000..fbfcec3dd67 --- /dev/null +++ 
b/platforms/r3-corda-ent/configuration/roles/setup/init/tasks/main.yaml @@ -0,0 +1,25 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Gather Kubernetes cluster information +- name: Gather Kubernetes cluster information + community.kubernetes.k8s_cluster_info: + kubeconfig: "{{ kubernetes.config_file }}" + register: cluster_info + +# Set the Kubernetes server URL fact +- name: Set kubernetes_server_url fact + set_fact: + kubernetes_server: "{{ cluster_info.connection.host }}" + +# Install init node +- name: "Install init node" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/job_component" + vars: + cloud_provider: "{{ org.cloud_provider | lower }}" + type: "corda_ent_init" + secondaryInit: false diff --git a/platforms/r3-corda-ent/configuration/roles/setup/network_map/tasks/main.yaml b/platforms/r3-corda-ent/configuration/roles/setup/network_map/tasks/main.yaml new file mode 100644 index 00000000000..d8c50fbf5fb --- /dev/null +++ b/platforms/r3-corda-ent/configuration/roles/setup/network_map/tasks/main.yaml @@ -0,0 +1,49 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## +# Gather Kubernetes cluster information +- name: Gather Kubernetes cluster information + community.kubernetes.k8s_cluster_info: + kubeconfig: "{{ kubernetes.config_file }}" + register: cluster_info + +# Set the Kubernetes server URL fact +- name: Set kubernetes_server_url fact + set_fact: + kubernetes_server: "{{ cluster_info.connection.host }}" + +# Create deployment file for network_map +- name: Create network_map deployment file + include_role: + name: helm_component + vars: + type: "corda_ent_network_map" + cred_truststore: "{{ org.credentials.truststore }}" + cred_keystore: "{{ org.credentials.keystore }}" + idman_int_port: "{{ org_services.idman.ports.internal }}" + idman_ext_port: "{{ org_services.idman.ports.external }}" + idman_rev_port: "{{ org_services.idman.ports.revocation }}" + auth_port: "{{ org_services.auth.port }}" + gateway_port: "{{ org_services.gateway.port }}" + zone_enm_port: "{{ org_services.zone.ports.enm }}" + network_map_int_port: "{{ org_services.networkmap.ports.internal }}" + network_map_ext_port: "{{ org_services.networkmap.ports.external }}" + network_map_admin_listener_port: "{{ org_services.networkmap.ports.admin_listener }}" + +# Git Push: Push the deployment files for network_map +- name: Push network_map deployment files + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" + vars: + GIT_DIR: "{{ playbook_dir }}/../../../" + msg: "[ci skip] Pushing deployment file for network map service" + +# Check if the job is completed +- name: "Check if {{ component_name }} job is completed in the {{ org_name }} organization" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" + vars: + component_type: Job + namespace: "{{ component_ns }}" diff --git a/platforms/r3-corda-ent/configuration/roles/setup/node_registration/tasks/main.yaml b/platforms/r3-corda-ent/configuration/roles/setup/node_registration/tasks/main.yaml index
556cc25d0ab..1e505a5fc81 100644 --- a/platforms/r3-corda-ent/configuration/roles/setup/node_registration/tasks/main.yaml +++ b/platforms/r3-corda-ent/configuration/roles/setup/node_registration/tasks/main.yaml @@ -27,7 +27,7 @@ node_name: "{{ peer.name | lower }}" values_dir: "{{ playbook_dir }}/../../../{{ org.gitops.release_dir }}" charts_dir: "{{ org.gitops.chart_source }}" - container_name: "index.docker.io/hyperledgerlabs/h2:2018" + container_name: "ghcr.io/hyperledger/h2:2018" helm_lint: "true" # These variables are needed as the db.tpl is used for both notary & node registration # The values for the below variables are fetched from different parts in the network.yaml diff --git a/platforms/r3-corda-ent/configuration/roles/setup/notary-initial-registration/tasks/nested_main.yaml b/platforms/r3-corda-ent/configuration/roles/setup/notary-initial-registration/tasks/nested_main.yaml index ad2155eb330..ff3dcec4937 100644 --- a/platforms/r3-corda-ent/configuration/roles/setup/notary-initial-registration/tasks/nested_main.yaml +++ b/platforms/r3-corda-ent/configuration/roles/setup/notary-initial-registration/tasks/nested_main.yaml @@ -32,7 +32,7 @@ node_name: "{{ notary.name | lower }}" values_dir: "{{ playbook_dir }}/../../../{{ org.gitops.release_dir }}" charts_dir: "{{ org.gitops.chart_source }}" - container_name: "index.docker.io/hyperledgerlabs/h2:2018" + container_name: "ghcr.io/hyperledger/h2:2018" helm_lint: "true" # These variables are needed as the db.tpl is used for both notary & node registration # The values for the below variables are fetched from different parts in the network.yaml diff --git a/platforms/r3-corda-ent/configuration/roles/setup/notary/tasks/main.yaml b/platforms/r3-corda-ent/configuration/roles/setup/notary/tasks/main.yaml index 47249d4d59b..a925618ea87 100644 --- a/platforms/r3-corda-ent/configuration/roles/setup/notary/tasks/main.yaml +++ b/platforms/r3-corda-ent/configuration/roles/setup/notary/tasks/main.yaml @@ -5,16 +5,51 @@ ############################################################################################## ############################################################################################## -# This role creates the value file for notary +# This role creates the deployment files for node and pushes them to repository ############################################################################################## +# Gather Kubernetes cluster information +- name: Gather Kubernetes cluster information + community.kubernetes.k8s_cluster_info: + kubeconfig: "{{ kubernetes.config_file }}" + register: cluster_info ---- -# Create Notary related to cenm -- name: Create Notary for cenm org - include_tasks: notary_cenm.yaml - when: org.type == 'cenm' +# Set the Kubernetes server URL fact +- name: Set kubernetes_server_url fact + set_fact: + kubernetes_server: "{{ cluster_info.connection.host }}" -# Create Notary related to org -- name: Create Notary for notary org - include_tasks: notary_node.yaml - when: org.type == 'notary' +# Create deployment file for notary node +- name: Create notary node deployment file + include_role: + name: helm_component + vars: + type: corda_ent_notary + node_name: "{{ node.name | lower }}" + p2p_port: "{{ node.ports.p2p }}" + rpc_port: "{{ node.ports.rpc }}" + rpc_admin_port: "{{ node.ports.admin_rpc }}" + subject: "{{ node.subject }}" + legal_name: "{{ node.subject }}" + service_name: "{{ node.serviceName }}" + validating: "{{ node.validating }}" + # nms_url: "{{ network | 
json_query('network_services[?type==`idman`].uri') | first }}" + # doorman_url: "{{ network | json_query('network_services[?type==`networkmap`].uri') | first }}" + loop: "{{ node }}" + loop_control: + loop_var: node + +# Git Push: Push the deployment files for notary node +- name: Push notary deployment files + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" + vars: + GIT_DIR: "{{ playbook_dir }}/../../../" + msg: "[ci skip] Pushing notary deployment file for notary" + +# # Check if the job is completed +# - name: "Check if {{ component_name }} job is completed in the {{ org_name }} organization" +# include_role: +# name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" +# vars: +# component_type: Job +# namespace: "{{ component_ns }}" diff --git a/platforms/r3-corda-ent/configuration/roles/setup/notary/tasks/notary_cenm.yaml b/platforms/r3-corda-ent/configuration/roles/setup/notary/tasks/notary_cenm.yaml deleted file mode 100644 index ec6f81330a3..00000000000 --- a/platforms/r3-corda-ent/configuration/roles/setup/notary/tasks/notary_cenm.yaml +++ /dev/null @@ -1,64 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -############################################################################################## -# This role creates the value file for notary -############################################################################################## - ---- -# Wait till the nmap is running -- name: Waiting for nmap pod to come up - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" - vars: - component_type: "Pod" - namespace: "{{ component_ns }}" - component_name: "{{ org.services.networkmap.name | lower }}" - kubernetes: "{{ org.k8s }}" - label_selectors: - - app = {{ component_name }} - -# Create notary ambassador certificates -- name: Create ambassador certficates for notary - include_role: - name: create/certificates/cenm - vars: - tlscert_path: "./build/ambassador/{{ notary.name }}" - service_name: "{{ notary.name }}" - namespace: "{{ component_ns }}" - dest_path: "./build/ambassador/{{ notary.name }}/notary.crt" - loop: "{{ org.services.notaries }}" - loop_control: - loop_var: notary - -# Create deployment file for CENM notary service -- name: Create value file for notary - include_role: - name: helm_component - vars: - type: "notary" - notary_service: "{{ notary }}" - component_name: "{{ notary_service.name | lower }}" - name: "{{ org.name | lower }}" - values_dir: "{{ playbook_dir }}/../../../{{ org.gitops.release_dir }}" - charts_dir: "{{ org.gitops.chart_source }}" - idman_url: "{{ network | json_query('network_services[?type==`idman`].uri') | first }}" - idman_domain: "{{ idman_url.split(':')[1] | regex_replace('/', '') }}" - networkmap_url: "{{ network | json_query('network_services[?type==`networkmap`].uri') | first }}" - networkmap_domain: "{{ networkmap_url.split(':')[1] | regex_replace('/', '') }}" - corda_service_version: notary-{{ network.version }} - loop: "{{ org.services.notaries }}" - loop_control: - loop_var: notary - -# Git Push: Push the notary deployment files to repository -- name: Push the created deployment files to repository - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" - vars: 
- GIT_DIR: "{{ playbook_dir }}/../../../" - gitops: "{{ org.gitops }}" - msg: "[ci skip] Pushing deployment files for notary service" diff --git a/platforms/r3-corda-ent/configuration/roles/setup/notary/tasks/notary_node.yaml b/platforms/r3-corda-ent/configuration/roles/setup/notary/tasks/notary_node.yaml deleted file mode 100644 index 40bd9e060cc..00000000000 --- a/platforms/r3-corda-ent/configuration/roles/setup/notary/tasks/notary_node.yaml +++ /dev/null @@ -1,101 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -############################################################################################## -# This role sets up all Additional Notary orgs -############################################################################################## - -# Wait for namespace creation -- name: "Wait for namespace creation for {{ organisation }}" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/k8_component" - vars: - component_type: "Namespace" - component_name: "{{ component_ns }}" - type: "retry" - -# Wait for vault-reviewer creation -- name: "Wait for vault-reviewer creation for {{ organisation }}" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/k8_component" - vars: - component_type: "ServiceAccount" - component_name: "vault-reviewer" - type: "retry" - tags: - - notest - -# create vault access policies -- name: "Setup vault access for nodes" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/setup/vault_kubernetes" - vars: - component_name: "{{ org.name | lower }}-vaultk8s-job" - component_auth: "{{ network.env.type }}{{ org.name | lower }}" - component_type: "{{ org.type | lower }}" - -# Create ambassador certificates for notary -- name: Create ambassador certificates for notary - include_role: - name: create/certificates/notary - vars: - tlscert_path: "./build/ambassador/{{ notary.name }}" - node_name: "{{ notary.name | lower }}" - service_name: "{{ notary.name }}" - dest_path: "./build/ambassador/{{ notary.name }}/notary.crt" - loop: "{{ org.services.notaries }}" - loop_control: - loop_var: notary - -# Save tls certificates for network_services to Vault -- name: Save tls certificates for network_services to Vault - include_role: - name: setup/tlscerts - loop: "{{ network.network_services }}" - loop_control: - loop_var: services - -# Write the networkroot truststore, node truststore, node keystore, firewallca, float and bridge passwords to the vault -- name: Write the networkroot truststore, node truststore, node keystore, firewallca, float and bridge passwords to the vault - include_role: - name: setup/credentials - -# Create value file for notary-initial-registration -- name: Create value file for notary registration job - include_role: - name: setup/notary-initial-registration - loop: "{{ org.services.notaries }}" - loop_control: - loop_var: notary - -# Create deployment file for CENM notary service -- name: Create value file for notary - include_role: - name: helm_component - vars: - type: "notary" - notary_service: "{{ notary }}" - component_name: "{{ notary_service.name | lower }}" - name: "{{ org.name | lower }}" - values_dir: "{{ playbook_dir }}/../../../{{ org.gitops.release_dir }}" - charts_dir: "{{ org.gitops.chart_source }}" - idman_url: "{{ 
network | json_query('network_services[?type==`idman`].uri') | first }}" - idman_domain: "{{ idman_url.split(':')[1] | regex_replace('/', '') }}" - networkmap_url: "{{ network | json_query('network_services[?type==`networkmap`].uri') | first }}" - networkmap_domain: "{{ networkmap_url.split(':')[1] | regex_replace('/', '') }}" - corda_service_version: "notary-{{ network.version }}" - loop: "{{ org.services.notaries }}" - loop_control: - loop_var: notary - -# Push the notary deployment files to repository -- name: Push the created deployment files to repository - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" - vars: - GIT_DIR: "{{ playbook_dir }}/../../../" - gitops: "{{ org.gitops }}" - msg: "[ci skip] Pushing deployment files for notary service" diff --git a/platforms/r3-corda-ent/configuration/samples/network-addNotary.yaml b/platforms/r3-corda-ent/configuration/samples/network-addNotary.yaml index 3c2bb40fb81..301cd85531d 100644 --- a/platforms/r3-corda-ent/configuration/samples/network-addNotary.yaml +++ b/platforms/r3-corda-ent/configuration/samples/network-addNotary.yaml @@ -105,91 +105,70 @@ network: jars: - jar: # e.g https://maven.pkg.github.com/hyperledger/bevel/com.supplychain.bcc.cordapp-supply-chain/4.7/cordapp-supply-chain-4.7.jar - url: "https://repo/path/cordapp1.jar" + url: https://maven.pkg.github.com/hyperledger/bevel/com.supplychain.bcc.cordapp-supply-chain/4.9/cordapp-supply-chain-4.9.jar - jar: # e.g https://maven.pkg.github.com/hyperledger/bevel/com.supplychain.bcc.cordapp-contracts-states/4.7/cordapp-contracts-states-4.7.jar - url: "https://repo/path/cordapp2.jar" + url: https://maven.pkg.github.com/hyperledger/bevel/com.supplychain.bcc.cordapp-contracts-states/4.9/cordapp-contracts-states-4.9.jar username: "git_username" password: "git_access_token" credentials: - keystore: - keystore: cordacadevpass - idman: password - networkmap: password - subordinateca: password - rootca: password - tlscrlsigner: password - truststore: - truststore: trustpass - rootca: rootpassword - ssl: password - ssl: - networkmap: password - idman: password - signer: password - root: password - auth: password + keystore: password + truststore: password # Services maps to the pods that will be deployed on the k8s cluster # This sample has idman, networkmap, signer and notary on one cluster but different namespaces services: zone: - name: zone - type: cenm-zone + db: h2 # default: h2 ports: enm: 25000 admin: 12345 auth: - name: auth + db: h2 # default: h2 subject: "CN=Test TLS Auth Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - type: cenm-auth port: 8081 - username: admin - userpwd: p4ssWord + credentials: + username: admin + userpwd: p4ssWord gateway: - name: gateway subject: "CN=Test TLS Gateway Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - type: cenm-gateway - port: 8080 + port: 8080 idman: - name: idman - subject: "CN=Test Identity Manager Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" + db: h2 # default: h2 + subject: "CN=Test Identity Manager Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" # idmanca crlissuer_subject: "CN=Corda TLS CRL Authority,OU=Corda UAT,O=R3 HoldCo LLC,L=New York,C=US" - type: cenm-idman - port: 10000 + ports: + internal: 5052 + external: 10000 + revocation: 5053 + admin_listener: 6000 networkmap: - name: networkmap subject: "CN=Test Network Map Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - type: cenm-networkmap ports: - servicePort: 10000 - targetPort: 10000 + 
internal: 5050 + external: 10000 + admin_listener: 6000 signer: - name: signer subject: "CN=Test TLS Signer Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - type: cenm-signer ports: - servicePort: 8080 - targetPort: 8080 + admin_listener: 6000 notaries: - notary: - name: notary-1 - subject: "O=Notary,OU=Notary1,L=London,C=GB" - serviceName: "O=Notary Service,OU=Notary1,L=London,C=GB" + name: notary + subject: "O=Notary,OU=Notary,L=London,C=GB" # legalName: "O=Notary,OU=Notary,L=London,C=GB" + serviceName: "O=Notary Service,OU=Notary,L=London,C=GB" type: notary validating: true - emailAddress: "dev@bevel.com" - p2p: - port: 10002 - targetPort: 10002 - ambassador: 15005 #Port for ambassador service (must be from env.ambassadorPorts above) - rpc: - port: 30000 - targetPort: 10003 - rpcadmin: - port: 30009 - targetPort: 10005 + emailAddress: "dev@bevel.com" + db: h2 # default: h2 + credentials: + truststore: cordacadevpass + keystore: trustpass + ports: + p2p: 10002 + rpc: 10003 + admin_rpc: 10005 dbtcp: port: 9101 targetPort: 1521 @@ -198,21 +177,19 @@ network: targetPort: 81 - notary: name: notary-2 - subject: "O=Notary,OU=Notary2,L=London,C=GB" - serviceName: "O=Notary Service,OU=Notary2,L=London,C=GB" + subject: "O=Notary2,OU=Notary2,L=London,C=GB" + serviceName: "O=Notary2 Service,OU=Notary2,L=London,C=GB" type: notary validating: true - emailAddress: "dev@bevel.com" - p2p: - port: 10002 - targetPort: 10002 - ambassador: 15010 #Port for ambassador service (must be from env.ambassadorPorts above) - rpc: - port: 30000 - targetPort: 10003 - rpcadmin: - port: 30009 - targetPort: 10005 + emailAddress: "dev@bevel.com" + db: h2 # default: h2 + credentials: + truststore: cordacadevpass + keystore: trustpass + ports: + p2p: 10002 + rpc: 10003 + admin_rpc: 10005 dbtcp: port: 9101 targetPort: 1521 @@ -268,13 +245,13 @@ network: jars: - jar: # e.g https://maven.pkg.github.com/hyperledger/bevel/com.supplychain.bcc.cordapp-supply-chain/4.7/cordapp-supply-chain-4.7.jar - url: "https://repo/path/cordapp1.jar" + url: https://maven.pkg.github.com/hyperledger/bevel/com.supplychain.bcc.cordapp-supply-chain/4.9/cordapp-supply-chain-4.9.jar - jar: # e.g https://maven.pkg.github.com/hyperledger/bevel/com.supplychain.bcc.cordapp-contracts-states/4.7/cordapp-contracts-states-4.7.jar - url: "https://repo/path/cordapp2.jar" + url: https://maven.pkg.github.com/hyperledger/bevel/com.supplychain.bcc.cordapp-contracts-states/4.9/cordapp-contracts-states-4.9.jar username: "git_username" password: "git_access_token" - + # The participating nodes are named as peers services: notaries: diff --git a/platforms/r3-corda-ent/configuration/samples/network-cordaent.yaml b/platforms/r3-corda-ent/configuration/samples/network-cordaent.yaml index 21d857f6b07..96e446a416d 100644 --- a/platforms/r3-corda-ent/configuration/samples/network-cordaent.yaml +++ b/platforms/r3-corda-ent/configuration/samples/network-cordaent.yaml @@ -28,7 +28,7 @@ network: external_dns: enabled # Should be enabled if using external-dns for automatic route configuration # Docker registry details where images are stored. This will be used to create k8s secrets - # Please ensure all required images are built and stored in this registry. + # Please ensure all required images are built and stored in this registry. # Do not check-in docker_password. 
docker: url: "adopblockchaincloud0502.azurecr.io" @@ -48,7 +48,7 @@ network: type: networkmap uri: https://networkmap.test.corda.blockchaincloudpoc.com certificate: home_dir/platforms/r3-corda-ent/configuration/build/ambassador/networkmap/ambassador.pem - truststore: home_dir/platforms/r3-corda-ent/configuration/build/networkroottruststore.jks #Certificate should be encoded in base64 format + truststore: home_dir/platforms/r3-corda-ent/configuration/build/networkroottruststore.jks # Certificate should be encoded in base64 format truststore_pass: rootpassword # Allows specification of one or many organizations that will be connecting to a network. @@ -101,93 +101,70 @@ network: jars: - jar: # e.g https://maven.pkg.github.com/hyperledger/bevel/com.supplychain.bcc.cordapp-supply-chain/4.7/cordapp-supply-chain-4.7.jar - url: "https://repo/path/cordapp1.jar" + url: https://maven.pkg.github.com/hyperledger/bevel/com.supplychain.bcc.cordapp-supply-chain/4.9/cordapp-supply-chain-4.9.jar - jar: # e.g https://maven.pkg.github.com/hyperledger/bevel/com.supplychain.bcc.cordapp-contracts-states/4.7/cordapp-contracts-states-4.7.jar - url: "https://repo/path/cordapp2.jar" + url: https://maven.pkg.github.com/hyperledger/bevel/com.supplychain.bcc.cordapp-contracts-states/4.9/cordapp-contracts-states-4.9.jar username: "git_username" password: "git_access_token" credentials: - keystore: - keystore: cordacadevpass - idman: password - networkmap: password - subordinateca: password - rootca: password - tlscrlsigner: password - truststore: - truststore: trustpass - rootca: rootpassword - ssl: password - ssl: - networkmap: password - idman: password - signer: password - root: password - auth: password + keystore: password + truststore: password # Services maps to the pods that will be deployed on the k8s cluster # This sample has idman, networkmap, signer and notary on one cluster but different namespaces services: zone: - name: zone - type: cenm-zone + db: h2 # default: h2 ports: enm: 25000 admin: 12345 auth: - name: auth + db: h2 # default: h2 subject: "CN=Test TLS Auth Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - type: cenm-auth port: 8081 - username: admin - userpwd: p4ssWord + credentials: + username: admin + userpwd: p4ssWord gateway: - name: gateway subject: "CN=Test TLS Gateway Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - type: cenm-gateway - ports: - servicePort: 8080 - ambassadorPort: 15008 + port: 8080 idman: - name: idman - subject: "CN=Test Identity Manager Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" + db: h2 # default: h2 + subject: "CN=Test Identity Manager Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" # idmanca crlissuer_subject: "CN=Corda TLS CRL Authority,OU=Corda UAT,O=R3 HoldCo LLC,L=New York,C=US" - type: cenm-idman - port: 10000 + ports: + internal: 5052 + external: 10000 + revocation: 5053 + admin_listener: 6000 networkmap: - name: networkmap subject: "CN=Test Network Map Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - type: cenm-networkmap ports: - servicePort: 10000 - targetPort: 10000 + internal: 5050 + external: 10000 + admin_listener: 6000 signer: - name: signer subject: "CN=Test TLS Signer Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - type: cenm-signer ports: - servicePort: 8080 - targetPort: 8080 + admin_listener: 6000 notaries: - notary: - name: notary-1 - subject: "O=Notary,OU=Notary1,L=London,C=GB" - serviceName: "O=Notary Service,OU=Notary1,L=London,C=GB" + name: notary + subject: 
"O=Notary,OU=Notary,L=London,C=GB" # legalName: "O=Notary,OU=Notary,L=London,C=GB" + serviceName: "O=Notary Service,OU=Notary,L=London,C=GB" type: notary validating: true emailAddress: "dev@bevel.com" - p2p: - port: 10002 - targetPort: 10002 - ambassador: 15005 #Port for ambassador service (must be from env.ambassadorPorts above) - rpc: - port: 30000 - targetPort: 10003 - rpcadmin: - port: 30009 - targetPort: 10005 + db: h2 # default: h2 + credentials: + truststore: cordacadevpass + keystore: trustpass + ports: + p2p: 10002 + rpc: 10003 + admin_rpc: 10005 dbtcp: port: 9101 targetPort: 1521 @@ -196,540 +173,22 @@ network: targetPort: 81 - notary: name: notary-2 - subject: "O=Notary,OU=Notary2,L=London,C=GB" - serviceName: "O=Notary Service,OU=Notary2,L=London,C=GB" + subject: "O=Notary2,OU=Notary2,L=London,C=GB" + serviceName: "O=Notary2 Service,OU=Notary2,L=London,C=GB" type: notary validating: true - emailAddress: "dev@bevel.com" - p2p: - port: 10002 - targetPort: 10002 - ambassador: 15010 #Port for ambassador service (must be from env.ambassadorPorts above) - rpc: - port: 30000 - targetPort: 10003 - rpcadmin: - port: 30009 - targetPort: 10005 - dbtcp: - port: 9101 - targetPort: 1521 - dbweb: - port: 8080 - targetPort: 81 - # Specification for the 2nd organization. Each organization maps to a VPC and a separate k8s cluster - - organization: - name: manufacturer - version: 4.7 - cenm_version: 1.5 - firewall_version: 4.4 # Supports 4.4 - country: CH - state: Zurich - location: Zurich - subject: "O=Manufacturer,OU=Manufacturer,L=Zurich,C=CH" - type: node - external_url_suffix: test.corda.blockchaincloudpoc.com - firewall: - enabled: true # true if firewall components are to be deployed - subject: "CN=Test Firewall CA Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - credentials: - firewallca: firewallcapassword - float: floatpassword - bridge: bridgepassword - - cloud_provider: aws # Options: aws, azure, gcp - aws: - access_key: "aws_access_key" # AWS Access key, only used when cloud_provider=aws - secret_key: "aws_secret_key" # AWS Secret key, only used when cloud_provider=aws - - # Kubernetes cluster deployment variables. The config file path and name has to be provided. - k8s: - context: "cluster_context" - config_file: "cluster_config" - - # Hashicorp Vault server address and root-token. Vault should be unsealed. - # Do not check-in root_token - vault: - url: "vault_addr" - root_token: "vault_root_token" - secret_path: "secretsv2" - # Git Repo details which will be used by GitOps/Flux. - # Do not check-in git_access_token - gitops: - git_protocol: "https" # Option for git over https or ssh - git_url: "https://github.com//bevel.git" # Gitops https url for flux value files - branch: "develop" # Git branch where release is being made - release_dir: "platforms/r3-corda-ent/releases/dev" # Relative Path in the Git repo for flux sync per environment. 
- chart_source: "platforms/r3-corda-ent/charts" # Relative Path where the Helm charts are stored in Git repo - username: "git_username" # Git Service user who has rights to check-in in all branches - password: "git_access_token" # Git Server user password/token (Optional for ssh; Required for https) - email: "git@email.com" # Email to use in git config - git_repo: "github.com//bevel.git" # Gitops git repository URL for git push - private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) - - # Cordapps Repository details (optional if cordapps jar are store in a repository) - cordapps: - jars: - - jar: - # e.g https://maven.pkg.github.com/hyperledger/bevel/com.supplychain.bcc.cordapp-supply-chain/4.7/cordapp-supply-chain-4.7.jar - url: "https://repo/path/cordapp1.jar" - - jar: - # e.g https://maven.pkg.github.com/hyperledger/bevel/com.supplychain.bcc.cordapp-contracts-states/4.7/cordapp-contracts-states-4.7.jar - url: "https://repo/path/cordapp2.jar" - username: "git_username" - password: "git_access_token" - - # The participating nodes are named as peers - services: - float: - name: float - subject: "CN=Test Float Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - external_url_suffix: test.cordafloat.blockchaincloudpoc.com - cloud_provider: aws # Options: aws, azure, gcp - aws: - access_key: "aws_access_key" # AWS Access key, only used when cloud_provider=aws - secret_key: "aws_secret_key" # AWS Secret key, only used when cloud_provider=aws - k8s: - context: "float_cluster_context" - config_file: "float_cluster_config" - vault: - url: "float_vault_addr" - root_token: "float_vault_root_token" - gitops: - git_protocol: "https" # Option for git over https or ssh - git_url: "https://github.com//bevel.git" # Gitops https url for flux value files - branch: "develop" # Git branch where release is being made - release_dir: "platforms/r3-corda-ent/releases/float" # Relative Path in the Git repo for flux sync per environment. - chart_source: "platforms/r3-corda-ent/charts" # Relative Path where the Helm charts are stored in Git repo - username: "git_username" # Git Service user who has rights to check-in in all branches - password: "git_access_token" # Git Server user password/token (Optional for ssh; Required for https) - email: "git@email.com" # Email to use in git config - git_repo: "github.com//bevel.git" # Gitops git repository URL for git push - private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) - ports: - p2p_port: 40000 - tunnelport: 39999 - ambassador_tunnel_port: 15021 - ambassador_p2p_port: 15020 - bridge: - name: bridge - subject: "CN=Test Bridge Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - peers: - - peer: - name: manufacturer - subject: "O=Manufacturer,OU=Manufacturer,L=47.38/8.54/Zurich,C=CH" # This is the node identity. 
L=lat/long is mandatory for supplychain sample app - type: node - credentials: - truststore: trustpass - keystore: cordacadevpass - hsm: # hsm support for future release - enabled: false - p2p: - port: 10002 - targetPort: 10002 - ambassador: 15010 #Port for ambassador service (must be from env.ambassadorPorts above) - rpc: - port: 30000 - targetPort: 10003 - rpcadmin: - port: 30009 - targetPort: 10005 - dbtcp: - port: 9101 - targetPort: 1521 - dbweb: - port: 8080 - targetPort: 81 - springboot: # This is for the springboot server - targetPort: 20001 - port: 20001 - expressapi: # This is for the express api server - targetPort: 3000 - port: 3000 - - - organization: - name: carrier - version: 4.7 - cenm_version: 1.5 - firewall_version: 4.4 # Supports 4.4 - country: GB - state: London - location: London - subject: "O=Carrier,OU=Carrier,L=London,C=GB" - type: node - external_url_suffix: test.corda.blockchaincloudpoc.com - firewall: - enabled: true # true if firewall components are to be deployed - subject: "CN=Test Firewall CA Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - credentials: - firewallca: firewallcapassword - float: floatpassword - bridge: bridgepassword - - cloud_provider: aws # Options: aws, azure, gcp - aws: - access_key: "aws_access_key" # AWS Access key, only used when cloud_provider=aws - secret_key: "aws_secret_key" # AWS Secret key, only used when cloud_provider=aws - - # Kubernetes cluster deployment variables. The config file path and name has to be provided in case - # the cluster has already been created. - k8s: - context: "cluster_context" - config_file: "cluster_config" - - # Hashicorp Vault server address and root-token. Vault should be unsealed. - # Do not check-in root_token - vault: - url: "vault_addr" - root_token: "vault_root_token" - secret_path: "secretsv2" - # Git Repo details which will be used by GitOps/Flux. - # Do not check-in git_access_token - gitops: - git_protocol: "https" # Option for git over https or ssh - git_url: "https://github.com//bevel.git" # Gitops https url for flux value files - branch: "develop" # Git branch where release is being made - release_dir: "platforms/r3-corda-ent/releases/dev" # Relative Path in the Git repo for flux sync per environment. 
- chart_source: "platforms/r3-corda-ent/charts" # Relative Path where the Helm charts are stored in Git repo - username: "git_username" # Git Service user who has rights to check-in in all branches - password: "git_access_token" # Git Server user password/token (Optional for ssh; Required for https) - email: "git@email.com" # Email to use in git config - git_repo: "github.com//bevel.git" # Gitops git repository URL for git push - private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) - - # Cordapps Repository details (optional if cordapps jar are store in a repository) - cordapps: - jars: - - jar: - # e.g https://maven.pkg.github.com/hyperledger/bevel/com.supplychain.bcc.cordapp-supply-chain/4.7/cordapp-supply-chain-4.7.jar - url: "https://repo/path/cordapp1.jar" - - jar: - # e.g https://maven.pkg.github.com/hyperledger/bevel/com.supplychain.bcc.cordapp-contracts-states/4.7/cordapp-contracts-states-4.7.jar - url: "https://repo/path/cordapp2.jar" - username: "git_username" - password: "git_access_token" - - services: - float: - name: float - subject: "CN=Test Float Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - external_url_suffix: test.cordafloat.blockchaincloudpoc.com - cloud_provider: aws # Options: aws, azure, gcp - aws: - access_key: "aws_access_key" # AWS Access key, only used when cloud_provider=aws - secret_key: "aws_secret_key" # AWS Secret key, only used when cloud_provider=aws - k8s: - context: "float_cluster_context" - config_file: "float_cluster_config" - vault: - url: "float_vault_addr" - root_token: "float_vault_root_token" - gitops: - git_protocol: "https" # Option for git over https or ssh - git_url: "https://github.com//bevel.git" # Gitops https or ssh url for flux value files - branch: "develop" # Git branch where release is being made - release_dir: "platforms/r3-corda-ent/releases/float" # Relative Path in the Git repo for flux sync per environment. - chart_source: "platforms/r3-corda-ent/charts" # Relative Path where the Helm charts are stored in Git repo - username: "git_username" # Git Service user who has rights to check-in in all branches - password: "git_access_token" # Git Server user password/token (Optional for ssh; Required for https) - email: "git@email.com" # Email to use in git config - git_repo: "github.com//bevel.git" # Gitops git repository URL for git push - private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) - ports: - p2p_port: 40000 - tunnelport: 39999 - ambassador_tunnel_port: 15031 - ambassador_p2p_port: 15030 - bridge: - name: bridge - subject: "CN=Test Bridge Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - peers: - - peer: - name: carrier - subject: "O=Carrier,OU=Carrier,L=51.50/-0.13/London,C=GB" # This is the node subject. 
L=lat/long is mandatory for supplychain sample app - type: node - credentials: - truststore: trustpass - keystore: cordacadevpass - hsm: # hsm support for future release - enabled: false - p2p: - port: 10002 - targetPort: 10002 - ambassador: 15030 #Port for ambassador service (must be from env.ambassadorPorts above) - rpc: - port: 30000 - targetPort: 10003 - rpcadmin: - port: 30009 - targetPort: 10005 - dbtcp: - port: 9101 - targetPort: 1521 - dbweb: - port: 8080 - targetPort: 81 - springboot: - targetPort: 20001 - port: 20001 - expressapi: - targetPort: 3000 - port: 3000 - - - organization: - name: store - version: 4.7 - cenm_version: 1.5 - firewall_version: 4.4 # Supports 4.4 - country: US - state: New York - location: New York - subject: "O=Store,OU=Store,L=New York,C=US" - type: node - external_url_suffix: test.corda.blockchaincloudpoc.com - firewall: - enabled: true # true if firewall components are to be deployed - subject: "CN=Test Firewall CA Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - credentials: - firewallca: firewallcapassword - float: floatpassword - bridge: bridgepassword - - cloud_provider: aws # Options: aws, azure, gcp - aws: - access_key: "aws_access_key" # AWS Access key, only used when cloud_provider=aws - secret_key: "aws_secret_key" # AWS Secret key, only used when cloud_provider=aws - - # Kubernetes cluster deployment variables. The config file path and name has to be provided in case - # the cluster has already been created. - k8s: - context: "cluster_context" - config_file: "cluster_config" - - # Hashicorp Vault server address and root-token. Vault should be unsealed. - # Do not check-in root_token - vault: - url: "vault_addr" - root_token: "vault_root_token" - secret_path: "secretsv2" - # Git Repo details which will be used by GitOps/Flux. - # Do not check-in git_access_token - gitops: - git_protocol: "https" # Option for git over https or ssh - git_url: "https://github.com//bevel.git" # Gitops https url for flux value files - branch: "develop" # Git branch where release is being made - release_dir: "platforms/r3-corda-ent/releases/dev" # Relative Path in the Git repo for flux sync per environment. 
- chart_source: "platforms/r3-corda-ent/charts" # Relative Path where the Helm charts are stored in Git repo - username: "git_username" # Git Service user who has rights to check-in in all branches - password: "git_access_token" # Git Server user password/token (Optional for ssh; Required for https) - email: "git@email.com" # Email to use in git config - git_repo: "github.com//bevel.git" # Gitops git repository URL for git push - private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) - - # Cordapps Repository details (optional if cordapps jar are store in a repository) - cordapps: - jars: - - jar: - # e.g https://maven.pkg.github.com/hyperledger/bevel/com.supplychain.bcc.cordapp-supply-chain/4.7/cordapp-supply-chain-4.7.jar - url: "https://repo/path/cordapp1.jar" - - jar: - # e.g https://maven.pkg.github.com/hyperledger/bevel/com.supplychain.bcc.cordapp-contracts-states/4.7/cordapp-contracts-states-4.7.jar - url: "https://repo/path/cordapp2.jar" - username: "git_username" - password: "git_access_token" - - services: - float: - name: float - subject: "CN=Test Float Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - external_url_suffix: test.cordafloat.blockchaincloudpoc.com - cloud_provider: aws # Options: aws, azure, gcp - aws: - access_key: "aws_access_key" # AWS Access key, only used when cloud_provider=aws - secret_key: "aws_secret_key" # AWS Secret key, only used when cloud_provider=aws - k8s: - context: "float_cluster_context" - config_file: "float_cluster_config" - vault: - url: "float_vault_addr" - root_token: "float_vault_root_token" - gitops: - git_protocol: "https" # Option for git over https or ssh - git_url: "https://github.com//bevel.git" # Gitops https or ssh url for flux value files - branch: "develop" # Git branch where release is being made - release_dir: "platforms/r3-corda-ent/releases/float" # Relative Path in the Git repo for flux sync per environment. - chart_source: "platforms/r3-corda-ent/charts" # Relative Path where the Helm charts are stored in Git repo - username: "git_username" # Git Service user who has rights to check-in in all branches - password: "git_access_token" # Git Server user password/token (Optional for ssh; Required for https) - email: "git@email.com" # Email to use in git config - git_repo: "github.com//bevel.git" # Gitops git repository URL for git push - private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) - ports: - p2p_port: 40000 - tunnelport: 39999 - ambassador_tunnel_port: 15041 - ambassador_p2p_port: 15040 - bridge: - name: bridge - subject: "CN=Test Bridge Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - peers: - - peer: - name: store - subject: "O=Store,OU=Store,L=40.73/-74/New York,C=US" # This is the node identity. 
L=lat/long is mandatory for supplychain sample app - type: node + emailAddress: "dev@bevel.com" + db: h2 # default: h2 credentials: - truststore: trustpass - keystore: cordacadevpass - hsm: # hsm support for future release - enabled: false - p2p: - port: 10002 - targetPort: 10002 - ambassador: 15040 #Port for ambassador service (must be from env.ambassadorPorts above) - rpc: - port: 30000 - targetPort: 10003 - rpcadmin: - port: 30009 - targetPort: 10005 - dbtcp: - port: 9101 - targetPort: 1521 - dbweb: - port: 8080 - targetPort: 81 - springboot: - targetPort: 20001 - port: 20001 - expressapi: - targetPort: 3000 - port: 3000 - - - organization: - name: warehouse - version: 4.7 - cenm_version: 1.5 - firewall_version: 4.4 # Supports 4.4 - country: US - state: Massachusetts - location: Boston - subject: "O=Warehouse,OU=Warehouse,L=Boston,C=US" - type: node - external_url_suffix: test.corda.blockchaincloudpoc.com - firewall: - enabled: true # true if firewall components are to be deployed - subject: "CN=Test Firewall CA Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - credentials: - firewallca: firewallcapassword - float: floatpassword - bridge: bridgepassword - - cloud_provider: aws # Options: aws, azure, gcp - aws: - access_key: "aws_access_key" # AWS Access key, only used when cloud_provider=aws - secret_key: "aws_secret_key" # AWS Secret key, only used when cloud_provider=aws - - # Kubernetes cluster deployment variables. The config file path and name has to be provided in case - # the cluster has already been created. - k8s: - context: "cluster_context" - config_file: "cluster_config" - - # Hashicorp Vault server address and root-token. Vault should be unsealed. - # Do not check-in root_token - vault: - url: "vault_addr" - root_token: "vault_root_token" - secret_path: "secretsv2" - # Git Repo details which will be used by GitOps/Flux. - # Do not check-in git_access_token - gitops: - git_protocol: "https" # Option for git over https or ssh - git_url: "https://github.com//bevel.git" # Gitops https or ssh url for flux value files - branch: "develop" # Git branch where release is being made - release_dir: "platforms/r3-corda-ent/releases/dev" # Relative Path in the Git repo for flux sync per environment. 
- chart_source: "platforms/r3-corda-ent/charts" # Relative Path where the Helm charts are stored in Git repo - username: "git_username" # Git Service user who has rights to check-in in all branches - password: "git_access_token" # Git Server user password/token (Optional for ssh; Required for https) - email: "git@email.com" # Email to use in git config - git_repo: "github.com//bevel.git" # Gitops git repository URL for git push - private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) - - # Cordapps Repository details (optional if cordapps jar are store in a repository) - cordapps: - jars: - - jar: - # e.g https://maven.pkg.github.com/hyperledger/bevel/com.supplychain.bcc.cordapp-supply-chain/4.7/cordapp-supply-chain-4.7.jar - url: "https://repo/path/cordapp1.jar" - - jar: - # e.g https://maven.pkg.github.com/hyperledger/bevel/com.supplychain.bcc.cordapp-contracts-states/4.7/cordapp-contracts-states-4.7.jar - url: "https://repo/path/cordapp2.jar" - username: "git_username" - password: "git_access_token" - - services: - float: - name: float - subject: "CN=Test Float Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - external_url_suffix: test.cordafloat.blockchaincloudpoc.com - cloud_provider: aws # Options: aws, azure, gcp - aws: - access_key: "aws_access_key" # AWS Access key, only used when cloud_provider=aws - secret_key: "aws_secret_key" # AWS Secret key, only used when cloud_provider=aws - k8s: - context: "float_cluster_context" - config_file: "float_cluster_config" - vault: - url: "float_vault_addr" - root_token: "float_vault_root_token" - gitops: - git_protocol: "https" # Option for git over https or ssh - git_url: "https://github.com//bevel.git" # Gitops https or ssh url for flux value files - branch: "develop" # Git branch where release is being made - release_dir: "platforms/r3-corda-ent/releases/float" # Relative Path in the Git repo for flux sync per environment. - chart_source: "platforms/r3-corda-ent/charts" # Relative Path where the Helm charts are stored in Git repo - username: "git_username" # Git Service user who has rights to check-in in all branches - password: "git_access_token" # Git Server user password/token (Optional for ssh; Required for https) - email: "git@email.com" # Email to use in git config - git_repo: "github.com//bevel.git" # Gitops git repository URL for git push - private_key: "path_to_private_key" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) + truststore: cordacadevpass + keystore: trustpass ports: - p2p_port: 40000 - tunnelport: 39999 - ambassador_tunnel_port: 15051 - ambassador_p2p_port: 15050 - bridge: - name: bridge - subject: "CN=Test Bridge Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - peers: - - peer: - name: warehouse - subject: "O=Warehouse,OU=Warehouse,L=42.36/-71.06/Boston,C=US" # This is the node identity. 
L=lat/long is mandatory for supplychain sample app - type: node - credentials: - truststore: trustpass - keystore: cordacadevpass - hsm: # hsm support for future release - enabled: false - p2p: - port: 10002 - targetPort: 10002 - ambassador: 15050 #Port for ambassador service (must be from env.ambassadorPorts above) - rpc: - port: 30000 - targetPort: 10003 - rpcadmin: - port: 30009 - targetPort: 10005 + p2p: 10002 + rpc: 10003 + admin_rpc: 10005 dbtcp: port: 9101 targetPort: 1521 dbweb: port: 8080 targetPort: 81 - springboot: - targetPort: 20001 - port: 20001 - expressapi: - targetPort: 3000 - port: 3000 diff --git a/platforms/r3-corda/charts/README.md b/platforms/r3-corda/charts/README.md index c7c52b509aa..b0ffc5e77fa 100644 --- a/platforms/r3-corda/charts/README.md +++ b/platforms/r3-corda/charts/README.md @@ -6,51 +6,105 @@ # Charts for R3 Corda components ## About -This folder contains helm charts which are used by the ansible playbooks for the deployment of the R3-Corda components. Each chart folder contain a folder for templates, chart file and the corresponding value file. +This folder contains the helm charts which are used for the deployment of the R3 Corda components. Each helm chart that you can use has the following keys and you need to set them. The `global.cluster.provider` is used as a key for the various cloud features enabled. Also you only need to specify one cloud provider, **not** both if deploying to cloud. As of writing this doc, AWS is fully supported. -## Example Folder Structure ### +```yaml +global: + serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws + cloudNativeServices: false # future: set to true to use Cloud Native Services + kubernetesUrl: "https://yourkubernetes.com" # Provide the k8s URL, ignore if not using Hashicorp Vault + vault: + type: hashicorp # choose from hashicorp | kubernetes + network: corda # must be corda for these charts + # Following are necessary only when hashicorp vault is used. 
+ address: http://vault.url:8200 + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + role: vault-role ``` -/corda-doorman -|-- templates -| |--_helpers.tpl -| |-- volumes.yaml -| |-- deployment.yaml -| |-- service.yaml -|-- Chart.yaml -|-- values.yaml + +## Usage + +### Pre-requisites + +- Kubernetes Cluster (either Managed cloud option like EKS or local like minikube) +- Accessible and unsealed Hahsicorp Vault (if using Vault) +- Configured Ambassador AES (if using Ambassador as proxy) +- Update the dependencies + ``` + helm dependency update corda-init + helm dependency update corda-network-service + helm dependency update corda-node + ``` + +### _Without Proxy or Vault_ + +```bash +helm install init ./corda-init --namespace supplychain-ns --create-namespace --values ./values/noproxy-and-novault/init.yaml + +# Install doorman and network-map services +helm install supplychain ./corda-network-service --namespace supplychain-ns --values ./values/noproxy-and-novault/network-service.yaml +# Install a notary service +helm install notary ./corda-node --namespace supplychain-ns --values ./values/noproxy-and-novault/notary.yaml + +``` +### To setup another node in a different namespace + +```bash +# Run init for new namespace +helm install init ./corda-init --namespace manufacturer-ns --create-namespace --values ./values/noproxy-and-novault/init.yaml +# Install a Corda node +helm install manufacturer ./corda-node --namespace manufacturer-ns --values ./values/noproxy-and-novault/node.yaml ``` -## Pre-requisites - - Helm to be installed and configured - -## Charts description ## - -### 1. doorman ### -- This folder contains chart templates and default values for doorman servers. -### 2. doorman-tls ### -- This folder contains chart templates and default values for doorman-tls servers. -### 3. h2 ### -- This folder contains chart templates and default values for creation of h2 database. -### 4. h2-adduser ### -- This folder contains chart templates and default values for adding new user into h2 database. -### 5. h2-password-change ### -- This folder contains chart templates and default values for changing the password for h2 database user. -### 6. mongodb ### -- This folder contains chart templates and default values for mongodb node -### 7. mongodb-tls ### -- This folder contains chart templates and default values for mongodb node with tls=on. -### 8. nms ### -- This folder contains chart templates and default values for nms -### 9. nms-tls ### -- This folder contains chart templates and default values for nms with tls=on. -### 10. node ### -- This folder contains chart templates and default values for node -### 11. node-initial-registration ### -- This folder contains chart templates and default values for registering node with notary -### 12. notary ### -- This folder contains chart templates and default values for notary. -### 13. notary-initial-registration ### -- This folder contains chart templates and default values for registering notary with nms. -### 14. storage ### -- This folder contains chart templates and default values for StorageClass +### _With Ambassador proxy and Vault_ +Replace the `global.vault.address`, `global.cluster.kubernetesUrl` and `global.proxy.externalUrlSuffix` in all the files in `./values/proxy-and-vault/` folder. Also update the `nodeConf.networkMapURL` and `nodeConf.doormanURL` as per your `global.proxy.externalUrlSuffix` of corda-network-service. 
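For illustration, the overrides called out above might look like the sketch below. Every value is a placeholder (an assumed cluster API URL, Vault address and external URL suffix), and the Doorman/NetworkMap hostnames are only an assumption based on the external URL suffix exposed by corda-network-service, so verify them against the actual files in `./values/proxy-and-vault/` before installing.

```yaml
# Hypothetical excerpt of a ./values/proxy-and-vault/*.yaml override -- placeholders only
global:
  serviceAccountName: vault-auth
  cluster:
    provider: aws
    kubernetesUrl: "https://my-eks-api.example.com"      # assumed Kubernetes API URL
  vault:
    type: hashicorp
    address: "https://my-vault.example.com:8200"         # assumed Vault address
  proxy:
    externalUrlSuffix: test.corda.example.com            # assumed external URL suffix

# For node.yaml only: point the node at the services published by corda-network-service
# under the same external URL suffix (the hostname pattern below is an assumption).
nodeConf:
  doormanURL: "https://doorman.test.corda.example.com"
  networkMapURL: "https://networkmap.test.corda.example.com"
```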
+ +```bash +kubectl create namespace supplychain-ns # if the namespace does not exist already +# Create the roottoken secret +kubectl -n supplychain-ns create secret generic roottoken --from-literal=token= + +helm install init ./corda-init --namespace supplychain-ns --values ./values/proxy-and-vault/init.yaml + +# Install doorman and network-map services +helm install supplychain ./corda-network-service --namespace supplychain-ns --values ./values/proxy-and-vault/network-service.yaml +# Install a notary service +helm install notary ./corda-node --namespace supplychain-ns --values ./values/proxy-and-vault/notary.yaml + +``` +### To setup another node in a different namespace + +Update the `global.proxy.externalUrlSuffix` and `nodeConf.legalName` in file `./values/proxy-and-vault/node.yaml` or pass via helm command line. +```bash +# Get the init and static nodes from existing member and place in corda-init/files +cd ./corda-init/files/ +kubectl --namespace supplychain-ns get secret nms-tls-certs -o jsonpath='{.data.tls\.crt}' > nms.crt +kubectl --namespace supplychain-ns get secret doorman-tls-certs -o jsonpath='{.data.tls\.crt}' > doorman.crt + +# Run secondary init +cd ../.. +kubectl create namespace manufacturer-ns # if the namespace does not exist already +# Create the roottoken secret +kubectl -n manufacturer-ns create secret generic roottoken --from-literal=token= + +helm install init ./corda-init --namespace manufacturer-ns --values ./values/proxy-and-vault/init-sec.yaml + +helm install manufacturer ./corda-node --namespace manufacturer-ns --values ./values/proxy-and-vault/node.yaml --set nodeConf.legalName="O=Manufacturer\,OU=Manufacturer\,L=47.38/8.54/Zurich\,C=CH" +``` + +### Clean-up + +To clean up, just uninstall the helm releases. +```bash +helm uninstall --namespace supplychain-ns notary +helm uninstall --namespace supplychain-ns supplychain +helm uninstall --namespace supplychain-ns init + +helm uninstall --namespace manufacturer-ns manufacturer +helm uninstall --namespace manufacturer-ns init + +``` \ No newline at end of file diff --git a/platforms/r3-corda/charts/corda-certs-gen/Chart.yaml b/platforms/r3-corda/charts/corda-certs-gen/Chart.yaml index 6bdce90e181..04535949bbb 100644 --- a/platforms/r3-corda/charts/corda-certs-gen/Chart.yaml +++ b/platforms/r3-corda/charts/corda-certs-gen/Chart.yaml @@ -5,7 +5,21 @@ ############################################################################################## apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-os: Generates the ca-certificates." 
name: corda-certs-gen -version: 1.0.0 +description: "R3 Corda: Generates and stores TLS certificates for nodes and network services" +version: 1.0.1 +appVersion: latest +keywords: + - bevel + - hyperledger + - corda + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/r3-corda/charts/corda-certs-gen/README.md b/platforms/r3-corda/charts/corda-certs-gen/README.md index 22b8fa38804..3a70d52ba3d 100644 --- a/platforms/r3-corda/charts/corda-certs-gen/README.md +++ b/platforms/r3-corda/charts/corda-certs-gen/README.md @@ -3,163 +3,83 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) - -# corda-certs-gen deployment +# corda-certs-gen -- [corda-certs-gen Deployment Helm Chart](#corda-certs-gen-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Contributing](#contributing) -- [License](#license) +This chart is a component of Hyperledger Bevel. The corda-certs-gen chart generates the TLS certificates needed for accessing Doorman, Network-Map and Corda nodes outside the cluster. If enabled, the certificates are then stored on the configured vault and also stored as Kubernetes secrets. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details. - -## corda-certs-gen Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/develop/platforms/r3-corda/charts/corda-certs-gen) generates the certificates. +## TL;DR - - -## Prerequisites ---- -Before deploying the chart please ensure you have the following prerequisites: - -- Doorman network is setup and running. -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm is installed. - - - -## Chart Structure ---- -This chart has following structue: - -``` - - ├── corda-certs-gen - │ ├── Chart.yaml - │ ├── templates - │ │ ├── job.yaml - │ │ ├── configmap.yaml - │ │ └── _helpers.tpl - │ └── values.yaml +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install my-release bevel/corda-certs-gen ``` -Type of files used: - -- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed. -- `job.yaml` : This Job is responsible for generating the root CA certificate, Doorman CA certificate, and MongoDB CA certificate for doorman. -- `configmap.yaml` : ConfigMap resource in Kubernetes with a specific name and namespace, along with labels for identification.And holds the openssl configuration file. -- `_helpers.tpl` : A template file used for defining custom labels in the Helm chart. -- `chart.yaml` : Provides metadata about the chart, such as its name, version, and description. -- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the metadata, image, service, Vault, etc. - - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/r3-corda/charts/corda-certs-gen/values.yaml) file contains configurable values for the Helm chart. 
We can modify these values according to the deployment requirements. Here are some important configuration options: - -## Parameters ---- - -### Name - -| Name | Description | Default Value | -| -----------| -------------------------------------------------- | ------------- | -| name | Provide the name of the node | doorman | - -### Metadata - -| Name | Description | Default Value | -| ----------------| ----------------------------------------------------------------------| ------------- | -| namespace | Provide the namespace for the Generate Certs Generator | notary-ns | -| labels | Provide any additional labels for the Generate Certs Generator | "" | - -### Image - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------- | --------------- | -| initContainerName | Provide the alpine utils image, which is used for all init-containers of deployments/jobs | "" | -| certsContainerName | Provide the image for the certs container | "" | -| imagePullSecret | Provide the docker-registry secret created and stored in kubernetes cluster as a secret | "" | -| pullPolicy | Pull policy to be used for the Docker image | IfNotPresent | - -### Vault +## Prerequisites -| Name | Description | Default Value | -| ------------------------- | --------------------------------------------------------------------------| ------------- | -| address | Address/URL of the Vault server | "" | -| role | Role used for authentication with Vault | vault-role | -| authpath | Authentication path for Vault | cordadoorman | -| serviceAccountName | Provide the already created service account name autheticated to vault | vault-auth | -| certSecretPrefix | Provide the vault path where the certificates are stored | doorman/data | -| retries | Number of retries to check contents from vault | 10 | -| sleepTimeAfterError | Sleep time in seconds when error while registration | 15 | +- Kubernetes 1.19+ +- Helm 3.2.0+ -### Subjects +If Hashicorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ -| Name | Description | Default Value | -| ------------------------- | ---------------------------------- | ------------- | -| root_subject | Mention the subject for rootca | "" | -| mongorootca | Mention the subject for mongorootca| "" | -| doormanca | Mention the subject for doormanca | "" | -| networkmap | Mention the subject for networkmap | "" | +## Installing the Chart -### Volume +To install the chart with the release name `my-release`: -| Name | Description | Default Value | -| -----------------| -----------------------| ------------- | -| baseDir | Base directory | /home/bevel | +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install my-release bevel/corda-certs-gen +``` +The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. - -## Deployment ---- +> **Tip**: List all releases using `helm list` -To deploy the corda-certs-gen Helm chart, follow these steps: +## Uninstalling the Chart -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/r3-corda/charts/corda-certs-gen/values.yaml) file to set the desired configuration values. -2. 
Run the following Helm command to install, upgrade, verify delete the chart: +To uninstall/delete the `my-release` deployment: -To install the chart: ```bash -helm repo add bevel https://hyperledger.github.io/bevel/ -helm install ./corda-certs-gen +helm uninstall my-release ``` -To upgrade the chart: -```bash -helm upgrade ./corda-certs-gen -``` +The command removes all the Kubernetes components associated with the chart and deletes the release. -To verify the deployment: -```bash -kubectl get jobs -n -``` -Note : Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. +## Parameters -To delete the chart: -```bash -helm uninstall -``` -Note : Replace `` with the desired name for the release. +### Global parameters +These parameters are refered to as same in each parent or chold chart +| Name | Description | Default Value | +|--------|---------|-------------| +|`global.serviceAccountName` | The serviceaccount name that will be used for Vault Auth management| `vault-auth` | +| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` is supported. | `hashicorp` | +| `global.vault.role` | Role used for authentication with Vault | `vault-role` | +| `global.vault.address`| URL of the Vault server. | `""` | +| `global.vault.authPath` | Authentication path for Vault | `supplychain` | +| `global.vault.network` | Network type which will determine the vault policy | `corda` | +| `global.vault.secretEngine` | Vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | Vault secret prefix which must start with `data/` | `data/supplychain` | +| `global.proxy.externalUrlSuffix` | External URL suffix which will be used as CN to generate certificate | `test.blockchaincloudpoc.com` | + +### Image +| Name | Description| Default Value | +|------------|-----------|---------| +| `image.repository` | Docker repository which will be used for this job | `ghcr.io/hyperledger/bevel-alpine` | +| `image.tag` | Docker image tag which will be used for this job | `latest` | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | +| `image.pullPolicy` | The pull policy for the image | `IfNotPresent` | - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [corda-certs-gen Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/develop/platforms/r3-corda/charts/corda-certs-gen), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). +### Settings +| Name | Description | Default Value | +| ------------| -------------- | --------------- | +| `settings.networkServices` | Set value to true when Doorman and NMS certificates are to be generated. | `false` | - ## License This chart is licensed under the Apache v2.0 license. 
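As a worked example of the parameter tables above, a minimal override file for this chart could look like the following sketch; all values are placeholders chosen for illustration rather than chart defaults, so compare them with the chart's own `values.yaml` before use.

```yaml
# my-values.yaml (hypothetical) -- illustrative overrides for corda-certs-gen
global:
  serviceAccountName: vault-auth
  vault:
    type: hashicorp
    address: "https://my-vault.example.com:8200"   # assumed Vault address
    authPath: supplychain
    secretEngine: secretsv2
    secretPrefix: "data/supplychain"
    role: vault-role
    network: corda
  proxy:
    externalUrlSuffix: test.blockchaincloudpoc.com
image:
  repository: ghcr.io/hyperledger/bevel-alpine
  tag: latest
  pullPolicy: IfNotPresent
settings:
  networkServices: true   # also generate the Doorman and NMS TLS certificates
```

Such a file would be passed to the install command shown earlier, e.g. `helm install my-release bevel/corda-certs-gen --values my-values.yaml`.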
-Copyright © 2023 Accenture +Copyright © 2024 Accenture ### Attribution diff --git a/platforms/r3-corda/charts/corda-certs-gen/templates/_helpers.tpl b/platforms/r3-corda/charts/corda-certs-gen/templates/_helpers.tpl index 7bf5f530a8e..0f08d8da5ad 100644 --- a/platforms/r3-corda/charts/corda-certs-gen/templates/_helpers.tpl +++ b/platforms/r3-corda/charts/corda-certs-gen/templates/_helpers.tpl @@ -1,5 +1,29 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} \ No newline at end of file +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "corda-certs-gen.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "corda-certs-gen.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "corda-certs-gen.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/platforms/r3-corda/charts/corda-certs-gen/templates/configmap.yaml b/platforms/r3-corda/charts/corda-certs-gen/templates/configmap.yaml deleted file mode 100644 index 19255f292bc..00000000000 --- a/platforms/r3-corda/charts/corda-certs-gen/templates/configmap.yaml +++ /dev/null @@ -1,20 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ .Values.nodeName }}-conf - namespace: {{ .Values.metadata.namespace }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - {{- include "labels.custom" . | nindent 2 }} -data: - openssl.conf: |+ -{{ .Files.Get "files/openssl.conf" | indent 4 }} diff --git a/platforms/r3-corda/charts/corda-certs-gen/templates/job-cleanup.yaml b/platforms/r3-corda/charts/corda-certs-gen/templates/job-cleanup.yaml new file mode 100644 index 00000000000..66cea2ac9bd --- /dev/null +++ b/platforms/r3-corda/charts/corda-certs-gen/templates/job-cleanup.yaml @@ -0,0 +1,51 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "corda-certs-gen.name" . }}-cleanup + labels: + app.kubernetes.io/name: corda-certs-gen-job-cleanup + app.kubernetes.io/component: job-cleanup + app.kubernetes.io/part-of: {{ include "corda-certs-gen.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/managed-by: helm + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/hook-weight: "0" + helm.sh/hook: "pre-delete" + helm.sh/hook-delete-policy: "hook-succeeded" +spec: + backoffLimit: 3 + completions: 1 + template: + metadata: + labels: + app.kubernetes.io/name: corda-certs-gen-job-cleanup + app.kubernetes.io/component: job-cleanup + app.kubernetes.io/part-of: {{ include "corda-certs-gen.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/managed-by: helm + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + restartPolicy: "Never" + imagePullSecrets: + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} + containers: + - name: delete-certs + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + securityContext: + runAsUser: 0 + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - sh + - -c + args: + - | + echo "Deleting tls-certs secret in k8s ..." + kubectl delete secret --namespace {{ .Release.Namespace }} {{ .Release.Name }}-tls-certs + {{- if .Values.settings.networkServices }} + kubectl delete secret --namespace {{ .Release.Namespace }} doorman-tls-certs + kubectl delete secret --namespace {{ .Release.Namespace }} nms-tls-certs + {{- end }} diff --git a/platforms/r3-corda/charts/corda-certs-gen/templates/job.yaml b/platforms/r3-corda/charts/corda-certs-gen/templates/job.yaml index 15ce252282d..74d962b2e03 100644 --- a/platforms/r3-corda/charts/corda-certs-gen/templates/job.yaml +++ b/platforms/r3-corda/charts/corda-certs-gen/templates/job.yaml @@ -7,350 +7,260 @@ apiVersion: batch/v1 kind: Job metadata: - name: {{ .Values.nodeName }}-generate-certs - namespace: {{ .Values.metadata.namespace }} + name: "{{ include "corda-certs-gen.name" . }}" + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/hook-delete-policy: "before-hook-creation" labels: - app: {{ .Values.nodeName }}-generate-certs - app.kubernetes.io/name: {{ .Values.nodeName }}-generate-certs - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + app: "{{ include "corda-certs-gen.name" . }}" + app.kubernetes.io/name: "{{ include "corda-certs-gen.name" . }}" app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . 
| nindent 2 }} spec: - backoffLimit: 6 - template: - metadata: - labels: - app: {{ .Values.nodeName }}-generate-certs - app.kubernetes.io/name: {{ .Values.nodeName }}-generate-certs - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - restartPolicy: "OnFailure" - serviceAccountName: {{ $.Values.vault.serviceAccountName }} - securityContext: - fsGroup: 1000 - initContainers: - - name: init-check-certificates - image: {{ .Values.image.initContainerName }} - imagePullPolicy: IfNotPresent - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authPath }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certSecretPrefix }} - - name: MOUNT_PATH - value: "/certcheck" - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code and curl_response - $curl_response" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - # Setting up the environment to get secrets/certificates from Vault - echo "Getting secrets/certificates from Vault server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - echo "Logged into Vault" - mkdir -p ${MOUNT_PATH} - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs | jq -r 'if .errors then . else . end') - data_info="$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data["data"]')" - - if [ "$data_info" == "null" ] - then - echo "Certficates absent in vault. 
Ignore error warning" - touch ${MOUNT_PATH}/absent.txt - else - validateVaultResponse "${CERTS_SECRET_PREFIX}/certs" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - echo "Certificates present in vault" - touch ${MOUNT_PATH}/present.txt - fi - echo "Done checking for certificates in vault" - volumeMounts: - - name: certcheck - mountPath: /certcheck - - name: init-credentials - image: {{ .Values.image.initContainerName }} - imagePullPolicy: IfNotPresent - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authPath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certSecretPrefix }} - - name: MOUNT_PATH - value: "/DATA" - - name: NODEINFO_MOUNT_PATH - value: "/notary-nodeinfo" - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code and curl_response - $curl_response" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - if [ -e /certcheck/present.txt ] - then - echo "Certificates already present in the vault. Skipping.." - exit 0 - fi - # Setting up the environment to get secrets from Vault - echo "Getting secrets from Vault Server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"vault-role","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - echo "logged into vault" - # Creating dirs for storing certificates - mkdir -p ${MOUNT_PATH}/keystore; + backoffLimit: 6 + template: + metadata: + labels: + app: "{{ include "corda-certs-gen.name" . }}" + app.kubernetes.io/name: "{{ include "corda-certs-gen.name" . 
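
This hunk replaces the inline curl/jq Vault handling of the old init containers with the shared `bevel-vault.sh` helper mounted from the `bevel-vault-script` ConfigMap. For reference, the following is a minimal sketch of the login-and-read sequence that the removed code performed inline (and that the shared helper presumably wraps), assuming Kubernetes auth against a KV v2 engine; the helper's actual implementation is not part of this diff, and the address, auth path, role, and secret path below are placeholders.

```bash
# Minimal sketch (not the bevel-vault.sh implementation): Kubernetes-auth login
# followed by a KV v2 read, as the removed init containers did inline.
# VAULT_ADDR, KUBERNETES_AUTH_PATH, VAULT_APP_ROLE and the secret path are placeholders.
KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)

# Exchange the pod's service-account JWT for a Vault client token
VAULT_TOKEN=$(curl -sS --request POST \
  --data "{\"role\":\"${VAULT_APP_ROLE}\",\"jwt\":\"${KUBE_SA_TOKEN}\"}" \
  "${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login" | jq -r '.auth.client_token')

# Read a KV v2 secret; with secretEngine=secretsv2 and secretPrefix=data/supplychain
# the chart builds paths like secretsv2/data/supplychain/<release>-tlscerts
curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" \
  "${VAULT_ADDR}/v1/secretsv2/data/supplychain/my-release-tlscerts" | jq -r '.data.data'
```

The new templates instead call `vaultBevelFunc "init"`, `"readJson"` and `"write"`, so this boilerplate is no longer repeated in every init container.
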
}}" + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + spec: + restartPolicy: "OnFailure" + imagePullSecrets: + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} + serviceAccountName: {{ .Values.global.serviceAccountName }} + securityContext: + fsGroup: 1000 + volumes: + - name: certificates + emptyDir: + medium: Memory + - name: scripts-volume + configMap: + name: bevel-vault-script + defaultMode: 0777 + initContainers: + {{- if (eq .Values.global.vault.type "hashicorp") }} + - name: init-check-certificates + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: VAULT_ADDR + value: "{{ $.Values.global.vault.address }}" + - name: VAULT_SECRET_ENGINE + value: "{{ $.Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ $.Values.global.vault.secretPrefix }}" + - name: KUBERNETES_AUTH_PATH + value: "{{ $.Values.global.vault.authPath }}" + - name: VAULT_APP_ROLE + value: "{{ $.Values.global.vault.role }}" + - name: VAULT_TYPE + value: "{{ $.Values.global.vault.type }}" + command: ["sh", "-c"] + args: + - |- - OUTPUT_PATH=${MOUNT_PATH}/keystore; - # Fetching credentials for keystores - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/credentials/keystore | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/credentials/keystore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - KEYSTORE_PASS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["keystorepass"]') - echo "${KEYSTORE_PASS}"> ${OUTPUT_PATH}/keystorepass - - touch /DATA/done.txt - echo "Done" - volumeMounts: - - name: credentials - mountPath: /DATA - - name: certcheck - mountPath: /certcheck - containers: - - name: certs - image: "{{ required "certs[main]: missing value for .Values.image.certsContainerName" .Values.image.certsContainerName }}" - env: - - name: BASE_DIR - value: "{{ .Values.volume.baseDir }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: ["/bin/bash", "-c"] - args: - - |- - if [ -e /certcheck/present.txt ] - then - echo "Certificates already present in the vault. Skipping.." - exit 0 - fi - rm -r ${BASE_DIR}/DATA/done.txt - - # create directories - mkdir -p ${BASE_DIR}/DATA/rootca - mkdir -p ${BASE_DIR}/DATA/mongorootca - mkdir -p ${BASE_DIR}/DATA/mongodbca - mkdir -p ${BASE_DIR}/DATA/doormanca + # Source the bevel-vault.sh script to perform the Vault-CURD operations + . /scripts/bevel-vault.sh - KEYSTORE_PASS=$(cat ${BASE_DIR}/credentials/keystore/keystorepass) + # Get the Vault token + echo "Getting vault Token..." 
+ vaultBevelFunc "init" + echo "Logged into Vault" - cd ${BASE_DIR}/DATA/rootca - set -x - keytool -genkey -keyalg RSA -alias key -dname "{{ .Values.subjects.rootca }}" -keystore keys.jks -storepass $KEYSTORE_PASS -keypass $KEYSTORE_PASS - openssl ecparam -name prime256v1 -genkey -noout -out cordarootca.key - openssl req -x509 -config ${BASE_DIR}/openssl.conf -new -nodes -key cordarootca.key -days 1024 -out cordarootca.pem -extensions v3_ca -subj '/{{ .Values.subjects.rootca | replace "," "/" }}' - openssl pkcs12 -export -name cert -inkey cordarootca.key -in cordarootca.pem -out cordarootcacert.pkcs12 -cacerts -passin pass:${KEYSTORE_PASS} -passout pass:${KEYSTORE_PASS} - openssl pkcs12 -export -name key -inkey cordarootca.key -in cordarootca.pem -out cordarootcakey.pkcs12 -passin pass:${KEYSTORE_PASS} -passout pass:${KEYSTORE_PASS} - eval "yes | keytool -importkeystore -srckeystore cordarootcacert.pkcs12 -srcstoretype PKCS12 -srcstorepass $KEYSTORE_PASS -destkeystore keys.jks -deststorepass $KEYSTORE_PASS" - eval "yes | keytool -importkeystore -srckeystore cordarootcakey.pkcs12 -srcstoretype PKCS12 -srcstorepass $KEYSTORE_PASS -destkeystore keys.jks -deststorepass $KEYSTORE_PASS" + OUTPUT_PATH=/certificates/check_certs + mkdir -p ${OUTPUT_PATH} + # Obtain the ambassador TLS certificates from Vault if exists + vault_secret_key="${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/{{ .Release.Name }}-tlscerts" + echo "Checking certs in vault at path: ${vault_secret_key}" + vaultBevelFunc "readJson" ${vault_secret_key} - cd ${BASE_DIR}/DATA/doormanca - keytool -genkey -keyalg RSA -alias key -dname "{{ .Values.subjects.doormanca }}" -keystore keys.jks -storepass $KEYSTORE_PASS -keypass $KEYSTORE_PASS - openssl ecparam -name prime256v1 -genkey -noout -out cordadoormanca.key - openssl req -new -nodes -key cordadoormanca.key -days 1000 -out cordadoormanca.csr -subj '/{{ .Values.subjects.doormanca | replace "," "/" }}' - openssl x509 -req -days 1000 -in cordadoormanca.csr -CA ../rootca/cordarootca.pem -CAkey ../rootca/cordarootca.key -out cordadoormanca.pem -CAcreateserial \ - -CAserial serial -extfile ${BASE_DIR}/openssl.conf -extensions doorman - openssl pkcs12 -export -name cert -inkey cordadoormanca.key -in cordadoormanca.pem -out cordadoormancacert.pkcs12 -cacerts -passin pass:${KEYSTORE_PASS} -passout pass:${KEYSTORE_PASS} - openssl pkcs12 -export -name key -inkey cordadoormanca.key -in cordadoormanca.pem -out cordadoormancakey.pkcs12 -passin pass:${KEYSTORE_PASS} -passout pass:${KEYSTORE_PASS} - eval "yes | keytool -importkeystore -srckeystore cordadoormancacert.pkcs12 -srcstoretype PKCS12 -srcstorepass $KEYSTORE_PASS -destkeystore keys.jks -deststorepass $KEYSTORE_PASS - eval "yes | keytool -importkeystore -srckeystore cordadoormancakey.pkcs12 -srcstoretype PKCS12 -srcstorepass $KEYSTORE_PASS -destkeystore keys.jks -deststorepass $KEYSTORE_PASS - - cd ${BASE_DIR}/DATA/mongorootca - openssl genrsa -out mongoCA.key 3072 - openssl req -x509 -config ${BASE_DIR}/openssl.conf -new -extensions v3_ca -key mongoCA.key -days 365 -out mongoCA.crt -subj '{{ .Values.subjects.mongorootca }}' - - cd ${BASE_DIR}/DATA/mongodbca - openssl req -new -nodes -newkey rsa:4096 -keyout mongodb.key -out mongodb.csr -subj '{{ .Values.subjects.mongorootca }}-{{ .Values.nodeName }}' - openssl x509 -CA ../mongorootca/mongoCA.crt -CAkey ../mongorootca/mongoCA.key -CAcreateserial -CAserial serial -req -days 365 -in mongodb.csr -out mongodb.crt - cat mongodb.key mongodb.crt > mongodb.pem + # Get the ambassador TLS data info 
from Vault + cert=$(echo ${VAULT_SECRET} | jq -r '.["ambassadorcrt"]') - #creating a dummy file to perform check if last line is executed or not. - touch ${BASE_DIR}/DATA/done.txt - volumeMounts: - - name: certcheck - mountPath: /certcheck - - name: credentials - mountPath: {{ .Values.volume.baseDir }}/credentials - - name: certs-keys - mountPath: {{ .Values.volume.baseDir }}/DATA - - name: certs-etc - mountPath: {{ .Values.volume.baseDir }}/etc - - name: openssl-config - mountPath: {{ .Values.volume.baseDir }}/openssl.conf - subPath: openssl.conf - - name: store-certs - image: {{ .Values.image.initContainerName }} - imagePullPolicy: IfNotPresent - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authPath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certSecretPrefix }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - } - - if [ -e /certcheck/present.txt ] - then - echo "Certificates already present in the vault. Skipping.." - exit 0 - fi - # setting up env to get secrets from vault - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - cd ${BASE_DIR}/DATA - # putting certificates - COUNTER=1 - while [ "$COUNTER" -lt {{ $.Values.vault.retries }} ] - do - if [ -e done.txt ] - then - cd ${BASE_DIR}/DATA - echo "found certificates, performing vault put" - # Use -w0 to get single line base64 -w0 - DOORMAN_CA=$(cat ./doormanca/keys.jks | base64 -w0) - ROOT_CA=$(cat ./rootca/keys.jks | base64 -w0) - CA_CERTS=$(cat ./rootca/cordarootca.pem | base64 -w0) - KEYSTORE=$(cat ./rootca/cordarootca.key | base64 -w0) - MONGO_KEY=$(cat ./mongodbca/mongodb.pem | base64 -w0) - MONGO_CERT=$(cat ./mongorootca/mongoCA.crt | base64 -w0) + # If the cert is null, empty, or contains a parse error, then the certificates do not exist in Vault + if [ "$cert" == "null" ] || [[ "$cert" = "parse error"* ]] || [ "$cert" = "" ] + then + # Create a file to indicate that the ambassador TLS certificates are absent + echo "Certficates absent in vault. 
Ignore error warning" + touch ${OUTPUT_PATH}/absent.txt + else + echo "Certificates present in vault" + touch ${OUTPUT_PATH}/present.txt + AMBASSADORTLS_PATH=/certificates/ambassadortls + mkdir -p ${AMBASSADORTLS_PATH} + cert=$(echo ${VAULT_SECRET} | jq -r '.["ambassadorcrt"]' | base64 -d ) + key=$(echo ${VAULT_SECRET} | jq -r '.["ambassadorkey"]' | base64 -d ) + echo "${cert}" > ${AMBASSADORTLS_PATH}/ambassador.crt + echo "${key}" > ${AMBASSADORTLS_PATH}/ambassador.key + + {{- if .Values.settings.networkServices }} + cert=$(echo ${VAULT_SECRET} | jq -r '.["doormancrt"]' | base64 -d ) + key=$(echo ${VAULT_SECRET} | jq -r '.["doormankey"]' | base64 -d ) + echo "${cert}" > ${AMBASSADORTLS_PATH}/doorman.crt + echo "${key}" > ${AMBASSADORTLS_PATH}/doorman.key + cert=$(echo ${VAULT_SECRET} | jq -r '.["nmscrt"]' | base64 -d ) + key=$(echo ${VAULT_SECRET} | jq -r '.["nmskey"]' | base64 -d ) + echo "${cert}" > ${AMBASSADORTLS_PATH}/nms.crt + echo "${key}" > ${AMBASSADORTLS_PATH}/nms.key + {{- end }} + + fi + echo "Done checking for certificates in vault" + + volumeMounts: + - name: certificates + mountPath: /certificates + - name: scripts-volume + mountPath: /scripts/bevel-vault.sh + subPath: bevel-vault.sh + {{- end }} + containers: + - name: generate-certs + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + {{- if (eq .Values.global.vault.type "hashicorp") }} + - name: VAULT_ADDR + value: "{{ $.Values.global.vault.address }}" + - name: VAULT_SECRET_ENGINE + value: "{{ $.Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ $.Values.global.vault.secretPrefix }}" + - name: KUBERNETES_AUTH_PATH + value: "{{ $.Values.global.vault.authPath }}" + - name: VAULT_APP_ROLE + value: "{{ $.Values.global.vault.role }}" + - name: VAULT_TYPE + value: "{{ $.Values.global.vault.type }}" + {{- end }} + - name: EXTERNAL_URL + value: "{{ .Release.Name }}.{{ .Values.global.proxy.externalUrlSuffix }}" + command: ["sh", "-c"] + args: + - |- +{{- if (eq .Values.global.vault.type "hashicorp") }} + # Source the bevel-vault.sh script to perform the Vault-CURD operations + . /scripts/bevel-vault.sh + # Get the Vault token + echo "Getting vault Token..." 
+ vaultBevelFunc "init" + echo "Logged into Vault" + function safeWriteSecret { + key=$1 + fpath=$2 + # Use -w0 to get single line base64 -w0 + TLS_CERT=$(cat ${fpath}/ambassador.crt | base64 -w0) + TLS_KEY=$(cat ${fpath}/ambassador.key | base64 -w0) +{{- if .Values.settings.networkServices }} + DOORMAN_CERT=$(cat ${fpath}/doorman.crt | base64 -w0) + DOORMAN_KEY=$(cat ${fpath}/doorman.key | base64 -w0) + NMS_CERT=$(cat ${fpath}/nms.crt | base64 -w0) + NMS_KEY=$(cat ${fpath}/nms.key | base64 -w0) - echo "{\"data\": { - \"{{ .Values.nodeName }}.jks\": \"${DOORMAN_CA}\", - \"rootcakey\": \"${ROOT_CA}\", - \"cacerts\": \"${CA_CERTS}\", - \"keystore\": \"${KEYSTORE}\", - \"mongodb-{{ .Values.nodeName }}.pem\": \"${MONGO_KEY}\", - \"mongoCA.crt\": \"${MONGO_CERT}\" - }}" > payload.json - - echo "before curl" - curl \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - --request POST \ - --data @payload.json \ - ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs - - echo "after POST" + echo " + { + \"data\": + { + \"ambassadorcrt\": \"${TLS_CERT}\", + \"ambassadorkey\": \"${TLS_KEY}\", + \"doormancrt\": \"${DOORMAN_CERT}\", + \"doormankey\": \"${DOORMAN_KEY}\", + \"nmscrt\": \"${NMS_CERT}\", + \"nmskey\": \"${NMS_KEY}\" + } + }" > payload.json +{{- else }} + echo " + { + \"data\": + { + \"ambassadorcrt\": \"${TLS_CERT}\", + \"ambassadorkey\": \"${TLS_KEY}\" + } + }" > payload.json +{{- end }} + # Copy the TLS certificates to the Vault + vaultBevelFunc 'write' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/${key}-tlscerts" 'payload.json' + } +{{- else }} + function safeWriteSecret { + echo "Secrets are created. Add code specific to cloud provider vault here" + } +{{- end }} + # Set the directories path + CERTS_CHECKS_PATH=/certificates/check_certs + AMBASSADORTLS_PATH=/certificates/ambassadortls + + # if ambassadortls_absent file does not exist, create the certificates + if [ -e ${CERTS_CHECKS_PATH}/present.txt ] + then + echo "Certificates present." + else + # create directories + mkdir -p ${AMBASSADORTLS_PATH} + + cd ${AMBASSADORTLS_PATH} + echo "[req] + distinguished_name = dn + [dn] + [EXT] + keyUsage=digitalSignature + extendedKeyUsage=serverAuth + subjectAltName = @alt_names + [alt_names] + DNS.1 = {{ .Release.Name }}.{{ .Values.global.proxy.externalUrlSuffix }} + DNS.2 = {{ .Release.Name }}api.{{ .Values.global.proxy.externalUrlSuffix }} + DNS.3 = {{ .Release.Name }}web.{{ .Values.global.proxy.externalUrlSuffix }} + " > openssl.conf - # get certs from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/certs | jq -r 'if .errors then . else . 
end') - NODE_KEYS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "{{ .Values.nodeName }}.jks" ]' 2>&1) - CORDA_SSL_ROOT_KEYS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "rootcakey" ]' 2>&1) - CORDA_CA_CERTS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "cacerts" ]' 2>&1) - ROOT_KEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "keystore" ]' 2>&1) - MONGODB_CERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "mongoCA.crt" ]' 2>&1) - MONGODB_KEY=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "mongodb-{{ .Values.nodeName }}.pem" ]' 2>&1) + openssl req -x509 -out ambassador.crt -keyout ambassador.key -newkey rsa:2048 -nodes -sha256 \ + -subj "/CN=${EXTERNAL_URL}" -extensions EXT -config openssl.conf + + {{- if .Values.settings.networkServices }} + echo "Create certificates for Network Services" + DOORMAN_URL="{{ .Release.Name }}-doorman.{{ .Values.global.proxy.externalUrlSuffix }}" + openssl req -x509 -out doorman.crt -keyout doorman.key -newkey rsa:2048 -nodes -sha256 \ + -subj "/CN=${DOORMAN_URL}" -addext "subjectAltName = DNS:${DOORMAN_URL}" -extensions EXT -config openssl.conf + NMS_URL="{{ .Release.Name }}-nms.{{ .Values.global.proxy.externalUrlSuffix }}" + openssl req -x509 -out nms.crt -keyout nms.key -newkey rsa:2048 -nodes -sha256 \ + -subj "/CN=${NMS_URL}" -addext "subjectAltName = DNS:${NMS_URL}" -extensions EXT -config openssl.conf - if [ "$NODE_KEYS" == "null" ] || [ "$CORDA_SSL_ROOT_KEYS" == "null" ] || [ "$CORDA_CA_CERTS" == "null" ] || [ "$ROOT_KEYSTORE" == "null" ] || [ "$MONGODB_CERT" == "null" ] || [ "$MONGODB_KEY" == "null" ] || [[ "$NODE_KEYS" == "parse error"* ]] || [[ "$CORDA_SSL_ROOT_KEYS" == "parse error"* ]] || [[ "$CORDA_CA_CERTS" == "parse error"* ]] || [[ "$ROOT_KEYSTORE" == "parse error"* ]] || [[ "$MONGODB_CERT" == "parse error"* ]] || [[ "$MONGODB_KEY" == "parse error"* ]] - then - echo "certificates write or read fail" - sleep {{ $.Values.vault.sleepTimeAfterError }} - if [ "$COUNTER" -ge {{ $.Values.vault.retries }} ] - then - echo "Retry attempted $COUNTER times, certificates have not been saved" - exit 1 - fi - fi - break - COUNTER=`expr "$COUNTER" + 1` - fi - done + {{- end }} + echo "Done creating certificates, now store as secrets in k8s" + safeWriteSecret {{ .Release.Name }} ${AMBASSADORTLS_PATH} - if [ "$COUNTER" -gt {{ $.Values.vault.retries }} ] - then - echo "Retry attempted $COUNTER times, no files found. Giving up!" - exit 1 - break - fi - echo "completed" - volumeMounts: - - name: certcheck - mountPath: /certcheck - - name: certs-keys - mountPath: {{ .Values.volume.baseDir }}/DATA - readOnly: false - imagePullSecrets: - - name: {{ .Values.image.imagePullSecret }} - volumes: - - name: certcheck - emptyDir: - medium: Memory - - name: credentials - emptyDir: - medium: Memory - - name: certs-keys - emptyDir: - medium: Memory - - name: certs-etc - emptyDir: - medium: Memory - - name: openssl-config - configMap: - name: {{ .Values.nodeName }}-conf + fi; + # Create tls secret with the certificates + kubectl get secret --namespace {{ .Release.Namespace }} {{ .Release.Name }}-tls-certs + if [ $? -ne 0 ]; then + kubectl create secret tls --namespace {{ .Release.Namespace }} {{ .Release.Name }}-tls-certs \ + --cert=${AMBASSADORTLS_PATH}/ambassador.crt \ + --key=${AMBASSADORTLS_PATH}/ambassador.key + fi; + + {{- if .Values.settings.networkServices }} + kubectl get secret --namespace {{ .Release.Namespace }} doorman-tls-certs + if [ $? 
-ne 0 ]; then + kubectl create secret tls --namespace {{ .Release.Namespace }} doorman-tls-certs \ + --cert=${AMBASSADORTLS_PATH}/doorman.crt \ + --key=${AMBASSADORTLS_PATH}/doorman.key + fi; + kubectl get secret --namespace {{ .Release.Namespace }} nms-tls-certs + if [ $? -ne 0 ]; then + kubectl create secret tls --namespace {{ .Release.Namespace }} nms-tls-certs \ + --cert=${AMBASSADORTLS_PATH}/nms.crt \ + --key=${AMBASSADORTLS_PATH}/nms.key + fi; + + {{- end }} + volumeMounts: + - name: certificates + mountPath: /certificates + - name: scripts-volume + mountPath: /scripts/bevel-vault.sh + subPath: bevel-vault.sh diff --git a/platforms/r3-corda/charts/corda-certs-gen/values.yaml b/platforms/r3-corda/charts/corda-certs-gen/values.yaml index 1783ec6bcf3..7ca8691f994 100644 --- a/platforms/r3-corda/charts/corda-certs-gen/values.yaml +++ b/platforms/r3-corda/charts/corda-certs-gen/values.yaml @@ -4,98 +4,53 @@ # SPDX-License-Identifier: Apache-2.0 ############################################################################################## -# Default values for Certs Generator chart. # This is a YAML-formatted file. # Declare variables to be passed into your templates. - -############################################################# -# Basic Configuration # -############################################################# -# Provide the name of the node -# Eg. nodeName: cert-generation -nodeName: doorman - -# This section contains the Corda metadata. -metadata: - # Provide the namespace for the Corda Certs Generator. - # Eg. namespace: cenm - namespace: notary-ns - # Provide any additional labels for the Corda Certs Generator. - labels: +# The following are for overriding global values +global: + #Provide the service account name which will be created. + #Eg. serviceAccountName: vault-auth + serviceAccountName: vault-auth + vault: + #Provide the type of vault + #Eg. type: hashicorp + type: hashicorp + #Provide the vault role used. + #Eg. role: vault-role + role: vault-role + #Provide the vault server address + #Eg. address: http://54.226.163.39:8200 + address: + #Provide the vault authPath configured to be used. + #Eg. authPath: supplychain + authPath: supplychain + #Provide the network type + network: corda + #Provide the secret engine. + #Eg. secretEngine: secretsv2 + secretEngine: secretsv2 + #Provide the vault path where the tls certificates will be stored + #Eg. secretPrefix: data/warehouse-bes/crypto/warehouse/tls MUST use data/ + secretPrefix: "data/supplychain" + proxy: + # Provide external URL for cert generation + externalUrlSuffix: test.blockchaincloudpoc.com # Provide information regarding the Docker images used. image: - # Provide the alpine utils image, which is used for all init-containers of deployments/jobs. - # NOTE: The alpine image used is the base alpine image with CURL installed. - # Eg. initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - initContainerName: ghcr.io/hyperledger/alpine-utils:1.0 - # Provide the image for the certs container. - # Eg. certsContainerName: ghcr.io/hyperledger/bevel-doorman:latest - certsContainerName: ghcr.io/hyperledger/bevel-build:jdk8-latest + #Provide the image repository for all containers + #Eg. repository: ghcr.io/hyperledger/bevel-alpine + repository: ghcr.io/hyperledger/bevel-alpine + tag: latest # Provide the docker-registry secret created and stored in kubernetes cluster as a secret. # Eg. imagePullSecret: regcred - imagePullSecret: + pullSecret: # Pull policy to be used for the Docker image # Eg. 
pullPolicy: Always pullPolicy: IfNotPresent - -############################################################# -# HashiCorp Vault Configuration # -############################################################# -# NOTE: Make sure that the vault is already unsealed, intialized and configured to -# use Kubernetes service account token based authentication. -# For more info, see https://www.vaultproject.io/docs/auth/kubernetes - -vault: - # Provide the vault address - # Eg. address: http://vault.example.com:8200 - address: - # Provide the vault role used. - # Eg. role: vault-role - role: vault-role - # Provide the authPath configured to be used. - # Eg. authPath: cordaentcenm - authPath: cordadoorman - # Provide the service account name autheticated to vault. - # NOTE: Make sure that the service account is already created and autheticated to use the vault. - # Eg. serviceAccountName: vault-auth - serviceAccountName: vault-auth - # Provide the vault path where the certificates are stored - # Eg. certSecretPrefix: secret/cenm-org-name - certSecretPrefix: doorman/data - - # The amount of times to retry fetching from/writing to Vault before giving up. - # Eg. retries: 10 - retries: 10 - # The amount of time in seconds to wait after an error occurs when fetching from/writing to Vault. - # Eg. sleepTimeAfterError: 15 - sleepTimeAfterError: 15 - - -############################################################# -# SUBJECT Details # -############################################################# -# This section details the X509 subjects - -subjects: - # Mention the subject for rootca - # Eg. rootca: "CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB" - rootca: "CN=DLT Root CA,OU=DLT,O=DLT,L=New York,C=US" - # Mention the subject for mongorootca - # Eg. mongorootca: "CN=Test Subordinate CA Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - mongorootca: "/C=US/ST=New York/L=New York/O=Lite/OU=DBA/CN=mongoDB" - # Mention the subject for doormanca - # Eg. doormanca: "CN=Test Identity Manager Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - doormanca: "CN=Corda Doorman CA,OU=DOORMAN,O=DOORMAN,L=New York,C=US" - # Mention the subject for networkmap - # Eg. networkmap: "CN=Test Network Map Service Certificate, OU=HQ, O=HoldCo LLC, L=New York, C=US" - networkmap: - - -############################################################# -# Settings # -############################################################# -volume: - # Eg. baseDir: /opt/corda - baseDir: /home/bevel +# Settings for certificate generation +settings: + #Set value to true when useing network_services like doorman and nms + #Eg. networkServices: true + networkServices: false diff --git a/platforms/r3-corda/charts/corda-doorman-tls/Chart.yaml b/platforms/r3-corda/charts/corda-doorman-tls/Chart.yaml deleted file mode 100644 index b3af17b4d56..00000000000 --- a/platforms/r3-corda/charts/corda-doorman-tls/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-os: Deploys the doorman with TLS connection enabled." 
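
The reworked `corda-certs-gen` values above consolidate environment-specific settings under `global` (service account, Vault connection, proxy URL suffix) and add a `settings.networkServices` toggle for the doorman/NMS certificates. As an illustration only, assuming the `bevel-vault-script` ConfigMap and the `vault-auth` service account already exist in the target namespace, and using placeholder release, namespace, and Vault address values, an install could look like this:

```bash
# Illustrative only: release name, namespace and Vault address are placeholders.
cat > certs-gen-overrides.yaml <<'EOF'
global:
  serviceAccountName: vault-auth
  vault:
    type: hashicorp
    address: http://vault.example.com:8200
    authPath: supplychain
    secretEngine: secretsv2
    secretPrefix: "data/supplychain"
  proxy:
    externalUrlSuffix: test.blockchaincloudpoc.com
settings:
  networkServices: true
EOF

helm install supplychain ./platforms/r3-corda/charts/corda-certs-gen \
  --namespace supplychain-ns -f certs-gen-overrides.yaml

# With networkServices: true, the job leaves three TLS secrets behind
kubectl get secret -n supplychain-ns supplychain-tls-certs doorman-tls-certs nms-tls-certs
```
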
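Because the job stores the generated certificates with `kubectl create secret tls`, each secret is a standard `kubernetes.io/tls` secret with `tls.crt`/`tls.key` keys, so the SANs written through the generated `openssl.conf` can be inspected directly (same placeholder names as above):

```bash
# Illustrative only: print the subject and SAN entries of the ambassador certificate.
kubectl get secret -n supplychain-ns supplychain-tls-certs -o jsonpath='{.data.tls\.crt}' \
  | base64 -d \
  | openssl x509 -noout -text \
  | grep -E 'Subject:|DNS:'
```
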
-name: corda-doorman-tls -version: 1.0.0 diff --git a/platforms/r3-corda/charts/corda-doorman-tls/README.md b/platforms/r3-corda/charts/corda-doorman-tls/README.md deleted file mode 100644 index 23ae572ed97..00000000000 --- a/platforms/r3-corda/charts/corda-doorman-tls/README.md +++ /dev/null @@ -1,173 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) -[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# Doorman Deployment - -- [Doorman-tls Deployment Helm Chart](#Doorman-tls-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Contributing](#contributing) -- [License](#license) - - -## Doorman-tls Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/develop/platforms/r3-corda/charts/corda-doorman-tls) deploys the doorman with TLS connection enabled, which helps establish trust and secure communication within the network by acting as a gatekeeper for network participants. - - - -## Prerequisites ---- -Before deploying the chart please ensure you have the following prerequisites: - -- Mongodb for doorman-tls database up and running. -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm is installed. - - - -## Chart Structure ---- -This chart has following structue: - -``` - - ├── doorman-tls - │ ├── Chart.yaml - │ ├── templates - │ │ ├── deployment.yaml - │ │ ├── pvc.yaml - │ │ └── service.yaml - │ └── values.yaml -``` - -Type of files used: - -- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed. -- `deployment.yaml` : This file is a configuration file for deployement in Kubernetes.It creates a deployment file with a specified number of replicas and defines various settings for the deployment.Including volume mounts, environment variables, and initialization tasks using init containers. -- `pvc.yaml` : A PersistentVolumeClaim (PVC) is a request for storage by a user. -- `service.yaml` : This file defines a Kubernetes Service with multiple ports for protocols and targets, and supports Ambassador proxy annotations for specific configurations when using the "ambassador" proxy provider. -- `chart.yaml` : Provides metadata about the chart, such as its name, version, and description. -- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the metadata, image, service, Vault, etc. - - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/r3-corda/charts/corda-doorman-tls/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. 
Here are some important configuration options: - - -## Parameters ---- - -### Name - -| Name | Description | Default Value | -| -----------| -------------------------------------------------- | ------------- | -| name | Provide the name of the node | network-map | - -### Metadata - -| Name | Description | Default Value | -| ----------------| --------------------------------------------------------------------| ------------- | -| namespace | Provide the namespace for the doorman-tls Generator | default | -| labels | Provide any additional labels for the doorman-tls Generator | "" | - -### Image - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------- | --------------- | -| initContainerName | Provide the alpine utils image, which is used for all init-containers of deployments/jobs | "" | -| containerName | Provide the containerName of image | "" | -| imagePullSecret | Provide the image pull secret of image | regcred | -| mountPath | Provide enviroment variable for container image | /opt/doorman | -| env | These env are used by the Doorman application to connect to the MongoDB database | "" | - - -### Vault - -| Name | Description | Default Value | -| ------------------------- | --------------------------------------------------------------------------| ------------- | -| address | Address/URL of the Vault server | "" | -| role | Role used for authentication with Vault | vault-role | -| authpath | Authentication path for Vault | cordanms | -| secretprefix | Provide the kubernetes auth backed configured in vault | "" | -| imagesecretname | specify the name of the Kubernetes secret | "" | -| serviceaccountname | To authenticate with the Vault server and retrieve the secrets |vault-auth-issuer| - - -### Healthcheck - - Tasks performed in this container is used for database health check. - If db is up and running, starts the corda doorman-tls main container. - - - -## Deployment ---- - -To deploy the Doorman-tls Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/r3-corda/charts/corda-doorman-tls/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install, upgrade,verify, delete the chart: - -To install the chart: -```bash -helm repo add bevel https://hyperledger.github.io/bevel/ -helm install ./corda-doorman-tls -``` - -To upgrade the chart: -```bash -helm upgrade ./corda-doorman-tls -``` - -To verify the deployment: -```bash -kubectl get jobs -n -``` -Note : Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. - -To delete the chart: -```bash -helm uninstall -``` -Note : Replace `` with the desired name for the release. - - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Doorman-tls Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/develop/platforms/r3-corda/charts/corda-doorman-tls), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - -## License - -This chart is licensed under the Apache v2.0 license. 
- -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -``` diff --git a/platforms/r3-corda/charts/corda-doorman-tls/templates/deployment.yaml b/platforms/r3-corda/charts/corda-doorman-tls/templates/deployment.yaml deleted file mode 100644 index bba1a965301..00000000000 --- a/platforms/r3-corda/charts/corda-doorman-tls/templates/deployment.yaml +++ /dev/null @@ -1,361 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Deployment -# Creates the replicated container and manages lifecycle -# TLS certs mounted -# Persistent Volume mounted -# Service points to this deployment (uses labels!) -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} - {{- if .Values.deployment.annotations }} - annotations: -{{ toYaml .Values.deployment.annotations | indent 8 }} - {{- end }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - replicas: {{ .Values.replicas }} - selector: - matchLabels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - template: - metadata: - labels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - hostname: {{ .Values.nodeName }} - securityContext: - fsGroup: 1000 - containers: - - name: {{ .Values.nodeName }} - image: {{ .Values.image.containerName }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - - # add permissions to dir - chmod 777 -R {{ .Values.image.mountPath.basePath }}/; - #setting up the required variable required for jar - {{- range $.Values.image.env }} - export {{ .name }}="{{ .value }}" - {{- end }} - export DOORMAN_TLS_CERT_PATH="{{ .Values.image.mountPath.basePath }}-tls/certs/doorman.crt" - export DOORMAN_TLS_KEY_PATH="{{ .Values.image.mountPath.basePath }}-tls/certs/doorman.key" - export DB_PASSWORD=`cat /opt/creds/db_root_password` - cat /opt/creds/db_root_password - export DOORMAN_MONGO_CONNECTION_STRING="mongodb://${DB_USERNAME}:${DB_PASSWORD}@${DB_URL}:${DB_PORT}/${DATABASE}?ssl=true&sslInvalidHostNameAllowed=true&streamType=netty" - export DOORMAN_AUTH_PASSWORD=`cat /opt/creds/user_cred` - - # import self signed tls certificate of mongodb, since java only trusts certificate signed by well known CA - yes | keytool -importcert -file {{ 
.Values.image.mountPath.basePath }}-tls/certs/mongoCA.crt -storepass changeit -alias mongoca -keystore /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts - # command to run jar - java -jar {{ .Values.image.mountPath.basePath }}/doorman.jar 2>&1 - ports: - - containerPort: {{ .Values.service.targetPort }} - volumeMounts: - - name: {{ .Values.nodeName }}-servicedata - mountPath: "{{ .Values.image.mountPath.basePath }}/db/" - readOnly: false - - name: certs - mountPath: "{{ .Values.image.mountPath.basePath }}/db/certs" - readOnly: false - - name: tls-certs - mountPath: "{{ .Values.image.mountPath.basePath }}-tls/certs" - - name: creds - mountPath: "/opt/creds" - readOnly: false - initContainers: - - name: init-certificates - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{.Values.vault.address}} - - name: KUBERNETES_AUTH_PATH - value: {{.Values.vault.authpath}} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: MOUNT_PATH - value: {{ .Values.image.mountPath.basePath }}/db/certs/ - - name: VAULT_NODE_NAME - value: {{ .Values.nodeName }} - - name: SECRET_PREFIX - value: {{.Values.vault.secretprefix}} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - echo "Getting secrets from Vault Server" - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - OUTPUT_PATH=${MOUNT_PATH} - - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/{{ .Values.vault.certsecretprefix }} | jq -r 'if .errors then . else . 
end') - - validateVaultResponse "{{ .Values.vault.certsecretprefix }}" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - - ROOTCA_KEY=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["rootcakey"]') - mkdir -p ${OUTPUT_PATH}/root; - echo "${ROOTCA_KEY}" | base64 -d > ${OUTPUT_PATH}/root/keys.jks - - DOORMAN_KEY=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["doorman.jks"]') - mkdir -p ${OUTPUT_PATH}/doorman; - echo "${DOORMAN_KEY}" | base64 -d > ${OUTPUT_PATH}/doorman/keys.jks - - chmod 777 -R {{ .Values.image.mountPath.basePath }}/db - volumeMounts: - - name: certs - mountPath: "{{ .Values.image.mountPath.basePath }}/db/certs/" - readOnly: false - - name: init-certificates-tls - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{.Values.vault.address}} - - name: KUBERNETES_AUTH_PATH - value: {{.Values.vault.authpath}} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: MOUNT_PATH - value: {{ .Values.image.mountPath.basePath }}-tls/certs/ - - name: SECRET_PREFIX - value: {{.Values.vault.secretprefix}} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - # setting up env to get secrets from vault - echo "Getting secrets from Vault Server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - - OUTPUT_PATH=${MOUNT_PATH} - - if [ "{{ .Values.image.tlsCertificate }}" == true ] - then - # get doorman tls cert and key from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/{{ .Values.vault.tlscertsecretprefix }} | jq -r 'if .errors then . else . end') - validateVaultResponse "{{ .Values.vault.tlscertsecretprefix }}" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - DOORMAN_CRT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["tlscacerts"]') - DOORMAN_KEY=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["tlskey"]') - echo "${DOORMAN_CRT}" | base64 -d > {{ .Values.image.mountPath.basePath }}-tls/certs/doorman.crt - echo "${DOORMAN_KEY}" | base64 -d > {{ .Values.image.mountPath.basePath }}-tls/certs/doorman.key - fi - - # get mongo tls cert from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/{{ .Values.vault.dbcertsecretprefix }} | jq -r 'if .errors then . else . 
end') - validateVaultResponse "{{ .Values.vault.dbcertsecretprefix }}" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - CA_CERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["mongoCA.crt"]') - echo "${CA_CERT}" | base64 -d > ${OUTPUT_PATH}/mongoCA.crt - - # add permissions to dir - chmod 777 -R {{ .Values.image.mountPath.basePath }}-tls/certs/ - volumeMounts: - - name: tls-certs - mountPath: "{{ .Values.image.mountPath.basePath }}-tls/certs/" - readOnly: false - - name: init-creds - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{.Values.vault.address}} - - name: KUBERNETES_AUTH_PATH - value: {{.Values.vault.authpath}} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: MOUNT_PATH - value: /opt/creds - - name: VAULT_NODE_NAME - value: {{ .Values.nodeName }} - - name: DB_CRED_SECRET_PREFIX - value: {{ .Values.vault.dbcredsecretprefix }} - - name: USER_SECRET_PREFIX - value: {{ .Values.vault.secretdoormanpass }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - echo "Getting secrets from Vault Server" - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - echo "logged into vault" - OUTPUT_PATH=${MOUNT_PATH} - LOOKUP_PWD_RESPONSE_DB_PASS=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${DB_CRED_SECRET_PREFIX} | jq -r 'if .errors then . else . end') - - validateVaultResponse "${DB_CRED_SECRET_PREFIX}" "${LOOKUP_PWD_RESPONSE_DB_PASS}" "LOOKUPSECRETRESPONSE" - - MONGODB_PASSWORD=$(echo ${LOOKUP_PWD_RESPONSE_DB_PASS} | jq -r '.data.data["mongodbPassword"]') - echo "${MONGODB_PASSWORD}" >> ${MOUNT_PATH}/db_root_password - cat ${MOUNT_PATH}/db_root_password - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${USER_SECRET_PREFIX} | jq -r 'if .errors then . else . 
end') - validateVaultResponse "${USER_SECRET_PREFIX}" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - - USER_PASSWORD=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["{{ .Values.image.authusername }}"]') - echo "${USER_PASSWORD}" >> ${MOUNT_PATH}/user_cred - - volumeMounts: - - name: creds - mountPath: "/opt/creds" - readOnly: false - - name: changepermissions - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{.Values.vault.address}} - - name: KUBERNETES_AUTH_PATH - value: {{.Values.vault.authpath}} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: MOUNT_PATH - value: {{ .Values.image.mountPath.basePath }}/db/certs - - name: VAULT_NODE_NAME - value: {{ .Values.nodeName }} - - name: SECRET_PREFIX - value: {{.Values.vault.secretprefix}} - command: ["sh", "-c"] - args: - - |- - chmod 777 -R {{ .Values.image.mountPath.basePath }}/; - volumeMounts: - - name: {{ .Values.nodeName }}-servicedata - mountPath: "{{ .Values.image.mountPath.basePath }}/db" - readOnly: false - - name: db-healthcheck - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - COUNTER=1 - FLAG=true - while [ "$COUNTER" -le {{ $.Values.healthcheck.readinessthreshold }} ] - do - DB_NODE={{ .Values.healthcheck.dburl }} - STATUS=$(nc -vz $DB_NODE 2>&1 | grep -c open ) - if [ "$STATUS" == 0 ] - then - FLAG=false - else - FLAG=true - echo "DB up and running" - fi - if [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, retrying after {{ $.Values.healthcheck.readinesscheckinterval }} seconds" - COUNTER=`expr "$COUNTER" + 1` - sleep {{ $.Values.healthcheck.readinesscheckinterval }} - else - echo "SUCCESS!" - echo "DB up and running!" - exit 0 - break - fi - done - if [ "$COUNTER" -gt {{ $.Values.healthcheck.readinessthreshold }} ] || [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, no DB up and running. Giving up!" - exit 1 - break - fi - imagePullSecrets: - - name: {{ .Values.image.imagePullSecret }} - volumes: - - name: {{ .Values.nodeName }}-servicedata - persistentVolumeClaim: - claimName: {{ .Values.nodeName }}-pvc - - name: certs - emptyDir: - medium: Memory - - name: creds - emptyDir: - medium: Memory - - name: tls-certs - emptyDir: - medium: Memory - \ No newline at end of file diff --git a/platforms/r3-corda/charts/corda-doorman-tls/templates/pvc.yaml b/platforms/r3-corda/charts/corda-doorman-tls/templates/pvc.yaml deleted file mode 100644 index 94433d6faab..00000000000 --- a/platforms/r3-corda/charts/corda-doorman-tls/templates/pvc.yaml +++ /dev/null @@ -1,28 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: {{ .Values.nodeName }}-pvc - namespace: {{ .Values.metadata.namespace }} - {{- if .Values.pvc.annotations }} - annotations: -{{ toYaml .Values.pvc.annotations | indent 8 }} - {{- end }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }}-pvc - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - storageClassName: {{ .Values.storage.name }} - accessModes: - - ReadWriteOnce - resources: - requests: - storage: {{ .Values.storage.memory }} diff --git a/platforms/r3-corda/charts/corda-doorman-tls/templates/service.yaml b/platforms/r3-corda/charts/corda-doorman-tls/templates/service.yaml deleted file mode 100644 index 5f117bd6f93..00000000000 --- a/platforms/r3-corda/charts/corda-doorman-tls/templates/service.yaml +++ /dev/null @@ -1,70 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.nodeName }} - namespace: {{ $.Values.metadata.namespace }} - annotations: - labels: - run: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - selector: - app: {{ .Values.nodeName }} - type: {{ .Values.service.type }} - ports: - - protocol: TCP - port: {{ .Values.service.port }} - targetPort: {{ .Values.service.targetPort }} - {{- if .Values.service.nodePort }} - nodePort: {{ .Values.service.nodePort }} - {{- end }} -{{ if $.Values.ambassador }} ---- -apiVersion: getambassador.io/v3alpha1 -kind: Host -metadata: - name: {{ .Values.nodeName }}-host -spec: - hostname: {{ .Values.nodeName }}.{{ .Values.ambassador.external_url_suffix }} - acmeProvider: - authority: none - requestPolicy: - insecure: - action: Route - tlsSecret: - name: {{ .Values.nodeName }}-ambassador-certs - namespace: {{ .Values.metadata.namespace }} ---- -apiVersion: getambassador.io/v3alpha1 -kind: Mapping -metadata: - name: {{ .Values.nodeName }}-mapping - namespace: {{ .Values.metadata.namespace }} -spec: - host: {{ .Values.nodeName }}.{{ .Values.ambassador.external_url_suffix }} - prefix: / - service: https://{{ .Values.nodeName }}.{{ .Values.metadata.namespace }}:{{ .Values.service.port }} - tls: {{ .Values.nodeName }}-tlscontext ---- -apiVersion: getambassador.io/v3alpha1 -kind: TLSContext -metadata: - name: {{ .Values.nodeName }}-tlscontext - namespace: {{ .Values.metadata.namespace }} -spec: - hosts: - - {{ .Values.nodeName }}.{{ .Values.ambassador.external_url_suffix }} - secret: {{ .Values.nodeName }}-ambassador-certs.{{ .Values.metadata.namespace }} - secret_namespacing: true - min_tls_version: v1.2 -{{- end }} - diff --git a/platforms/r3-corda/charts/corda-doorman-tls/values.yaml b/platforms/r3-corda/charts/corda-doorman-tls/values.yaml deleted file mode 100644 index 252ccbda31c..00000000000 --- a/platforms/r3-corda/charts/corda-doorman-tls/values.yaml +++ /dev/null @@ -1,120 +0,0 @@ 
-############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Default values for nmschart. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -#Provide the Name for node to be deployed -#Eg. nodeName: network-map -nodeName: network-map - -metadata: - #Provide the namespace for organization's peer - #Eg. namespace: default - namespace: default - -image: - #Provide the name of image for init container - #Eg. initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - #Provide the containerName of image - #Eg. containerName: ghcr.io/hyperledger/bevel-doorman-linuxkit:latest - containerName: ghcr.io/hyperledger/bevel-doorman-linuxkit:latest - #Provide the image pull secret of image - #Eg. pullSecret: regcred - imagePullSecret: "" - #Provide enviroment variable for container image - mountPath: - #Provide the path for base dir - #Eg. basePath: /opt/workdir - basePath: /opt/doorman - env: - #Provide rootcaname for the doorman - #Eg. rootcaname: CN=Corda Root CA, OU=FRA, O=FRA, L=London, ST=London, C=BR - rootcaname: - #Provide tlscertpath for the doorman - #Eg. tlscertpath: /opt/cordite/db/certs/tls/nms.crt - tlscertpath: - #Provide tlscertpath for the doorman - #Eg. tlscertpath: /opt/cordite/db/certs/tls/nms.key - tlskeypath: - #Provide whether TLS is enabled or not - #Eg. tls: false - tls: false - #Provide whether to enable Corda doorman protocol - #Eg. doorman: true - doorman: true - #Provide whether to enable Cordite certman protocol so that nodes can authenticate using a signed TLS cert - #Eg. certman: true - certman: true - #Provide database directory for this service - #Eg. database: db - database: db - #Provide MongoDB connection string. If set to embed will start its own mongo instance - #Eg. dataSourceUrl: db - dataSourceUrl: db - -service: - #Provide the type of service - #Eg. type: NodePort - type: NodePort - #Provide the node port for node service to be accessible outside - #Eg. nodePort: 30050 - nodePort: - #Provide the targetPort for node service to be accessible - #Eg. targetPort: 8080 - targetPort: - #Provide the port for node service to be accessible - #Eg. port: 8080 - port: - -deployment: - # annotations: - # key: "value" - annotations: {} - -storage: - #Provide the memory for node - #Eg. memory: 4Gi - memory: 4Gi - -pvc: - # annotations: - # key: "value" - annotations: {} - -vault: - #Provide the vault server address - #Eg. address: http://34.228.219.208:8200 - address: - #Provide the vaultrole - #Eg. role: vault-role - role: vault-role - #Eg. authpath: cordanms - authpath: cordanms - #Provide the kubernetes auth backed configured in vault - #Eg. secretprefix: - secretprefix: - #Eg. imagesecretname: - imagesecretname: - #Eg. serviceaccountname: vault-auth-issuer - serviceaccountname: - -mountPath: - #Provide the path for base dir - #Eg. basePath: /opt/workdir - basePath: - -healthcheck: - dburl: - -ambassador: - #Provides the suffix to be used in external URL - #Eg. 
external_url_suffix: org1.blockchaincloudpoc.com - external_url_suffix: - - diff --git a/platforms/r3-corda/charts/corda-doorman/Chart.yaml b/platforms/r3-corda/charts/corda-doorman/Chart.yaml deleted file mode 100644 index 2f2170e5033..00000000000 --- a/platforms/r3-corda/charts/corda-doorman/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-os: Deploys the doorman service." -name: corda-doorman -version: 1.0.0 diff --git a/platforms/r3-corda/charts/corda-doorman/README.md b/platforms/r3-corda/charts/corda-doorman/README.md deleted file mode 100644 index e3a68c796fc..00000000000 --- a/platforms/r3-corda/charts/corda-doorman/README.md +++ /dev/null @@ -1,171 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) -[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# Doorman Deployment - -- [Doorman Deployment Helm Chart](#Doorman-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Contributing](#contributing) -- [License](#license) - - -## Doorman Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/develop/platforms/r3-corda/charts/corda-doorman) deploys the doorman service, which helps establish trust and secure communication within the network by acting as a gatekeeper for network participants. - - - -## Prerequisites ---- -Before deploying the chart please ensure you have the following prerequisites: - -- Mongodb for doorman up and running. -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm is installed. - - - -## Chart Structure ---- -This chart has following structue: - -``` - - ├── doorman - │ ├── Chart.yaml - │ ├── templates - │ │ ├── deployment.yaml - │ │ ├── pvc.yaml - │ │ └── service.yaml - │ └── values.yaml -``` - -Type of files used: - -- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed. -- `deployment.yaml` : A Deployment controller provides declarative updates for Pods and ReplicaSets. -- `pvc.yaml` : A PersistentVolumeClaim (PVC) is a request for storage by a user. -- `service.yaml` : This file defines a Kubernetes Service with multiple ports for protocols and targets, and supports Ambassador proxy annotations for specific configurations when using the "ambassador" proxy provider. -- `chart.yaml` : Provides metadata about the chart, such as its name, version, and description. -- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the metadata, image, service, Vault, etc. - - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/r3-corda/charts/corda-doorman/values.yaml) file contains configurable values for the Helm chart. 
We can modify these values according to the deployment requirements. Here are some important configuration options: - -## Parameters ---- - -### Name - -| Name | Description | Default Value | -| -----------| -------------------------------------------------- | ------------- | -| name | Provide the name of the node | network-map | - -### Metadata - -| Name | Description | Default Value | -| ----------------| ----------------------------------------------------------------| ------------- | -| namespace | Provide the namespace for the doorman Generator | default | -| labels | Provide any additional labels for the doorman Generator | "" | - -### Image - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------- | --------------- | -| initContainerName | Provide the alpine utils image, which is used for all init-containers of deployments/jobs | "" | -| containerName | Provide the containerName of image | "" | -| imagePullSecret | Provide the image pull secret of image | regcred | -| mountPath | Provide enviroment variable for container image | /opt/doorman | -| env |These env are used by the Doorman application to connect to the MongoDB database | "" | - - -### Vault - -| Name | Description | Default Value | -| ------------------------- | --------------------------------------------------------------------------| ------------- | -| address | Address/URL of the Vault server | "" | -| role | Role used for authentication with Vault | vault-role | -| authpath | Authentication path for Vault | cordanms | -| secretprefix | Provide the kubernetes auth backed configured in vault | "" | -| imagesecretname | specify the name of the Kubernetes secret | "" | -| serviceaccountname | To authenticate with the Vault server and retrieve the secrets |vault-auth-issuer| - -### Healthcheck - - Tasks performed in this container is used for database health check. - If db is up and running, starts the corda doorman main container. - - - -## Deployment ---- - -To deploy the Doorman Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/r3-corda/charts/corda-doorman/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install, upgrade, verify, delete the chart: - -To install the chart: -```bash -helm repo add bevel https://hyperledger.github.io/bevel/ -helm install ./corda-doorman -``` - -To upgrade the chart: -```bash -helm upgrade ./corda-doorman -``` - -To verify the deployment: -```bash -kubectl get jobs -n -``` -Note : Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. - -To delete the chart: -```bash -helm uninstall -``` -Note : Replace `` with the desired name for the release. - - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Doorman Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/develop/platforms/r3-corda/charts/corda-doorman), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - -## License - -This chart is licensed under the Apache v2.0 license. 
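For reference, the `helm install` and `helm upgrade` commands shown in the Deployment section above omit the release name that Helm 3 expects; a fuller invocation might look like the sketch below, where the release name `doorman`, the namespace, and the values file are illustrative assumptions rather than values taken from this chart.

```bash
# Hypothetical invocation; "doorman", <namespace> and values.yaml are placeholders.
helm install doorman ./corda-doorman --namespace <namespace> -f values.yaml

# Subsequent upgrades reference the same release name.
helm upgrade doorman ./corda-doorman --namespace <namespace> -f values.yaml
```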
- -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -``` diff --git a/platforms/r3-corda/charts/corda-doorman/templates/deployment.yaml b/platforms/r3-corda/charts/corda-doorman/templates/deployment.yaml deleted file mode 100644 index cdcca981aea..00000000000 --- a/platforms/r3-corda/charts/corda-doorman/templates/deployment.yaml +++ /dev/null @@ -1,275 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Deployment -# Creates the replicated container and manages lifecycle -# TLS certs mounted -# Persistent Volume mounted -# Service points to this deployment (uses labels!) -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} - {{- if .Values.deployment.annotations }} - annotations: -{{ toYaml .Values.deployment.annotations | indent 8 }} - {{- end }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - replicas: {{ .Values.replicas }} - selector: - matchLabels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - template: - metadata: - labels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - hostname: {{ .Values.nodeName }} - securityContext: - fsGroup: 1000 - containers: - - name: {{ .Values.nodeName }} - image: {{ .Values.image.containerName }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - {{- range $.Values.image.env }} - export {{ .name }}="{{ .value }}" - {{- end }} - export DB_PASSWORD=`cat /opt/creds/db_root_password` - cat /opt/creds/db_root_password - export DOORMAN_MONGO_CONNECTION_STRING="mongodb://${DB_USERNAME}:${DB_PASSWORD}@${DB_URL}:${DB_PORT}/${DATABASE}" - export DOORMAN_AUTH_PASSWORD=`cat /opt/creds/user_cred` - java -jar {{ .Values.image.mountPath.basePath }}/doorman.jar 2>&1 - ports: - - containerPort: {{ .Values.service.targetPort }} - volumeMounts: - - name: {{ .Values.nodeName }}-servicedata - mountPath: "{{ .Values.image.mountPath.basePath }}/db/" - readOnly: false - - name: certs - mountPath: "{{ .Values.image.mountPath.basePath }}/db/certs" - readOnly: false - - name: creds - mountPath: "/opt/creds" - readOnly: false - initContainers: - - name: init-certificates - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - 
name: VAULT_ADDR - value: {{.Values.vault.address}} - - name: KUBERNETES_AUTH_PATH - value: {{.Values.vault.authpath}} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: MOUNT_PATH - value: {{ .Values.image.mountPath.basePath }}/db/certs/ - - name: VAULT_NODE_NAME - value: {{ .Values.nodeName }} - - name: SECRET_PREFIX - value: {{.Values.vault.secretprefix}} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - echo "Getting secrets from Vault Server" - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - echo "logged into vault" - OUTPUT_PATH=${MOUNT_PATH} - - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/{{ .Values.vault.certsecretprefix }} | jq -r 'if .errors then . else . end') - - validateVaultResponse "{{ .Values.vault.certsecretprefix }}" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - - ROOTCA_KEY=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["rootcakey"]') - mkdir -p ${OUTPUT_PATH}/root; - echo "${ROOTCA_KEY}" | base64 -d > ${OUTPUT_PATH}/root/keys.jks - - DOORMAN_KEY=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["doorman.jks"]') - mkdir -p ${OUTPUT_PATH}/doorman; - echo "${DOORMAN_KEY}" | base64 -d > ${OUTPUT_PATH}/doorman/keys.jks - chmod 777 -R {{ .Values.image.mountPath.basePath }}/db - volumeMounts: - - name: certs - mountPath: "{{ .Values.image.mountPath.basePath }}/db/certs/" - readOnly: false - - name: init-creds - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{.Values.vault.address}} - - name: KUBERNETES_AUTH_PATH - value: {{.Values.vault.authpath}} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: MOUNT_PATH - value: /opt/creds - - name: VAULT_NODE_NAME - value: {{ .Values.nodeName }} - - name: DB_CRED_SECRET_PREFIX - value: {{ .Values.vault.dbcredsecretprefix }} - - name: USER_SECRET_PREFIX - value: {{ .Values.vault.secretdoormanpass }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? 
- if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - echo "Getting secrets from Vault Server" - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - echo "logged into vault" - OUTPUT_PATH=${MOUNT_PATH} - LOOKUP_PWD_RESPONSE_DB_PASS=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${DB_CRED_SECRET_PREFIX} | jq -r 'if .errors then . else . end') - - validateVaultResponse "${DB_CRED_SECRET_PREFIX}" "${LOOKUP_PWD_RESPONSE_DB_PASS}" "LOOKUPSECRETRESPONSE" - - MONGODB_PASSWORD=$(echo ${LOOKUP_PWD_RESPONSE_DB_PASS} | jq -r '.data.data["mongodbPassword"]') - echo "${MONGODB_PASSWORD}" >> ${MOUNT_PATH}/db_root_password - cat ${MOUNT_PATH}/db_root_password - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${USER_SECRET_PREFIX} | jq -r 'if .errors then . else . end') - validateVaultResponse "${USER_SECRET_PREFIX}" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - - USER_PASSWORD=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["{{ .Values.image.authusername }}"]') - echo "${USER_PASSWORD}" >> ${MOUNT_PATH}/user_cred - - volumeMounts: - - name: creds - mountPath: "/opt/creds" - readOnly: false - - name: changepermissions - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{.Values.vault.address}} - - name: KUBERNETES_AUTH_PATH - value: {{.Values.vault.authpath}} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: MOUNT_PATH - value: {{ .Values.image.mountPath.basePath }}/db/certs - - name: VAULT_NODE_NAME - value: {{ .Values.nodeName }} - - name: SECRET_PREFIX - value: {{.Values.vault.secretprefix}} - command: ["sh", "-c"] - args: - - |- - chmod 777 -R {{ .Values.image.mountPath.basePath }}/; - volumeMounts: - - name: {{ .Values.nodeName }}-servicedata - mountPath: "{{ .Values.image.mountPath.basePath }}/db" - readOnly: false - - name: db-healthcheck - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - COUNTER=1 - FLAG=true - while [ "$COUNTER" -le {{ $.Values.healthcheck.readinessthreshold }} ] - do - DB_NODE={{ .Values.healthcheck.dburl }} - STATUS=$(nc -vz $DB_NODE 2>&1 | grep -c open ) - if [ "$STATUS" == 0 ] - then - FLAG=false - else - FLAG=true - echo "DB up and running" - fi - if [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, retrying after {{ $.Values.healthcheck.readinesscheckinterval }} seconds" - COUNTER=`expr "$COUNTER" + 1` - sleep {{ $.Values.healthcheck.readinesscheckinterval }} - else - echo "SUCCESS!" - echo "DB up and running!" - exit 0 - break - fi - done - if [ "$COUNTER" -gt {{ $.Values.healthcheck.readinessthreshold }} ] || [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, no DB up and running. Giving up!" 
- exit 1 - break - fi - imagePullSecrets: - - name: {{ .Values.image.imagePullSecret }} - volumes: - - name: {{ .Values.nodeName }}-servicedata - persistentVolumeClaim: - claimName: {{ .Values.nodeName }}-pvc - - name: certs - emptyDir: - medium: Memory - - name: creds - emptyDir: - medium: Memory diff --git a/platforms/r3-corda/charts/corda-doorman/templates/pvc.yaml b/platforms/r3-corda/charts/corda-doorman/templates/pvc.yaml deleted file mode 100644 index 94433d6faab..00000000000 --- a/platforms/r3-corda/charts/corda-doorman/templates/pvc.yaml +++ /dev/null @@ -1,28 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: {{ .Values.nodeName }}-pvc - namespace: {{ .Values.metadata.namespace }} - {{- if .Values.pvc.annotations }} - annotations: -{{ toYaml .Values.pvc.annotations | indent 8 }} - {{- end }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }}-pvc - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - storageClassName: {{ .Values.storage.name }} - accessModes: - - ReadWriteOnce - resources: - requests: - storage: {{ .Values.storage.memory }} diff --git a/platforms/r3-corda/charts/corda-doorman/templates/service.yaml b/platforms/r3-corda/charts/corda-doorman/templates/service.yaml deleted file mode 100644 index a7f8737155b..00000000000 --- a/platforms/r3-corda/charts/corda-doorman/templates/service.yaml +++ /dev/null @@ -1,42 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} - annotations: - labels: - run: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - selector: - app: {{ .Values.nodeName }} - type: {{ .Values.service.type }} - ports: - - protocol: TCP - port: {{ .Values.service.port }} - targetPort: {{ .Values.service.targetPort }} - {{- if .Values.service.nodePort }} - nodePort: {{ .Values.service.nodePort }} - {{- end }} -{{ if $.Values.ambassador }} ---- -apiVersion: getambassador.io/v3alpha1 -kind: Mapping -metadata: - name: {{ .Values.nodeName }}-mapping - namespace: {{ .Values.metadata.namespace }} -spec: - hostname: {{ .Values.nodeName }}.{{ .Values.ambassador.external_url_suffix }} - prefix: / - service: http://{{ .Values.nodeName }}.{{ .Values.metadata.namespace }}:{{ .Values.service.port }} -{{ end }} - diff --git a/platforms/r3-corda/charts/corda-doorman/values.yaml b/platforms/r3-corda/charts/corda-doorman/values.yaml deleted file mode 100644 index 50cdaa23685..00000000000 --- a/platforms/r3-corda/charts/corda-doorman/values.yaml +++ /dev/null @@ -1,120 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Default values for nmschart. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -#Provide the Name for node to be deployed -#Eg. nodeName: network-map -nodeName: network-map - -metadata: - #Provide the namespace for organization's peer - #Eg. namespace: default - namespace: default - -image: - #Provide the name of image for init container - #Eg. initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - #Provide the containerName of image - #Eg. containerName: ghcr.io/hyperledger/bevel-doorman-linuxkit:latest - containerName: ghcr.io/hyperledger/bevel-doorman-linuxkit:latest - #Provide the image pull secret of image - #Eg. pullSecret: regcred - imagePullSecret: "" - #Provide enviroment variable for container image - mountPath: - #Provide the path for base dir - #Eg. basePath: /opt/workdir - basePath: /opt/doorman - env: - #Provide rootcaname for the doorman - #Eg. rootcaname: CN=Corda Root CA, OU=FRA, O=FRA, L=London, ST=London, C=BR - rootcaname: - #Provide tlscertpath for the doorman - #Eg. tlscertpath: /opt/cordite/db/certs/tls/nms.crt - tlscertpath: - #Provide tlscertpath for the doorman - #Eg. tlscertpath: /opt/cordite/db/certs/tls/nms.key - tlskeypath: - #Provide whether TLS is enabled or not - #Eg. tls: false - tls: false - #Provide whether to enable Corda doorman protocol - #Eg. doorman: true - doorman: true - #Provide whether to enable Cordite certman protocol so that nodes can authenticate using a signed TLS cert - #Eg. certman: true - certman: true - #Provide database directory for this service - #Eg. database: db - database: db - #Provide MongoDB connection string. If set to embed will start its own mongo instance - #Eg. 
dataSourceUrl: db - dataSourceUrl: db - -service: - #Provide the type of service - #Eg. type: NodePort - type: NodePort - #Provide the node port for node service to be accessible outside - #Eg. nodePort: 30050 - nodePort: - #Provide the targetPort for node service to be accessible - #Eg. targetPort: 8080 - targetPort: - #Provide the port for node service to be accessible - #Eg. port: 8080 - port: - -deployment: - # annotations: - # key: "value" - annotations: {} - -storage: - #Provide the memory for node - #Eg. memory: 4Gi - memory: 4Gi - -pvc: - # annotations: - # key: "value" - annotations: {} - -vault: - #Provide the vault server address - #Eg. address: http://34.228.219.208:8200 - address: - #Provide the vaultrole - #Eg. role: vault-role - role: vault-role - #Eg. authpath: cordanms - authpath: cordanms - #Provide the kubernetes auth backed configured in vault - #Eg. secretprefix: - secretprefix: - #Eg. imagesecretname: - imagesecretname: - #Eg. serviceaccountname: vault-auth-issuer - serviceaccountname: vault-auth-issuer - -mountPath: - #Provide the path for base dir - #Eg. basePath: /opt/workdir - basePath: - -healthcheck: - dburl: - -ambassador: - #Provides the suffix to be used in external URL - #Eg. external_url_suffix: org1.blockchaincloudpoc.com - external_url_suffix: - - diff --git a/platforms/r3-corda/charts/corda-h2-addUser/Chart.yaml b/platforms/r3-corda/charts/corda-h2-addUser/Chart.yaml deleted file mode 100644 index 7e4915c3b56..00000000000 --- a/platforms/r3-corda/charts/corda-h2-addUser/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: A Helm chart for registering the notary with the nms -name: corda-h2-add-user -version: 1.0.0 diff --git a/platforms/r3-corda/charts/corda-h2-addUser/templates/job.yaml b/platforms/r3-corda/charts/corda-h2-addUser/templates/job.yaml deleted file mode 100644 index 438353d70f3..00000000000 --- a/platforms/r3-corda/charts/corda-h2-addUser/templates/job.yaml +++ /dev/null @@ -1,169 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: batch/v1 -kind: Job -metadata: - name: h2-add-user-{{ .Values.nodeName }} - labels: - app: h2-add-user-{{ .Values.nodeName }} - app.kubernetes.io/name: h2-add-user-{{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - backoffLimit: 6 - ttlSecondsAfterFinished: 300 - template: - metadata: - labels: - app: h2-add-user-{{ .Values.nodeName }} - app.kubernetes.io/name: h2-add-user-{{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - restartPolicy: "OnFailure" - serviceAccountName: {{ .Values.vault.serviceaccountname }} - containers: - - name: h2-add-user - image: {{ .Values.image.containerName }} - imagePullPolicy: Always - env: - - name: MOUNT_PATH - value: /opt/h2 - - name: SECRET_PREFIX - value: {{ .Values.vault.dbsecretprefix }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - export SA_PASSWORD=`cat /opt/db/creds/db_cred` - export JDBC_URL={{ .Values.jdbcUrl }} - - {{- range .Values.users }} - USER_PASSWORD=`cat /opt/db/creds/{{ .name }}-password` - echo "Adding User : {{ .name }}" - cat << EOF > ${MOUNT_PATH}/newuser.sql - CREATE USER {{ .name }} PASSWORD '${USER_PASSWORD} admin '; - EOF - - chmod 777 ${MOUNT_PATH}/newuser.sql - H2JARPATH=${MOUNT_PATH}/bin/h2*.jar - H2SCRIPTCLASSPATH=org.h2.tools.RunScript - - java -cp ${H2JARPATH} ${H2SCRIPTCLASSPATH} -url ${JDBC_URL} -user sa -password "${SA_PASSWORD}" -script ${MOUNT_PATH}/newuser.sql - echo "New User Added" - {{- end }} - volumeMounts: - - name: {{ .Values.nodeName }}volume - mountPath: "/opt/h2-data" - - name: creds - mountPath: "/opt/db/creds" - readOnly: false - initContainers: - - name: init-credential - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{.Values.vault.address}} - - name: KUBERNETES_AUTH_PATH - value: {{.Values.vault.authpath}} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: MOUNT_PATH - value: /opt/db/creds - - name: VAULT_NODE_NAME - value: {{ .Values.nodeName }} - - name: DB_SECRET_PREFIX - value: {{ .Values.vault.dbsecretprefix }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${DB_SECRET_PREFIX} | jq -r 'if .errors then . else . 
end') - validateVaultResponse "${DB_SECRET_PREFIX}" "${LOOKUP_PWD_RESPONSE}" "LOOKUPSECRETRESPONSE" - SA_PASSWORD=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["sa"]') - echo "${SA_PASSWORD}" >> ${MOUNT_PATH}/db_cred - {{- range .Values.users }} - USER_PASS=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["{{ .name }}"]') - echo "${USER_PASS}" >> ${MOUNT_PATH}/{{ .name }}-password - {{- end }} - volumeMounts: - - name: creds - mountPath: "/opt/db/creds" - readOnly: false - - name: db-healthcheck - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - COUNTER=1 - FLAG=true - while [ "$COUNTER" -le {{ $.Values.db.readinessthreshold }} ] - do - DB_NODE={{ .Values.dbUrl }}:{{ .Values.dbPort }} - STATUS=$(nc -vz $DB_NODE 2>&1 | grep -c open ) - if [ "$STATUS" == 0 ] - then - FLAG=false - else - FLAG=true - echo "DB up and running" - fi - if [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, retrying after {{ $.Values.db.readinesscheckinterval }} seconds" - COUNTER=`expr "$COUNTER" + 1` - sleep {{ $.Values.db.readinesscheckinterval }} - else - echo "SUCCESS!" - echo "DB up and running!" - exit 0 - break - fi - done - if [ "$COUNTER" -gt {{ $.Values.db.readinessthreshold }} ] || [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, no DB up and running. Giving up!" - exit 1 - break - fi - imagePullSecrets: - - name: {{ .Values.image.imagePullSecret }} - volumes: - - name: {{ .Values.nodeName }}volume - persistentVolumeClaim: - claimName: {{ .Values.nodeName }}db-pvc - - name: creds - emptyDir: - medium: Memory \ No newline at end of file diff --git a/platforms/r3-corda/charts/corda-h2-addUser/values.yaml b/platforms/r3-corda/charts/corda-h2-addUser/values.yaml deleted file mode 100644 index 927b499e6e2..00000000000 --- a/platforms/r3-corda/charts/corda-h2-addUser/values.yaml +++ /dev/null @@ -1,184 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# This is a YAML-formatted file. -# This should be produced by build - -#Provide the nodeName for node -#Eg. nodeName: bank1 -nodeName: - -metadata: - namespace: - -image: - #Provide the containerName of image - #Eg. containerName: hyperledgerlabs/h2:2018 - containerName: - #Provide the name of image for init container - #Eg. name: ghcr.io/hyperledger/bevel-alpine:latest - initContainerName: - #Provide the image pull secret of image - #Eg. pullSecret: regcred - imagePullSecret: - -nodeConf: - #Provide the p2pUrl for node - #Eg. p2pUrl: rp-elb-corda-kube-check-cluster1-144808561.us-east-1.elb.amazonaws.com - p2p: - url: - port: - rpcSettings: - useSsl: - standAloneBroker: - address: - adminAddress: - ssl: - certificatesDirectory: - sslKeystorePath: - trustStoreFilePath: - #Provide the legalName for node - #Eg. legalName: "O=Bank1,L=London,C=GB,CN=Bank1" - legalName: - messagingServerAddress: - jvmArgs: - systemProperties: - sshd: - port: - exportJMXTo: - transactionCacheSizeMegaBytes: - attachmentContentCacheSizeMegaBytes: - notary: - validating: - detectPublicIp: - extraAdvertisedServiceIds: - database: - serverNameTablePrefix: - exportHibernateJMXStatistics: - runMigration: - #Provide the h2Url for node - #Eg. h2Url: bank1h2 - dbUrl: - #Provide the h2Port for node - #Eg. 
h2Port: 9101 - dbPort: - dataSourceClassName: - dataSourceUrl: - jarPath: - #Provide the nms for node - #Eg. nms: "http://rp-elb-fra-corda-kube-cluster7-2016021309.us-west-1.elb.amazonaws.com:30050" - networkMapURL: - doormanURL: - compatibilityZoneURL: - webAddress: - #Provide the jar Version for corda jar and finanace jar - #Eg. jarVersion: 3.3-corda - jarVersion: - #Provide the devMode for corda node - #Eg. devMode: true - devMode: - #Provide the useHTTPS for corda node - #Eg. useHTTPS: false - useHTTPS: - env: - - name: - value: - -credentials: - #Provide the dataSourceUser for corda node - #Eg. dataSourceUser: sa - dataSourceUser: - #Provide the rpcUser for corda node - #Eg. rpcUser: bank1operations - rpcUser: - - name: - permissions: - -volume: - mountPath: - -resources: - #Provide the limit memory for node - #Eg. limits: "1Gi" - limits: - #Provide the requests memory for node - #Eg. requests: "1Gi" - requests: - -storage: - #Provide the memory for node - #Eg. memory: 4Gi - provisioner: - memory: - type: - -service: - #Provide the type of service - #Eg. type: NodePort or LoadBalancer etc - type: - p2p: - #Provide the p2p port for node - #Eg. port: 10007 - port: - #Provide the p2p node port for node - #Eg. port: 30007 - nodePort: - #Provide the p2p targetPort for node - #Eg. targetPort: 30007 - targetPort: - rpc: - #Provide the rpc port for node - #Eg. port: 10008 - port: - #Provide the p2p targetPort for node - #Eg. targetPort: 10003 - targetPort: - #Provide the p2p node port for node - #Eg. nodePort: 30007 - nodePort: - rpcadmin: - #Provide the rpcadmin port for node - #Eg. port: 10108 - port: - #Provide the p2p targetPort for node - #Eg. targetPort: 10005 - targetPort: - #Provide the p2p node port for node - #Eg. nodePort: 30007 - nodePort: -jobservice: - type: - p2p: - nodePort: - rpc: - nodePort: - rpcadmin: - nodePort: - -vault: - #Provide the vault server address - #Eg. address: http://54.226.163.39:8200 - address: - #Provide the vaultrole - #Eg. role: vault-role - role: - #Provide the authpath - #Eg. authpath: cordabank1 - authpath: - #Provide the serviceaccountname - #Eg. serviceaccountname: vault-auth-issuer - serviceaccountname: - #Provide the secretprefix - #Eg. secretprefix: issuer - secretprefix: - -db: - #Provide the interval in seconds you want to iterate till db to be ready - #Eg. readinesscheckinterval: 5 - readinesscheckinterval: - #Provide the threshold till you want to check if specified db up and running - #Eg. readinessthreshold: 2 - readinessthreshold: diff --git a/platforms/r3-corda/charts/corda-h2-password-change/Chart.yaml b/platforms/r3-corda/charts/corda-h2-password-change/Chart.yaml deleted file mode 100644 index 79c7fbb28df..00000000000 --- a/platforms/r3-corda/charts/corda-h2-password-change/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: A Helm chart for registering the notary with the nms -name: corda-h2-pass-change -version: 1.0.0 diff --git a/platforms/r3-corda/charts/corda-h2-password-change/templates/job.yaml b/platforms/r3-corda/charts/corda-h2-password-change/templates/job.yaml deleted file mode 100644 index 54be140e38d..00000000000 --- a/platforms/r3-corda/charts/corda-h2-password-change/templates/job.yaml +++ /dev/null @@ -1,158 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: batch/v1 -kind: Job -metadata: - name: h2-pass-change-{{ .Values.nodeName }} - labels: - app: h2-pass-change-{{ .Values.nodeName }} - app.kubernetes.io/name: h2-pass-change-{{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - backoffLimit: 6 - ttlSecondsAfterFinished: 300 - template: - metadata: - labels: - app: h2-pass-change-{{ .Values.nodeName }} - app.kubernetes.io/name: h2-pass-change-{{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - restartPolicy: "OnFailure" - serviceAccountName: {{ .Values.vault.serviceaccountname }} - containers: - - name: h2-pass-change - image: {{ .Values.image.containerName }} - imagePullPolicy: Always - env: - - name: MOUNT_PATH - value: /opt/h2 - - name: SECRET_PREFIX - value: {{ .Values.vault.dbsecretprefix }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - export SA_PASSWORD=`cat /opt/db/creds/db_cred` - cat << EOF > ${MOUNT_PATH}/changepass.sql - ALTER USER SA SET PASSWORD '${SA_PASSWORD}'; - EOF - - chmod 777 ${MOUNT_PATH}/changepass.sql - H2JARPATH=${MOUNT_PATH}/bin/h2*.jar - H2SCRIPTCLASSPATH=org.h2.tools.RunScript - - java -cp ${H2JARPATH} ${H2SCRIPTCLASSPATH} -url {{ .Values.jdbcUrl }} -user sa -script ${MOUNT_PATH}/changepass.sql - echo "Password for SA changed" - volumeMounts: - - name: {{ .Values.nodeName }}volume - mountPath: "/opt/h2-data" - - name: creds - mountPath: "/opt/db/creds" - readOnly: false - initContainers: - - name: init-credential - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{.Values.vault.address}} - - name: KUBERNETES_AUTH_PATH - value: {{.Values.vault.authpath}} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: MOUNT_PATH - value: /opt/db/creds - - name: VAULT_NODE_NAME - value: {{ .Values.nodeName }} - - name: DB_SECRET_PREFIX - value: {{ .Values.vault.dbsecretprefix }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? 
- if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${DB_SECRET_PREFIX} | jq -r 'if .errors then . else . end') - validateVaultResponse "${DB_SECRET_PREFIX}" "${LOOKUP_PWD_RESPONSE}" "LOOKUPSECRETRESPONSE" - SA_PASSWORD=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["sa"]') - echo "${SA_PASSWORD}" >> ${MOUNT_PATH}/db_cred - volumeMounts: - - name: creds - mountPath: "/opt/db/creds" - readOnly: false - - name: db-healthcheck - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - COUNTER=1 - FLAG=true - while [ "$COUNTER" -le {{ $.Values.db.readinessthreshold }} ] - do - DB_NODE={{ .Values.dbUrl }}:{{ .Values.dbPort }} - STATUS=$(nc -vz $DB_NODE 2>&1 | grep -c open ) - if [ "$STATUS" == 0 ] - then - FLAG=false - else - FLAG=true - echo "DB up and running" - fi - if [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, retrying after {{ $.Values.db.readinesscheckinterval }} seconds" - COUNTER=`expr "$COUNTER" + 1` - sleep {{ $.Values.db.readinesscheckinterval }} - else - echo "SUCCESS!" - echo "DB up and running!" - exit 0 - break - fi - done - if [ "$COUNTER" -gt {{ $.Values.db.readinessthreshold }} ] || [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, no DB up and running. Giving up!" - exit 1 - break - fi - imagePullSecrets: - - name: {{ .Values.image.imagePullSecret }} - volumes: - - name: {{ .Values.nodeName }}volume - persistentVolumeClaim: - claimName: {{ .Values.nodeName }}db-pvc - - name: creds - emptyDir: - medium: Memory \ No newline at end of file diff --git a/platforms/r3-corda/charts/corda-h2-password-change/values.yaml b/platforms/r3-corda/charts/corda-h2-password-change/values.yaml deleted file mode 100644 index 927b499e6e2..00000000000 --- a/platforms/r3-corda/charts/corda-h2-password-change/values.yaml +++ /dev/null @@ -1,184 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# This is a YAML-formatted file. -# This should be produced by build - -#Provide the nodeName for node -#Eg. nodeName: bank1 -nodeName: - -metadata: - namespace: - -image: - #Provide the containerName of image - #Eg. containerName: hyperledgerlabs/h2:2018 - containerName: - #Provide the name of image for init container - #Eg. name: ghcr.io/hyperledger/bevel-alpine:latest - initContainerName: - #Provide the image pull secret of image - #Eg. pullSecret: regcred - imagePullSecret: - -nodeConf: - #Provide the p2pUrl for node - #Eg. 
p2pUrl: rp-elb-corda-kube-check-cluster1-144808561.us-east-1.elb.amazonaws.com - p2p: - url: - port: - rpcSettings: - useSsl: - standAloneBroker: - address: - adminAddress: - ssl: - certificatesDirectory: - sslKeystorePath: - trustStoreFilePath: - #Provide the legalName for node - #Eg. legalName: "O=Bank1,L=London,C=GB,CN=Bank1" - legalName: - messagingServerAddress: - jvmArgs: - systemProperties: - sshd: - port: - exportJMXTo: - transactionCacheSizeMegaBytes: - attachmentContentCacheSizeMegaBytes: - notary: - validating: - detectPublicIp: - extraAdvertisedServiceIds: - database: - serverNameTablePrefix: - exportHibernateJMXStatistics: - runMigration: - #Provide the h2Url for node - #Eg. h2Url: bank1h2 - dbUrl: - #Provide the h2Port for node - #Eg. h2Port: 9101 - dbPort: - dataSourceClassName: - dataSourceUrl: - jarPath: - #Provide the nms for node - #Eg. nms: "http://rp-elb-fra-corda-kube-cluster7-2016021309.us-west-1.elb.amazonaws.com:30050" - networkMapURL: - doormanURL: - compatibilityZoneURL: - webAddress: - #Provide the jar Version for corda jar and finanace jar - #Eg. jarVersion: 3.3-corda - jarVersion: - #Provide the devMode for corda node - #Eg. devMode: true - devMode: - #Provide the useHTTPS for corda node - #Eg. useHTTPS: false - useHTTPS: - env: - - name: - value: - -credentials: - #Provide the dataSourceUser for corda node - #Eg. dataSourceUser: sa - dataSourceUser: - #Provide the rpcUser for corda node - #Eg. rpcUser: bank1operations - rpcUser: - - name: - permissions: - -volume: - mountPath: - -resources: - #Provide the limit memory for node - #Eg. limits: "1Gi" - limits: - #Provide the requests memory for node - #Eg. requests: "1Gi" - requests: - -storage: - #Provide the memory for node - #Eg. memory: 4Gi - provisioner: - memory: - type: - -service: - #Provide the type of service - #Eg. type: NodePort or LoadBalancer etc - type: - p2p: - #Provide the p2p port for node - #Eg. port: 10007 - port: - #Provide the p2p node port for node - #Eg. port: 30007 - nodePort: - #Provide the p2p targetPort for node - #Eg. targetPort: 30007 - targetPort: - rpc: - #Provide the rpc port for node - #Eg. port: 10008 - port: - #Provide the p2p targetPort for node - #Eg. targetPort: 10003 - targetPort: - #Provide the p2p node port for node - #Eg. nodePort: 30007 - nodePort: - rpcadmin: - #Provide the rpcadmin port for node - #Eg. port: 10108 - port: - #Provide the p2p targetPort for node - #Eg. targetPort: 10005 - targetPort: - #Provide the p2p node port for node - #Eg. nodePort: 30007 - nodePort: -jobservice: - type: - p2p: - nodePort: - rpc: - nodePort: - rpcadmin: - nodePort: - -vault: - #Provide the vault server address - #Eg. address: http://54.226.163.39:8200 - address: - #Provide the vaultrole - #Eg. role: vault-role - role: - #Provide the authpath - #Eg. authpath: cordabank1 - authpath: - #Provide the serviceaccountname - #Eg. serviceaccountname: vault-auth-issuer - serviceaccountname: - #Provide the secretprefix - #Eg. secretprefix: issuer - secretprefix: - -db: - #Provide the interval in seconds you want to iterate till db to be ready - #Eg. readinesscheckinterval: 5 - readinesscheckinterval: - #Provide the threshold till you want to check if specified db up and running - #Eg. 
readinessthreshold: 2 - readinessthreshold: diff --git a/platforms/r3-corda/charts/corda-h2/Chart.yaml b/platforms/r3-corda/charts/corda-h2/Chart.yaml deleted file mode 100644 index 6403de42645..00000000000 --- a/platforms/r3-corda/charts/corda-h2/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-os: Deploys H2 DB." -name: corda-h2 -version: 1.0.0 diff --git a/platforms/r3-corda/charts/corda-h2/README.md b/platforms/r3-corda/charts/corda-h2/README.md deleted file mode 100644 index 4338167988d..00000000000 --- a/platforms/r3-corda/charts/corda-h2/README.md +++ /dev/null @@ -1,174 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) -[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# H2 Deployment - -- [h2 Deployment Helm Chart](#h2-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Contributing](#contributing) -- [License](#license) - - -## h2 Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/develop/platforms/r3-corda/charts/corda-h2) deploys Kubernetes deployment resource for h2 database. - - - -## Prerequisites ---- -Before deploying the chart please ensure you have the following prerequisites: - -- Node's database up and running. -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm is installed. - - - -## Chart Structure ---- -This chart has following structue: - -``` - - ├── h2 - │ ├── Chart.yaml - │ ├── templates - │ │ ├── deployment.yaml - │ │ ├── pvc.yaml - │ │ └── service.yaml - │ └── values.yaml -``` -- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed. -- `deployment.yaml` : This file is a configuration file for deployement in Kubernetes.It creates a deployment file with a specified number of replicas and defines various settings for the deployment.Including volume mounts, environment variables, and ports for the container. -- `pvc.yaml` : A PersistentVolumeClaim (PVC) is a request for storage by a user. -- `service.yaml` : This file defines a Kubernetes Service with multiple ports for protocols and targets, and supports Ambassador proxy annotations for specific configurations when using the "ambassador" proxy provider. -- `chart.yaml` : Provides metadata about the chart, such as its name, version, and description. -- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the image, resources, storage, service, etc. - - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/r3-corda/charts/corda-h2/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. 
Here are some important configuration options: - -## Parameters ---- - -### Name - -| Name | Description | Default Value | -| -----------| -------------------------------------------------- | ------------- | -| name | Provide the name of the node | "" | - -### Image - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------- | --------------- | -| initContainerName | Provide the alpine utils image, which is used for all init-containers of deployments/jobs | "" | -| containerName | Provide the containerName of image | "" | -| imagePullSecret | Provide the image pull secret of image | regcred | - -### Resources - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------ | --------------- | -| limits | Provide the limit memory for node | "1Gi" | -| requests | Provide the requests memory for node | "1Gi" | - -### storage - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ------------- | -| Memory | Provide the memory for node | "4Gi" | -| MountPath | The path where the volume will be mounted | "" | - -### Service - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ------------- | -| type | Provide the type of service | NodePort | -| tcp port | Provide the tcp port for node | 9101 | -| nodePort | Provide the tcp node port for node | 32001 | -| targetPort | Provide the tcp targetPort for node | 1521 | - -## WEB - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ------------- | -| nodePort | Provide the web node port for node | 32080 | -| targetPort | Provide the tcp targetPort for node | 81 | -| port | Provide the tcp node port for node | 8080 | - - - - -## Deployment ---- - -To deploy the h2 Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/r3-corda/charts/corda-h2/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install, upgrade, verify, delete the chart: - -To install the chart: -```bash -helm repo add bevel https://hyperledger.github.io/bevel/ -helm install ./corda-h2 -``` - -To upgrade the chart: -```bash -helm upgrade ./corda-h2 -``` - -To verify the deployment: -```bash -kubectl get jobs -n -``` -Note : Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. - -To delete the chart: -```bash -helm uninstall -``` -Note : Replace `` with the desired name for the release. - - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [h2 Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/develop/platforms/r3-corda/charts/corda-h2), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - -## License - -This chart is licensed under the Apache v2.0 license. 
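Once the chart is installed, a quick way to sanity-check the H2 deployment is to port-forward the web-console port of its service. In the sketch below, the service name `<nodeName>db` and the `8080` web port come from this chart's service template and default values, while the namespace is a placeholder.

```bash
# Hypothetical check; replace <nodeName> and <namespace> with your deployment's values.
kubectl port-forward svc/<nodeName>db 8080:8080 -n <namespace>

# The H2 web console should then be reachable at http://localhost:8080
```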
- -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -``` diff --git a/platforms/r3-corda/charts/corda-h2/templates/deployment.yaml b/platforms/r3-corda/charts/corda-h2/templates/deployment.yaml deleted file mode 100644 index bbf7c6826a2..00000000000 --- a/platforms/r3-corda/charts/corda-h2/templates/deployment.yaml +++ /dev/null @@ -1,71 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ .Values.nodeName }}db - {{- if .Values.deployment.annotations }} - annotations: -{{ toYaml .Values.deployment.annotations | indent 8 }} - {{- end }} - namespace: {{ .Values.metadata.namespace }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }}db - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - replicas: 1 - selector: - matchLabels: - app: {{ .Values.nodeName }}db - app.kubernetes.io/name: {{ .Values.nodeName }}db - app.kubernetes.io/instance: {{ .Release.Name }} - strategy: - type: Recreate - rollingUpdate: null - template: - metadata: - labels: - app: {{ .Values.nodeName }}db - app.kubernetes.io/name: {{ .Values.nodeName }}db - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - hostname: {{ .Values.nodeName }}db - securityContext: - fsGroup: 1000 - containers: - - name: {{ .Values.nodeName }}db - image: {{ .Values.image.containerName }} - resources: - limits: - memory: {{ .Values.resources.limits }} - requests: - memory: {{ .Values.resources.requests }} - ports: - - containerPort: 1521 - name: p2p - - containerPort: 81 - name: web - env: - - name: JAVA_OPTIONS - value: -Xmx512m - volumeMounts: - - name: db - mountPath: {{ .Values.storage.mountPath }} - readOnly: false - livenessProbe: - tcpSocket: - port: 1521 - initialDelaySeconds: 15 - periodSeconds: 20 - imagePullSecrets: - - name: {{ .Values.image.imagePullSecret }} - volumes: - - name: db - persistentVolumeClaim: - claimName: {{ .Values.nodeName }}db-pvc diff --git a/platforms/r3-corda/charts/corda-h2/templates/pvc.yaml b/platforms/r3-corda/charts/corda-h2/templates/pvc.yaml deleted file mode 100644 index d35838beab0..00000000000 --- a/platforms/r3-corda/charts/corda-h2/templates/pvc.yaml +++ /dev/null @@ -1,27 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: {{ .Values.nodeName }}db-pvc - {{- if .Values.pvc.annotations }} - annotations: -{{ toYaml .Values.pvc.annotations | indent 8 }} - {{- end }} - namespace: {{ .Values.metadata.namespace }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }}db-pvc - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - storageClassName: {{ .Values.storage.name }} - accessModes: - - ReadWriteOnce - resources: - requests: - storage: {{ .Values.storage.memory }} \ No newline at end of file diff --git a/platforms/r3-corda/charts/corda-h2/templates/service.yaml b/platforms/r3-corda/charts/corda-h2/templates/service.yaml deleted file mode 100644 index 80fda163eac..00000000000 --- a/platforms/r3-corda/charts/corda-h2/templates/service.yaml +++ /dev/null @@ -1,40 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.nodeName }}db - {{- if .Values.service.annotations }} - annotations: -{{ toYaml .Values.service.annotations | indent 8 }} - {{- end }} - namespace: {{ .Values.metadata.namespace }} - labels: - run: {{ .Values.nodeName }}db - app.kubernetes.io/name: {{ $.Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - type: {{ .Values.service.type }} - selector: - app: {{ .Values.nodeName }}db - ports: - - name: tcp - protocol: TCP - port: {{ .Values.service.tcp.port }} - targetPort: {{ .Values.service.tcp.targetPort}} - {{- if .Values.service.tcp.nodePort }} - nodePort: {{ .Values.service.tcp.nodePort}} - {{- end }} - - name: web - protocol: TCP - port: {{ .Values.service.web.port }} - targetPort: {{ .Values.service.web.targetPort }} - {{- if .Values.service.web.nodePort }} - nodePort: {{ .Values.service.web.nodePort}} - {{- end }} diff --git a/platforms/r3-corda/charts/corda-h2/values.yaml b/platforms/r3-corda/charts/corda-h2/values.yaml deleted file mode 100644 index 5b064de652e..00000000000 --- a/platforms/r3-corda/charts/corda-h2/values.yaml +++ /dev/null @@ -1,63 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Default values for nodechart. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -#Provide the Name for node to be deployed -#Eg. nodeName: bank1 -nodeName: - -image: - #Provide the name of image for container - #Eg. containerName: hyperledgerlabs/h2:2018 - containerName: hyperledgerlabs/h2:2018 - #Provide the name of image for init container - #Eg. name: ghcr.io/hyperledger/bevel-alpine:latest - initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - #Provide the image pull secret of image - #Eg. 
pullSecret: regcred - imagePullSecret: "" - -resources: - #Provide the limit memory for node - #Eg. limits: "1Gi" - limits: "1Gi" - #Provide the requests memory for node - #Eg. requests: "1Gi" - requests: "1Gi" - -storage: - #Provide the memory for node - #Eg. memory: 4Gi - memory: 4Gi - mountPath: - -service: - #Provide the type of service - #Eg. type: NodePort - type: NodePort - tcp: - #Provide the tcp port for node - #Eg. port: 9101 - port: 9101 - #Provide the tcp node port for node - #Eg. port: 32001 - nodePort: - #Provide the tcp targetPort for node - #Eg. targetPort: 1521 - targetPort: 1521 - web: - #Provide the web node port for node - #Eg. port: 32080 - nodePort: - #Provide the tcp targetPort for node - #Eg. targetPort: 81 - targetPort: 81 - #Provide the tcp node port for node - #Eg. port: 8080 - port: 8080 diff --git a/platforms/r3-corda/charts/corda-init/Chart.yaml b/platforms/r3-corda/charts/corda-init/Chart.yaml new file mode 100644 index 00000000000..775fec7b694 --- /dev/null +++ b/platforms/r3-corda/charts/corda-init/Chart.yaml @@ -0,0 +1,25 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +apiVersion: v1 +name: corda-init +description: "R3 Corda: Initializes Corda network." +version: 1.0.0 +appVersion: "latest" +keywords: + - bevel + - corda + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/r3-corda/charts/corda-init/README.md b/platforms/r3-corda/charts/corda-init/README.md new file mode 100644 index 00000000000..afea84bf631 --- /dev/null +++ b/platforms/r3-corda/charts/corda-init/README.md @@ -0,0 +1,96 @@ +[//]: # (##############################################################################################) +[//]: # (Copyright Accenture. All Rights Reserved.) +[//]: # (SPDX-License-Identifier: Apache-2.0) +[//]: # (##############################################################################################) + +# corda-init + +This chart is a component of Hyperledger Bevel. The corda-init chart initializes a Kubernetes namespace for Corda network. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details. + +## TL;DR + +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install init bevel/corda-init +``` + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +If Hashicorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ + +> **Important**: Also check the dependent charts. + +## Installing the Chart + +To install the chart with the release name `init`: + +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install init bevel/corda-init +``` + +The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `init` deployment: + +```bash +helm uninstall init +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. 
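Environment-specific settings can be supplied as overrides at install time. The sketch below uses the global parameters documented in the next section; the Vault address, auth path, secret prefix, and cluster URL values are placeholders, not defaults shipped with the chart.

```bash
# Hypothetical overrides; adjust the placeholder values to your environment.
helm install init bevel/corda-init \
  --set global.vault.address="http://vault.example.com:8200" \
  --set global.vault.authPath="corda" \
  --set global.vault.secretPrefix="data/corda" \
  --set global.cluster.kubernetesUrl="https://kubernetes.example.com:443"
```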
+
+## Parameters
+
+### Global parameters
+These parameters are referred to identically in each parent and child chart.
+| Name | Description | Default Value |
+|--------|---------|-------------|
+| `global.serviceAccountName` | The service account name that will be created for Vault auth and Kubernetes secret management | `vault-auth` |
+| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. Currently only `aws` and `minikube` are tested | `aws` |
+| `global.cluster.cloudNativeServices` | Only `false` is implemented; `true` (use Cloud Native Services: Secrets Manager and IAM for AWS, Key Vault and Managed Identities for Azure) is reserved for the future | `false` |
+| `global.cluster.kubernetesUrl` | URL of the Kubernetes Cluster | `""` |
+| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` and `kubernetes` are supported. | `hashicorp` |
+| `global.vault.role` | Role used for authentication with Vault | `vault-role` |
+| `global.vault.network` | Network type that is being deployed | `corda` |
+| `global.vault.address` | URL of the Vault server | `""` |
+| `global.vault.authPath` | Authentication path for Vault | `supplychain` |
+| `global.vault.secretEngine` | Name of the Vault secret engine | `secretsv2` |
+| `global.vault.secretPrefix` | Vault secret prefix, which must start with `data/` | `data/supplychain` |
+
+### Settings
+
+| Name | Description | Default Value |
+|--------|---------|-------------|
+| `settings.secondaryInit` | Flag to copy the Doorman and NMS certs from `files` for additional nodes; set to `true` only when `tls: true` | `false` |
+
+## License
+
+This chart is licensed under the Apache v2.0 license.
+
+Copyright © 2024 Accenture
+
+### Attribution
+
+This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here:
+
+```
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+``` diff --git a/platforms/r3-corda/charts/corda-init/files/openssl.conf b/platforms/r3-corda/charts/corda-init/files/openssl.conf new file mode 100644 index 00000000000..5a9c0c69e6c --- /dev/null +++ b/platforms/r3-corda/charts/corda-init/files/openssl.conf @@ -0,0 +1,38 @@ +[req] +req_extensions = v3_ca +distinguished_name = dn + +[dn] + +[v3_ca] +basicConstraints = critical, CA:TRUE +keyUsage = critical,digitalSignature, keyCertSign, cRLSign +extendedKeyUsage=serverAuth,clientAuth +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always + +[v3_nonca] +basicConstraints = critical, CA:FALSE +keyUsage = critical,digitalSignature, keyCertSign, cRLSign +extendedKeyUsage=serverAuth,clientAuth +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always + +[networkMap] +1.3.6.1.4.1.50530.1.1 = ASN1:INTEGER:2 +basicConstraints = CA:FALSE +keyUsage = digitalSignature +extendedKeyUsage=serverAuth,clientAuth,anyExtendedKeyUsage +subjectKeyIdentifier = hash + +[doorman] +1.3.6.1.4.1.50530.1.1 = ASN1:INTEGER:1 +basicConstraints = critical, CA:TRUE +keyUsage = critical,digitalSignature, keyCertSign, cRLSign +extendedKeyUsage=serverAuth,clientAuth +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always + +[EXT] +keyUsage=digitalSignature +extendedKeyUsage=serverAuth diff --git a/platforms/r3-corda/charts/corda-init/requirements.yaml b/platforms/r3-corda/charts/corda-init/requirements.yaml new file mode 100644 index 00000000000..b1195396c5f --- /dev/null +++ b/platforms/r3-corda/charts/corda-init/requirements.yaml @@ -0,0 +1,11 @@ +dependencies: + - name: bevel-vault-mgmt + repository: "file://../../../shared/charts/bevel-vault-mgmt" + tags: + - bevel + version: ~1.0.0 + - name: bevel-scripts + repository: "file://../../../shared/charts/bevel-scripts" + tags: + - bevel + version: ~1.0.0 diff --git a/platforms/r3-corda/charts/corda-init/templates/_helpers.tpl b/platforms/r3-corda/charts/corda-init/templates/_helpers.tpl new file mode 100644 index 00000000000..0dea3f2bbea --- /dev/null +++ b/platforms/r3-corda/charts/corda-init/templates/_helpers.tpl @@ -0,0 +1,29 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "corda-init.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "corda-init.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "corda-init.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/platforms/r3-corda/charts/corda-init/templates/configmap.yaml b/platforms/r3-corda/charts/corda-init/templates/configmap.yaml new file mode 100644 index 00000000000..a1b3a7bfc69 --- /dev/null +++ b/platforms/r3-corda/charts/corda-init/templates/configmap.yaml @@ -0,0 +1,53 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: openssl-conf + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: "openssl-config" + app.kubernetes.io/part-of: {{ include "corda-init.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} +data: + openssl.conf: |+ +{{ .Files.Get "files/openssl.conf" | indent 4 }} +{{- if .Values.settings.secondaryInit }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: doorman-tls-certs + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: "doorman-tls-certs" + app.kubernetes.io/part-of: {{ include "corda-init.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} +data: + tls.crt: |+ +{{ .Files.Get "files/doorman.crt" | indent 4 }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: nms-tls-certs + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: "nms-tls-certs" + app.kubernetes.io/part-of: {{ include "corda-init.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} +data: + tls.crt: |+ +{{ .Files.Get "files/nms.crt" | indent 4 }} +{{- end }} diff --git a/platforms/r3-corda/charts/corda-init/values.yaml b/platforms/r3-corda/charts/corda-init/values.yaml new file mode 100644 index 00000000000..9e9cebe0d29 --- /dev/null +++ b/platforms/r3-corda/charts/corda-init/values.yaml @@ -0,0 +1,35 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +global: + #Provide the service account name which will be created. + serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # only 'false' is implemented + #Provide the kubernetes host url + #Eg. kubernetesUrl: https://10.3.8.5:8443 + kubernetesUrl: + vault: + #Provide the type of vault + type: hashicorp + #Provide the vault role used. + role: vault-role + #Provide the network type + network: corda + #Provide the vault server address + address: + #Provide the vault authPath configured to be used. + authPath: supplychain + #Provide the secret engine. + secretEngine: secretsv2 + #Provide the vault path where the secrets will be stored + secretPrefix: "data/supplychain" + +settings: + # Flag to copy doorman and nms certs, true only when tls: true + secondaryInit: false diff --git a/platforms/r3-corda/charts/corda-mongodb-tls/Chart.yaml b/platforms/r3-corda/charts/corda-mongodb-tls/Chart.yaml deleted file mode 100644 index c6b47e9be54..00000000000 --- a/platforms/r3-corda/charts/corda-mongodb-tls/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-os: Deploys mongodb with tls enabled, used for doorman and networkmap." -name: corda-mongodb-tls -version: 1.0.0 diff --git a/platforms/r3-corda/charts/corda-mongodb-tls/README.md b/platforms/r3-corda/charts/corda-mongodb-tls/README.md deleted file mode 100644 index df0ad44e0d8..00000000000 --- a/platforms/r3-corda/charts/corda-mongodb-tls/README.md +++ /dev/null @@ -1,158 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) -[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# Mongodb-tls Deployment - -- [Mongodb-tls Deployment Helm Chart](#Mongodb-tls-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Contributing](#contributing) -- [License](#license) - - -## Mongodb-tls Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/develop/platforms/r3-corda/charts/corda-mongodb-tls) deploys MongoDB with tls enabled. - - -## Prerequisites ---- -Before deploying the chart please ensure you have the following prerequisites: - -- Mongodb database up and running. -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm is installed. - - -## Chart Structure ---- -This chart has following structue: - -``` - - ├── mongodb-tls - │ ├── Chart.yaml - │ ├── templates - │ │ ├── deployment.yaml - │ │ ├── pvc.yaml - │ │ └── service.yaml - │ └── values.yaml -``` - -Type of files used: - -- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed. -- `deployment.yaml` : This deployment file can deploy a MongoDB database in a Kubernetes cluster, manages a MongoDB replica set and it configures environment variables for the MongoDB root username and password. And also its includes ports, volume mounts and initialization tasks using init containers. -- `pvc.yaml` : A PersistentVolumeClaim (PVC) is a request for storage by a user. -- `service.yaml` : This file defines a Kubernetes Service with multiple ports for protocols and targets, and supports Ambassador proxy annotations for specific configurations when using the "ambassador" proxy provider. -- `chart.yaml` : Provides metadata about the chart, such as its name, version, and description. -- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the image, storage and service. - - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/r3-corda/charts/corda-mongodb-tls/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. 
Here are some important configuration options: - -## Parameters ---- - -### Name - -| Name | Description | Default Value | -| -----------| -------------------------------------------------- | ------------- | -| name | Provide the name of the node | mongodb-doorman | - -### Image - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------- | --------------- | -| containerName | Provide the containerName of image | "" | - -### storage - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ------------- | -| Memory | Provide the memory for node | "4Gi" | -| MountPath | The path where the volume will be mounted | "" | - -### Service - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ------------- | -| type | Provide the type of service | "NodePort" | -| tcp port | Provide the tcp port for node | "9101" | -| nodePort | Provide the tcp node port for node | "32001" | -| targetPort | Provide the tcp targetPort for node | "27017" | - - - -## Deployment ---- - -To deploy the Mongodb-tls Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/r3-corda/charts/corda-mongodb-tls/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install, upgrade, verify, delete the chart: - -To install the chart: -```bash -helm repo add bevel https://hyperledger.github.io/bevel/ -helm install ./corda-mongodb-tls -``` - -To upgrade the chart: -```bash -helm upgrade ./corda-mongodb-tls -``` - -To verify the deployment: -```bash -kubectl get jobs -n -``` -Note : Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. - - -To delete the chart: -```bash -helm uninstall -``` -Note : Replace `` with the desired name for the release. - - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Mongodb-tls Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/develop/platforms/r3-corda/charts/corda-mongodb-tls), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - -## License - -This chart is licensed under the Apache v2.0 license. - -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-``` diff --git a/platforms/r3-corda/charts/corda-mongodb-tls/templates/deployment.yaml b/platforms/r3-corda/charts/corda-mongodb-tls/templates/deployment.yaml deleted file mode 100644 index 81079a5c330..00000000000 --- a/platforms/r3-corda/charts/corda-mongodb-tls/templates/deployment.yaml +++ /dev/null @@ -1,184 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} - {{- if .Values.deployment.annotations }} - annotations: -{{ toYaml .Values.deployment.annotations | indent 8 }} - {{- end }} - labels: - appdb: {{ .Values.nodeName }} -spec: - replicas: {{ .Values.replicas }} - selector: - matchLabels: - appdb: {{ .Values.nodeName }} - template: - metadata: - labels: - appdb: {{ .Values.nodeName }} - spec: - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - hostname: {{ .Values.nodeName }} - securityContext: - fsGroup: 1000 - containers: - - name: {{ .Values.nodeName }} - image: {{ .Values.image.containerName }} - env: - - name: MONGO_INITDB_ROOT_USERNAME_FILE - value: /run/secrets/db_root_username - - name: MONGO_INITDB_ROOT_PASSWORD_FILE - value: /run/secrets/db_root_password - command: - - /bin/sh - - -c - - > - if [ -f /data/db/admin-user.lock ]; then - #file /data/db/admin-user.lock created and checked to ensure mongod is fully up for adding new db user in postStart hook. - echo "KUBERNETES LOG $HOSTNAME- Starting Mongo Daemon" - - # ensure wiredTigerCacheSize is set within the size of the containers memory limit, Setting up with tag --sslAllowConnectionsWithoutCertificates only client validates the server to ensure that it receives data from the intended server. - if [ "$HOSTNAME" = "{{ $.Values.nodeName }}" ]; then - #for Mongodb single server. - echo "check 1" - mongod --wiredTigerCacheSizeGB 0.25 --bind_ip 0.0.0.0 --sslMode requireSSL --sslPEMKeyFile /etc/ssl/{{ $.Values.nodeName }}.pem --sslCAFile /etc/ssl/mongoCA.crt --sslAllowConnectionsWithoutCertificates --sslAllowInvalidHostnames --auth; - fi; - else - echo "KUBERNETES LOG $HOSTNAME- Starting Mongo Daemon with setup setting (authMode)" - mongod --auth; - fi; - lifecycle: - postStart: - exec: - command: - - /bin/sh - - -c - - > - if [ ! -f /data/db/admin-user.lock ]; then - echo "KUBERNETES LOG $HOSTNAME no Admin-user.lock file found yet" - # user name and password for creation of new db user. - DB_PASSWORD=`cat /run/secrets/db_root_password` - DB_USERNAME=`cat /run/secrets/db_root_username` - # sleep 20 to 'ensure' mongod is accepting connections for creating db user. - sleep 20; - touch /data/db/admin-user.lock - # Adding database user with password in admin database, checking for host name to create new db user. - if [ "$HOSTNAME" = "{{ .Values.nodeName }}" ]; then - echo "KUBERNETES LOG $HOSTNAME- creating admin user doorman" - # Adding database user in admin db using mongo shell command. 
- mongo --eval "db = db.getSiblingDB('admin'); db.createUser({ user: '${DB_USERNAME}', pwd: '${DB_PASSWORD}', roles: [{ role: 'root', db: 'admin' }]});" >> /data/db/config.log - fi; - echo "KUBERNETES LOG $HOSTNAME-shutting mongod down for final restart" - mongod --shutdown; - fi; - ports: - - containerPort: {{ .Values.service.tcp.targetPort }} - volumeMounts: - - name: {{ .Values.storage.volname }} - mountPath: {{ .Values.storage.mountPath }} - - name: creds - mountPath: "/run/secrets" - readOnly: false - - name: certs - mountPath: "/etc/ssl" - readOnly: false - initContainers: - - name: init-credential - image : {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{.Values.vault.address}} - - name: KUBERNETES_AUTH_PATH - value: {{.Values.vault.authpath}} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: MOUNT_PATH - value: /run/secrets - - name: SECRET_PREFIX - value: {{.Values.vault.secretprefix}} - - name: CERT_SECRET_PREFIX - value: {{.Values.vault.certsecretprefix}} - - name: MONGODB_USERNAME - value: {{.Values.mongodb.username}} - command: ["/bin/sh","-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - echo "Getting secrets from Vault Server" - - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - - OUTPUT_PATH=${MOUNT_PATH} - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${SECRET_PREFIX} | jq -r 'if .errors then . else . end') - - validateVaultResponse "${SECRET_PREFIX}" "${LOOKUP_PWD_RESPONSE}" "LOOKUPSECRETRESPONSE" - - - MONGODB_PASSWORD=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["mongodbPassword"]') - - echo "${MONGODB_PASSWORD}" >> ${MOUNT_PATH}/db_root_password - echo "${MONGODB_USERNAME}" >> ${MOUNT_PATH}/db_root_username - - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERT_SECRET_PREFIX} | jq -r 'if .errors then . else . end') - - # Validating vault response for mongodb certificates. - validateVaultResponse "${CERT_SECRET_PREFIX}" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - - # Getting certificates of server from vault and storing into /etc/ssl. - SERVER_CERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["{{ $.Values.nodeName }}.pem"]') - echo "${SERVER_CERT}" | base64 -d > /etc/ssl/{{ $.Values.nodeName }}.pem - - # Getting certificate authority cert from vault which is required for client validation and storing into /etc/ssl. 
- CA_CERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["mongoCA.crt"]') - echo "${CA_CERT}" | base64 -d > /etc/ssl/mongoCA.crt - volumeMounts: - - name: creds - mountPath: "/run/secrets" - readOnly: false - - name: certs - mountPath: "/etc/ssl" - readOnly: false - imagePullSecrets: - - name: {{ .Values.image.imagePullSecret }} - volumes: - - name: {{ .Values.storage.volname }} - persistentVolumeClaim: - claimName: {{ .Values.nodeName }}-pvc - - name: creds - emptyDir: - medium: Memory - - name: certs - emptyDir: - medium: Memory - \ No newline at end of file diff --git a/platforms/r3-corda/charts/corda-mongodb-tls/templates/pvc.yaml b/platforms/r3-corda/charts/corda-mongodb-tls/templates/pvc.yaml deleted file mode 100644 index 37d95dcfd6b..00000000000 --- a/platforms/r3-corda/charts/corda-mongodb-tls/templates/pvc.yaml +++ /dev/null @@ -1,26 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: {{ .Values.nodeName }}-pvc - namespace: {{ .Values.metadata.namespace }} - {{- if .Values.pvc.annotations }} - annotations: -{{ toYaml .Values.pvc.annotations | indent 8 }} - {{- end }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }}-pvc - app: {{ .Values.nodeName }}-pv -spec: - storageClassName: {{ .Values.storage.name }} - accessModes: - - ReadWriteOnce - resources: - requests: - storage: {{ .Values.storage.memory }} - \ No newline at end of file diff --git a/platforms/r3-corda/charts/corda-mongodb-tls/templates/service.yaml b/platforms/r3-corda/charts/corda-mongodb-tls/templates/service.yaml deleted file mode 100644 index b3a105db29e..00000000000 --- a/platforms/r3-corda/charts/corda-mongodb-tls/templates/service.yaml +++ /dev/null @@ -1,31 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} - {{- if .Values.service.annotations }} - annotations: -{{ toYaml .Values.service.annotations | indent 8 }} - {{- end }} - labels: - run: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app: {{ .Values.nodeName }} -spec: - type: {{ .Values.service.type }} - ports: - - protocol: TCP - port: {{ .Values.service.tcp.port }} - targetPort: {{ .Values.service.tcp.targetPort }} - {{- if .Values.service.tcp.nodePort }} - nodePort: {{ .Values.service.tcp.nodePort}} - {{- end }} - selector: - appdb: {{ .Values.nodeName }} - diff --git a/platforms/r3-corda/charts/corda-mongodb-tls/values.yaml b/platforms/r3-corda/charts/corda-mongodb-tls/values.yaml deleted file mode 100644 index 239b7e61235..00000000000 --- a/platforms/r3-corda/charts/corda-mongodb-tls/values.yaml +++ /dev/null @@ -1,38 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Default values for nodechart. 
-# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -#Provide the Name for node to be deployed -#Eg. nodeName: mongodb-doorman -nodeName: mongodb-doorman -replicas: -image: - #Provide the name of image for container - #Eg. containerName: hyperledgerlabs/h2:2018 - containerName: hyperledgerlabs/h2:2018 -storage: - #Provide the memory for node - #Eg. memory: 4Gi - memory: 4Gi - name: - mountPath: -service: - #Provide the type of service - #Eg. type: NodePort - type: NodePort - tcp: - #Provide the tcp port for node - #Eg. port: 9101 - port: 9101 - #Provide the tcp node port for node - #Eg. port: 32001 - nodePort: - #Provide the tcp node port for node - #Eg. targetPort: 27017 - targetPort: 27017 diff --git a/platforms/r3-corda/charts/corda-mongodb/Chart.yaml b/platforms/r3-corda/charts/corda-mongodb/Chart.yaml deleted file mode 100644 index ff49aabd3f2..00000000000 --- a/platforms/r3-corda/charts/corda-mongodb/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-os: Deploys MongoDB, used for doorman and networkmap." -name: corda-mongodb -version: 1.0.0 diff --git a/platforms/r3-corda/charts/corda-mongodb/README.md b/platforms/r3-corda/charts/corda-mongodb/README.md deleted file mode 100644 index e0db2a97c02..00000000000 --- a/platforms/r3-corda/charts/corda-mongodb/README.md +++ /dev/null @@ -1,158 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) -[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# Mongodb Deployment - -- [Mongodb Deployment Helm Chart](#Mongodb-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Contributing](#contributing) -- [License](#license) - - -## Mongodb Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/develop/platforms/r3-corda/charts/corda-mongodb) deploys Mongodb. - - - -## Prerequisites ---- -Before deploying the chart please ensure you have the following prerequisites: - -- Mongodb database up and running. -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm is installed. - - -## Chart Structure ---- -This chart has following structue: - -``` - - ├── mongodb - │ ├── Chart.yaml - │ ├── templates - │ │ ├── deployment.yaml - │ │ ├── pvc.yaml - │ │ └── service.yaml - │ └── values.yaml -``` - -Type of files used: - -- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed. -- `deployment.yaml` : This Deployment manages a MongoDB replica set of a Pod template, including volume mounts, environment variables, and ports for the container. -- `pvc.yaml` : A PersistentVolumeClaim (PVC) is a request for storage by a user. 
-- `service.yaml` : This file defines a Kubernetes Service with multiple ports for protocols and targets, and supports Ambassador proxy annotations for specific configurations when using the "ambassador" proxy provider. -- `chart.yaml` : Provides metadata about the chart, such as its name, version, and description. -- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the image, storage and service. - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/r3-corda/charts/corda-mongodb/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: - -## Parameters ---- - -### Name - -| Name | Description | Default Value | -| -----------| -------------------------------------------------- | ------------- | -| name | Provide the name of the node | mongodb | - -### Image - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------- | --------------- | -| containerName | Provide the containerName of image | "" | - -### storage - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ------------- | -| Memory | Provide the memory for node | "4Gi" | -| MountPath | The path where the volume will be mounted | "" | - -### Service - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ------------- | -| type | Provide the type of service | "NodePort" | -| tcp port | Provide the tcp port for node | "9101" | -| nodePort | Provide the tcp node port for node | "32001" | -| targetPort | Provide the tcp targetPort for node | "27017" | - - - -## Deployment ---- - -To deploy the Mongodb Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/r3-corda/charts/corda-mongodb/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install, upgrade, verify, delete the chart: - -To install the chart: -```bash -helm repo add bevel https://hyperledger.github.io/bevel/ -helm install ./corda-mongodb -``` - -To upgrade the chart: -```bash -helm upgrade ./corda-mongodb -``` - -To verify the deployment: -```bash -kubectl get jobs -n -``` -Note : Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. - - -To delete the chart: -```bash -helm uninstall -``` -Note : Replace `` with the desired name for the release. - - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Mongodb Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/develop/platforms/r3-corda/charts/corda-mongodb), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - -## License - -This chart is licensed under the Apache v2.0 license. - -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -``` diff --git a/platforms/r3-corda/charts/corda-mongodb/templates/deployment.yaml b/platforms/r3-corda/charts/corda-mongodb/templates/deployment.yaml deleted file mode 100644 index 56560a46b66..00000000000 --- a/platforms/r3-corda/charts/corda-mongodb/templates/deployment.yaml +++ /dev/null @@ -1,117 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} - {{- if .Values.deployment.annotations }} - annotations: -{{ toYaml .Values.deployment.annotations | indent 8 }} - {{- end }} - labels: - appdb: {{ .Values.nodeName }} -spec: - replicas: {{ .Values.replicas }} - selector: - matchLabels: - appdb: {{ .Values.nodeName }} - template: - metadata: - labels: - appdb: {{ .Values.nodeName }} - spec: - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - hostname: {{ .Values.nodeName }} - securityContext: - fsGroup: 1000 - containers: - - name: {{ .Values.nodeName }} - image: {{ .Values.image.containerName }} - env: - - name: MONGO_INITDB_ROOT_USERNAME_FILE - value: /run/secrets/db_root_username - - name: MONGO_INITDB_ROOT_PASSWORD_FILE - value: /run/secrets/db_root_password - ports: - - containerPort: {{ .Values.service.tcp.targetPort }} - volumeMounts: - - name: {{ .Values.storage.volname }} - mountPath: {{ .Values.storage.mountPath }} - - name: creds - mountPath: "/run/secrets" - readOnly: false - initContainers: - - name: init-credential - image : {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{.Values.vault.address}} - - name: KUBERNETES_AUTH_PATH - value: {{.Values.vault.authpath}} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: MOUNT_PATH - value: /run/secrets - - name: SECRET_PREFIX - value: {{.Values.vault.secretprefix}} - - name: MONGODB_USERNAME - value: {{.Values.mongodb.username}} - command: ["/bin/sh","-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - echo "Getting secrets from Vault Server" - - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . 
else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - - OUTPUT_PATH=${MOUNT_PATH} - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${SECRET_PREFIX} | jq -r 'if .errors then . else . end') - - validateVaultResponse "${SECRET_PREFIX}" "${LOOKUP_PWD_RESPONSE}" "LOOKUPSECRETRESPONSE" - - MONGODB_PASSWORD=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["mongodbPassword"]') - - echo "${MONGODB_PASSWORD}" >> ${MOUNT_PATH}/db_root_password - echo "${MONGODB_USERNAME}" >> ${MOUNT_PATH}/db_root_username - - volumeMounts: - - name: creds - mountPath: "/run/secrets" - readOnly: false - imagePullSecrets: - - name: {{ .Values.image.imagePullSecret }} - volumes: - - name: {{ .Values.storage.volname }} - persistentVolumeClaim: - claimName: {{ .Values.nodeName }}-pvc - - name: creds - emptyDir: - medium: Memory \ No newline at end of file diff --git a/platforms/r3-corda/charts/corda-mongodb/templates/pvc.yaml b/platforms/r3-corda/charts/corda-mongodb/templates/pvc.yaml deleted file mode 100644 index d19ff565673..00000000000 --- a/platforms/r3-corda/charts/corda-mongodb/templates/pvc.yaml +++ /dev/null @@ -1,25 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: {{ .Values.nodeName }}-pvc - namespace: {{ .Values.metadata.namespace }} - {{- if .Values.pvc.annotations }} - annotations: -{{ toYaml .Values.pvc.annotations | indent 8 }} - {{- end }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }}-pvc - app: {{ .Values.nodeName }}-pv -spec: - storageClassName: {{ .Values.storage.name }} - accessModes: - - ReadWriteOnce - resources: - requests: - storage: {{ .Values.storage.memory }} diff --git a/platforms/r3-corda/charts/corda-mongodb/templates/service.yaml b/platforms/r3-corda/charts/corda-mongodb/templates/service.yaml deleted file mode 100644 index a5d5a4381e9..00000000000 --- a/platforms/r3-corda/charts/corda-mongodb/templates/service.yaml +++ /dev/null @@ -1,30 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} - {{- if .Values.service.annotations }} - annotations: -{{ toYaml .Values.service.annotations | indent 8 }} - {{- end }} - labels: - run: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app: {{ .Values.nodeName }} -spec: - type: {{ .Values.service.type }} - ports: - - protocol: TCP - port: {{ .Values.service.tcp.port }} - targetPort: {{ .Values.service.tcp.targetPort }} - {{- if .Values.service.tcp.nodePort }} - nodePort: {{ .Values.service.tcp.nodePort}} - {{- end }} - selector: - appdb: {{ .Values.nodeName }} diff --git a/platforms/r3-corda/charts/corda-mongodb/values.yaml b/platforms/r3-corda/charts/corda-mongodb/values.yaml deleted file mode 100644 index af3c1a90d01..00000000000 --- a/platforms/r3-corda/charts/corda-mongodb/values.yaml +++ /dev/null @@ -1,38 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Default values for nodechart. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -#Provide the Name for node to be deployed -#Eg. nodeName: mongodb-doorman -nodeName: mongodb -replicas: -image: - #Provide the name of image for container - #Eg. containerName: hyperledgerlabs/h2:2018 - containerName: hyperledgerlabs/h2:2018 -storage: - #Provide the memory for node - #Eg. memory: 4Gi - memory: 4Gi - name: - mountPath: -service: - #Provide the type of service - #Eg. type: NodePort - type: NodePort - tcp: - #Provide the tcp port for node - #Eg. port: 9101 - port: 9101 - #Provide the tcp node port for node - #Eg. port: 32001 - nodePort: - #Provide the tcp node port for node - #Eg. targetPort: 27017 - targetPort: 27017 diff --git a/platforms/r3-corda/charts/corda-network-service/Chart.yaml b/platforms/r3-corda/charts/corda-network-service/Chart.yaml new file mode 100644 index 00000000000..d9de3a66787 --- /dev/null +++ b/platforms/r3-corda/charts/corda-network-service/Chart.yaml @@ -0,0 +1,25 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +apiVersion: v1 +name: corda-network-service +description: "R3 Corda Network Service: Doorman, Networkmap and MongoDB." +version: 1.0.0 +appVersion: "latest" +keywords: + - bevel + - corda + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/r3-corda/charts/corda-network-service/README.md b/platforms/r3-corda/charts/corda-network-service/README.md new file mode 100644 index 00000000000..4ae6252b7b4 --- /dev/null +++ b/platforms/r3-corda/charts/corda-network-service/README.md @@ -0,0 +1,146 @@ +[//]: # (##############################################################################################) +[//]: # (Copyright Accenture. 
All Rights Reserved.)
+[//]: # (SPDX-License-Identifier: Apache-2.0)
+[//]: # (##############################################################################################)
+
+# corda-network-service
+
+This chart is a component of Hyperledger Bevel. The corda-network-service chart deploys an R3 Corda Doorman, NetworkMap and the associated MongoDB database. If enabled, the generated keys are stored in the configured Vault and also saved as Kubernetes secrets. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details.
+
+## TL;DR
+
+```bash
+helm repo add bevel https://hyperledger.github.io/bevel
+helm install network-service bevel/corda-network-service
+```
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+
+If Hashicorp Vault is used, then
+- HashiCorp Vault Server 1.13.1+
+
+> **Important**: Ensure the `corda-init` chart has been installed before installing this. Also check the dependent charts.
+
+## Installing the Chart
+
+To install the chart with the release name `network-service`:
+
+```bash
+helm repo add bevel https://hyperledger.github.io/bevel
+helm install network-service bevel/corda-network-service
+```
+
+The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `network-service` deployment:
+
+```bash
+helm uninstall network-service
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Parameters
+
+### Global parameters
+These parameters are referred to identically in each parent and child chart.
+| Name | Description | Default Value |
+|--------|---------|-------------|
+| `global.serviceAccountName` | The service account name that will be used for Vault auth management | `vault-auth` |
+| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. Currently only `aws` and `minikube` are tested | `aws` |
+| `global.cluster.cloudNativeServices` | Only `false` is implemented; `true` (use Cloud Native Services: Secrets Manager and IAM for AWS, Key Vault and Managed Identities for Azure) is reserved for the future | `false` |
+| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` is supported. | `hashicorp` |
+| `global.vault.role` | Role used for authentication with Vault | `vault-role` |
+| `global.vault.address` | URL of the Vault server | `""` |
+| `global.vault.authPath` | Authentication path for Vault | `supplychain` |
+| `global.vault.secretEngine` | Name of the Vault secret engine | `secretsv2` |
+| `global.vault.secretPrefix` | Vault secret prefix, which must start with `data/` | `data/supplychain` |
+| `global.proxy.provider` | The proxy or Ingress provider. Can be `none` or `ambassador` | `ambassador` |
+| `global.proxy.externalUrlSuffix` | The external URL suffix at which the Corda P2P service will be available | `test.blockchaincloudpoc.com` |
+
+### Storage
+
+| Name | Description | Default Value |
+|--------|---------|-------------|
+| `storage.size` | Size of the volume needed for the NMS and Doorman nodes | `1Gi` |
+| `storage.dbSize` | Size of the volume needed for MongoDB | `1Gi` |
+| `storage.allowedTopologies.enabled` | Check [bevel-storageclass](../../../shared/charts/bevel-storageclass/README.md) for details | `false` |
+
+### TLS
+This is where you can override the values for the [corda-certs-gen subchart](../corda-certs-gen/README.md).
+
+| Name | Description | Default Value |
+|--------|---------|-------------|
+| `tls.enabled` | Use TLS for all communications | `false` |
+| `tls.settings.networkServices` | Enable TLS certificate generation for Doorman and NMS | `true` |
+
+### Image
+| Name | Description | Default Value |
+| -------------| ---------- | --------- |
+| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` |
+| `image.pullPolicy` | Pull policy to be used for the Docker images | `IfNotPresent` |
+| `image.mongo.repository` | MongoDB image repository | `mongo` |
+| `image.mongo.tag` | MongoDB image tag, matching the MongoDB version | `3.6.6` |
+| `image.hooks.repository` | Corda hooks image repository | `ghcr.io/hyperledger/bevel-build` |
+| `image.hooks.tag` | Corda hooks image tag | `jdk8-stable` |
+| `image.doorman` | Corda Doorman image repository and tag | `ghcr.io/hyperledger/bevel-doorman-linuxkit:latest` |
+| `image.nms` | Corda Network Map image repository and tag | `ghcr.io/hyperledger/bevel-networkmap-linuxkit:latest` |
+
+### Common Settings
+| Name | Description | Default Value |
+| -------------| ---------- | --------- |
+| `settings.removeKeysOnDelete` | Flag to delete the secrets on uninstall | `true` |
+| `settings.rootSubject` | X.509 Subject for the Corda Root CA | `"CN=DLT Root CA,OU=DLT,O=DLT,L=New York,C=US"` |
+| `settings.mongoSubject` | X.509 Subject for the MongoDB CA | `"C=US,ST=New York,L=New York,O=Lite,OU=DBA,CN=mongoDB"` |
+| `settings.dbPort` | MongoDB port | `27017` |
+
+### Doorman
+
+| Name | Description | Default Value |
+| -------------| ---------- | --------- |
+| `doorman.subject` | X.509 Subject for the Doorman | `"CN=Corda Doorman CA,OU=DOORMAN,O=DOORMAN,L=New York,C=US"` |
+| `doorman.username` | Username of the Doorman DB | `doorman` |
+| `doorman.authPassword` | Password of the `sa` user for the Doorman admin API | `admin` |
+| `doorman.dbPassword` | Password for the Doorman DB | `newdbnm` |
+| `doorman.port` | Port for the Doorman service | `8080` |
+
+### NMS
+
+| Name | Description | Default Value |
+| -------------| ---------- | --------- |
+| `nms.subject` | X.509 Subject for the NetworkMap | `"CN=Network Map,OU=FRA,O=FRA,L=Berlin,C=DE"` |
+| `nms.username` | Username of the NetworkMap DB | `networkmap` |
+| `nms.authPassword` | Password of the `sa` user for the NetworkMap admin API | `admin` |
+| `nms.dbPassword` | Password for the NetworkMap DB | `newdbnm` |
+| `nms.port` | Port for the NetworkMap service | `8080` |
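+
+As a usage sketch, the Doorman, NMS and proxy parameters above can be supplied through an overrides file at install time. The Vault address, namespace, passwords and file name below are placeholders (assuming a Hashicorp Vault back-end), not recommended values:
+
+```bash
+# Placeholder overrides; replace the Vault address, URL suffix, subjects and
+# passwords with the values for your own network before installing.
+cat > network-service-values.yaml <<'EOF'
+global:
+  serviceAccountName: vault-auth
+  vault:
+    type: hashicorp
+    address: https://vault.example.com:8200     # placeholder Vault address
+    authPath: supplychain
+    secretEngine: secretsv2
+    secretPrefix: data/supplychain
+  proxy:
+    provider: ambassador
+    externalUrlSuffix: test.blockchaincloudpoc.com
+doorman:
+  subject: "CN=Corda Doorman CA,OU=DOORMAN,O=DOORMAN,L=New York,C=US"
+  dbPassword: newdbnm                           # placeholder; change for real deployments
+nms:
+  subject: "CN=Network Map,OU=FRA,O=FRA,L=Berlin,C=DE"
+  dbPassword: newdbnm                           # placeholder; change for real deployments
+EOF
+helm install network-service bevel/corda-network-service --namespace supplychain -f network-service-values.yaml
+```
+
+## License
+
+This chart is licensed under the Apache v2.0 license.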
+ +Copyright © 2024 Accenture + +### Attribution + +This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: + +``` +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +``` diff --git a/platforms/r3-corda/charts/corda-network-service/requirements.yaml b/platforms/r3-corda/charts/corda-network-service/requirements.yaml new file mode 100644 index 00000000000..35059a61d0d --- /dev/null +++ b/platforms/r3-corda/charts/corda-network-service/requirements.yaml @@ -0,0 +1,14 @@ +dependencies: + - name: bevel-storageclass + alias: storage + repository: "file://../../../shared/charts/bevel-storageclass" + tags: + - storage + version: ~1.0.0 + - name: corda-certs-gen + alias: tls + repository: "file://../corda-certs-gen" + tags: + - bevel + version: ~1.0.0 + condition: tls.enabled diff --git a/platforms/r3-corda/charts/corda-network-service/templates/_helpers.tpl b/platforms/r3-corda/charts/corda-network-service/templates/_helpers.tpl new file mode 100644 index 00000000000..d9aa91552d3 --- /dev/null +++ b/platforms/r3-corda/charts/corda-network-service/templates/_helpers.tpl @@ -0,0 +1,29 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "corda-network-service.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "corda-network-service.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "corda-network-service.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/platforms/r3-corda/charts/corda-network-service/templates/hooks-pre-delete.yaml b/platforms/r3-corda/charts/corda-network-service/templates/hooks-pre-delete.yaml new file mode 100644 index 00000000000..3cd67972e03 --- /dev/null +++ b/platforms/r3-corda/charts/corda-network-service/templates/hooks-pre-delete.yaml @@ -0,0 +1,71 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "corda-network-service.fullname" . }}-pre-delete-hook + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/hook: pre-delete + helm.sh/hook-weight: "0" + helm.sh/hook-delete-policy: "hook-succeeded" + labels: + app.kubernetes.io/name: pre-delete-hook + app.kubernetes.io/component: cleanup + app.kubernetes.io/part-of: {{ include "corda-network-service.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +spec: + backoffLimit: 3 + completions: 1 + template: + metadata: + labels: + app.kubernetes.io/name: pre-delete-hook + app.kubernetes.io/component: cleanup + app.kubernetes.io/part-of: {{ include "corda-network-service.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + restartPolicy: "Never" + containers: + - name: {{ template "corda-network-service.fullname" . }}-cleanup + image: "{{ .Values.image.hooks.repository }}:{{ .Values.image.hooks.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - /bin/bash + - -c + args: + - | + + echo "{{ template "corda-network-service.fullname" . }} pre-delete-hook ..." + +{{- if and (ne .Values.global.cluster.provider "minikube") (.Values.global.cluster.cloudNativeServices) }} + # placeholder for cloudNative deleteSecret function +{{- else }} + + function deleteSecret { + key=$1 + kubectl delete secret ${key} --namespace {{ .Release.Namespace }} + } + +{{- end }} + +{{- if .Values.settings.removeKeysOnDelete }} + +{{- if and (ne .Values.global.cluster.provider "minikube") (.Values.global.cluster.cloudNativeServices) }} + deleteSecret {{.Release.Name }}-nmskeystore + deleteSecret {{.Release.Name }}-doormankeystore + deleteSecret {{.Release.Name }}-rootcakeystore + deleteSecret {{.Release.Name }}-rootcacert + deleteSecret {{.Release.Name }}-rootcakey + deleteSecret {{.Release.Name }}-dbcert + deleteSecret {{.Release.Name }}-dbcacert +{{- else }} + deleteSecret {{.Release.Name }}-certs +{{- end }} + +{{- end }} + echo "Completed" diff --git a/platforms/r3-corda/charts/corda-network-service/templates/hooks-pre-install.yaml b/platforms/r3-corda/charts/corda-network-service/templates/hooks-pre-install.yaml new file mode 100644 index 00000000000..23dfa85a33c --- /dev/null +++ b/platforms/r3-corda/charts/corda-network-service/templates/hooks-pre-install.yaml @@ -0,0 +1,226 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "corda-network-service.fullname" . }}-pre-install-hook + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install + "helm.sh/hook-weight": "0" + "helm.sh/hook-delete-policy": "before-hook-creation" + labels: + app.kubernetes.io/name: pre-install-hook + app.kubernetes.io/component: certgen + app.kubernetes.io/part-of: {{ include "corda-network-service.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +spec: + backoffLimit: 1 + completions: 1 + template: + metadata: + labels: + app.kubernetes.io/name: pre-install-hook + app.kubernetes.io/component: certgen + app.kubernetes.io/part-of: {{ include "corda-network-service.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + restartPolicy: "OnFailure" + containers: + - name: corda-certgen + image: {{ .Values.image.hooks.repository }}:{{ .Values.image.hooks.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + securityContext: + runAsUser: 0 + volumeMounts: + - name: generated-config + mountPath: /home + - name: scripts-volume + mountPath: /scripts/bevel-vault.sh + subPath: bevel-vault.sh + - name: openssl-conf + mountPath: /home/openssl.conf + subPath: openssl.conf + {{- if (eq .Values.global.vault.type "hashicorp") }} + env: + - name: VAULT_ADDR + value: "{{ .Values.global.vault.address }}" + - name: VAULT_SECRET_ENGINE + value: "{{ .Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ .Values.global.vault.secretPrefix }}" + - name: KUBERNETES_AUTH_PATH + value: "{{ .Values.global.vault.authPath }}" + - name: VAULT_APP_ROLE + value: "{{ .Values.global.vault.role }}" + - name: VAULT_TYPE + value: "{{ .Values.global.vault.type }}" + {{- end }} + command: + - /bin/bash + - -c + args: + - | +{{- if (eq .Values.global.vault.type "hashicorp") }} + . /scripts/bevel-vault.sh + echo "Getting vault Token..." + vaultBevelFunc "init" + #Read if secret exists in Vault + vaultBevelFunc 'readJson' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/{{ .Release.Name }}-certs" + function safeWriteSecret { + key=$1 + fpath=$2 + if [ "$SECRETS_AVAILABLE" == "yes" ] + then + # Get secret from Vault and create the k8s secret if it does not exist + kubectl get secret ${key}-certs --namespace {{ .Release.Namespace }} -o json > /dev/null 2>&1 + if [ $? 
-ne 0 ]; then + NMS_KEYS=$(echo ${VAULT_SECRET} | jq -r '.["nmskeystore_base64"]') + DOORMAN_KEYS=$(echo ${VAULT_SECRET} | jq -r '.["doormankeystore_base64"]') + ROOT_CA=$(echo ${VAULT_SECRET} | jq -r '.["rootcakeystore_base64"]') + CA_CERTS=$(echo ${VAULT_SECRET} | jq -r '.["rootcacert_base64"]' | base64 -d) + CA_KEY=$(echo ${VAULT_SECRET} | jq -r '.["rootcakey_base64"]' | base64 -d) + MONGO_CERT=$(echo ${VAULT_SECRET} | jq -r '.["dbcert_base64"]') + MONGO_CACERT=$(echo ${VAULT_SECRET} | jq -r '.["dbcacert_base64"]') + echo $NMS_KEYS | base64 -d > /tmp/nmsKeys.jks + echo $DOORMAN_KEYS | base64 -d > /tmp/doormankeys.jks + echo $ROOT_CA | base64 -d > /tmp/rootkeys.jks + echo $CA_CERTS > /tmp/rootca.pem + echo $CA_KEY > /tmp/rootca.key + echo $MONGO_CERT > /tmp/mongodb.pem + echo $MONGO_CACERT > /tmp/mongoca.pem + kubectl create secret generic ${key}-certs --namespace {{ .Release.Namespace }} \ + --from-file=nmskeystore=/tmp/nmsKeys.jks --from-file=doormankeystore=/tmp/doormankeys.jks \ + --from-file=rootcakeystore=/tmp/rootkeys.jks \ + --from-file=rootcacert=/tmp/rootca.pem --from-file=rootcakey=/tmp/rootca.key \ + --from-file=mongodb.pem=/tmp/mongodb.pem --from-file=mongoCA.crt=/tmp/mongoca.pem + fi + else + # Save Certs to Vault + # Use -w0 to get single line base64 -w0 + NMS_KEYS=$(cat ${fpath}/nms/keys.jks | base64 -w0) + DOORMAN_KEYS=$(cat ${fpath}/doorman/keys.jks | base64 -w0) + ROOT_CA=$(cat ${fpath}/rootca/keys.jks | base64 -w0) + CA_CERTS=$(cat ${fpath}/rootca/cordarootca.pem | base64 -w0) + CA_KEY=$(cat ${fpath}/rootca/cordarootca.key | base64 -w0) + MONGO_CERT=$(cat ${fpath}/mongodb/mongodb.pem | base64 -w0) + MONGO_CACERT=$(cat ${fpath}/mongodb/mongoCA.crt | base64 -w0) + # create a JSON file for the data related to node crypto + echo " + { + \"data\": + { + \"nmskeystore_base64\": \"${NMS_KEYS}\", + \"doormankeystore_base64\": \"${DOORMAN_KEYS}\", + \"rootcakeystore_base64\": \"${ROOT_CA}\", + \"rootcacert_base64\": \"${CA_CERTS}\", + \"rootcakey_base64\": \"${CA_KEY}\", + \"dbcert_base64\": \"${MONGO_CERT}\", + \"dbcacert_base64\": \"${MONGO_CACERT}\" + } + }" > payload.json + vaultBevelFunc 'write' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/${key}-certs" 'payload.json' + rm payload.json + # Also create the k8s secret + kubectl create secret generic ${key}-certs --namespace {{ .Release.Namespace }} \ + --from-file=nmskeystore=${fpath}/nms/keys.jks --from-file=doormankeystore=${fpath}/doorman/keys.jks \ + --from-file=rootcakeystore=${fpath}/rootca/keys.jks \ + --from-file=rootcacert=${fpath}/rootca/cordarootca.pem --from-file=rootcakey=${fpath}/rootca/cordarootca.key \ + --from-literal=mongodb.pem=${MONGO_CERT} --from-literal=mongoCA.crt=${MONGO_CACERT} + fi + } +{{- else }} + function safeWriteSecret { + key=$1 + fpath=$2 + kubectl get secret ${key}-certs --namespace {{ .Release.Namespace }} -o json > /dev/null 2>&1 + if [ $? -ne 0 ]; then + kubectl create secret generic ${key}-certs --namespace {{ .Release.Namespace }} \ + --from-file=nmskeystore=${fpath}/nms/keys.jks --from-file=doormankeystore=${fpath}/doorman/keys.jks \ + --from-file=rootcakeystore=${fpath}/rootca/keys.jks \ + --from-file=rootcacert=${fpath}/rootca/cordarootca.pem --from-file=rootcakey=${fpath}/rootca/cordarootca.key \ + --from-file=mongodb.pem=<(base64 -w0 ${fpath}/mongodb/mongodb.pem) --from-file=mongoCA.crt=<(base64 -w0 ${fpath}/mongodb/mongoCA.crt) + fi + } +{{- end }} + if [ "$SECRETS_AVAILABLE" == "yes" ] + then + echo "Certificates found for {{ .Release.Name }} ..." 
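+            # Certificates were found in Vault (SECRETS_AVAILABLE=yes), so generation is skipped;
+            # the safeWriteSecret calls below recreate the Kubernetes secret from Vault if it is missing.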
+ else + echo "Creating certificates for {{ .Release.Name }} ..." + ROOTCA_PATH=/home/certificates/rootca + DBCA_PATH=/home/certificates/mongodb + DOORMAN_CERTS=/home/certificates/doorman + NMS_CERTS=/home/certificates/nms + mkdir -p ${ROOTCA_PATH} + mkdir -p ${DBCA_PATH} + mkdir -p ${DOORMAN_CERTS} + mkdir -p ${NMS_CERTS} + # Do not change keystore_pass as it is hardcoded as default in doorman/networkmap app + KEYSTORE_PASS='changeme' + + cd ${ROOTCA_PATH} + keytool -genkey -keyalg RSA -alias key -dname "{{ .Values.settings.rootSubject }}" -keystore keys.jks -storepass $KEYSTORE_PASS -keypass $KEYSTORE_PASS + openssl ecparam -name prime256v1 -genkey -noout -out cordarootca.key + openssl req -x509 -config /home/openssl.conf -new -nodes -key cordarootca.key -days 1024 -out cordarootca.pem -extensions v3_ca -subj '/{{ .Values.settings.rootSubject | replace "," "/" }}' + openssl pkcs12 -export -name cert -inkey cordarootca.key -in cordarootca.pem -out cordarootcacert.pkcs12 -cacerts -passin pass:${KEYSTORE_PASS} -passout pass:${KEYSTORE_PASS} + openssl pkcs12 -export -name key -inkey cordarootca.key -in cordarootca.pem -out cordarootcakey.pkcs12 -passin pass:${KEYSTORE_PASS} -passout pass:${KEYSTORE_PASS} + eval "yes | keytool -importkeystore -srckeystore cordarootcacert.pkcs12 -srcstoretype PKCS12 -srcstorepass $KEYSTORE_PASS -destkeystore keys.jks -deststorepass $KEYSTORE_PASS" + eval "yes | keytool -importkeystore -srckeystore cordarootcakey.pkcs12 -srcstoretype PKCS12 -srcstorepass $KEYSTORE_PASS -destkeystore keys.jks -deststorepass $KEYSTORE_PASS" + + cd ${DBCA_PATH} + openssl genrsa -out mongoCA.key 3072 + openssl req -x509 -config /home/openssl.conf -new -extensions v3_ca -key mongoCA.key -days 365 -out mongoCA.crt -subj '/{{ .Values.settings.mongoSubject | replace "," "/" }}' + openssl req -new -nodes -newkey rsa:4096 -keyout mongodb.key -out mongodb.csr -subj '/{{ .Values.settings.mongoSubject | replace "," "/" }}' + openssl x509 -CA mongoCA.crt -CAkey mongoCA.key -CAcreateserial -CAserial serial -req -days 365 -in mongodb.csr -out mongodb.crt + cat mongodb.key mongodb.crt > mongodb.pem + + cd ${DOORMAN_CERTS} + keytool -genkey -keyalg RSA -alias key -dname "{{ .Values.doorman.subject }}" -keystore keys.jks -storepass $KEYSTORE_PASS -keypass $KEYSTORE_PASS + openssl ecparam -name prime256v1 -genkey -noout -out cordadoormanca.key + openssl req -new -nodes -key cordadoormanca.key -out cordadoormanca.csr -subj '/{{ .Values.doorman.subject | replace "," "/" }}' + openssl x509 -req -days 1000 -in cordadoormanca.csr -CA ${ROOTCA_PATH}/cordarootca.pem -CAkey ${ROOTCA_PATH}/cordarootca.key -out cordadoormanca.pem -CAcreateserial \ + -CAserial serial -extfile /home/openssl.conf -extensions doorman + openssl pkcs12 -export -name cert -inkey cordadoormanca.key -in cordadoormanca.pem -out cordadoormancacert.pkcs12 -cacerts -passin pass:${KEYSTORE_PASS} -passout pass:${KEYSTORE_PASS} + openssl pkcs12 -export -name key -inkey cordadoormanca.key -in cordadoormanca.pem -out cordadoormancakey.pkcs12 -passin pass:${KEYSTORE_PASS} -passout pass:${KEYSTORE_PASS} + eval "yes | keytool -importkeystore -srckeystore cordadoormancacert.pkcs12 -srcstoretype PKCS12 -srcstorepass $KEYSTORE_PASS -destkeystore keys.jks -deststorepass $KEYSTORE_PASS" + eval "yes | keytool -importkeystore -srckeystore cordadoormancakey.pkcs12 -srcstoretype PKCS12 -srcstorepass $KEYSTORE_PASS -destkeystore keys.jks -deststorepass $KEYSTORE_PASS" + + cd ${NMS_CERTS} + keytool -genkey -keyalg RSA -alias key -dname "{{ 
.Values.nms.subject }}" -keystore keys.jks -storepass $KEYSTORE_PASS -keypass $KEYSTORE_PASS + openssl ecparam -name prime256v1 -genkey -noout -out cordanetworkmap.key + openssl req -new -nodes -key cordanetworkmap.key -out cordanetworkmap.csr -subj '/{{ .Values.nms.subject | replace "," "/" }}' + openssl x509 -req -days 1000 -in cordanetworkmap.csr -CA ${ROOTCA_PATH}/cordarootca.pem -CAkey ${ROOTCA_PATH}/cordarootca.key -out cordanetworkmap.pem -CAcreateserial \ + -CAserial serial -extfile /home/openssl.conf -extensions networkMap + openssl pkcs12 -export -name cert -inkey cordanetworkmap.key -in cordanetworkmap.pem -out cordanetworkmapcacert.pkcs12 -cacerts -passin pass:${KEYSTORE_PASS} -passout pass:${KEYSTORE_PASS} + openssl pkcs12 -export -name key -inkey cordanetworkmap.key -in cordanetworkmap.pem -out cordanetworkmapcakey.pkcs12 -passin pass:${KEYSTORE_PASS} -passout pass:${KEYSTORE_PASS} + eval "yes | keytool -importkeystore -srckeystore cordanetworkmapcacert.pkcs12 -srcstoretype PKCS12 -srcstorepass ${KEYSTORE_PASS} -destkeystore keys.jks -deststorepass ${KEYSTORE_PASS}" + eval "yes | keytool -importkeystore -srckeystore cordanetworkmapcakey.pkcs12 -srcstoretype PKCS12 -srcstorepass ${KEYSTORE_PASS} -destkeystore keys.jks -deststorepass ${KEYSTORE_PASS}" + fi; + echo "Creating {{ .Release.Name }}-certs secrets in k8s ..." +{{- if and (ne .Values.global.cluster.provider "minikube") (.Values.global.cluster.cloudNativeServices) }} + safeWriteSecret {{ .Release.Name }}-nmskeystore $NMS_CERTS/keys.jks + safeWriteSecret {{ .Release.Name }}-doormankeystore $DOORMAN_CERTS/keys.jks + safeWriteSecret {{ .Release.Name }}-rootcakeystore $ROOTCA_PATH/keys.jks + safeWriteSecret {{ .Release.Name }}-rootcacert $ROOTCA_PATH/cordarootca.pem + safeWriteSecret {{ .Release.Name }}-rootcakey $ROOTCA_PATH/cordarootca.key + safeWriteSecret {{ .Release.Name }}-dbcert $DBCA_PATH/mongodb.pem + safeWriteSecret {{ .Release.Name }}-dbcacert $DBCA_PATH/mongoCA.crt +{{- else }} + safeWriteSecret {{ .Release.Name }} /home/certificates +{{- end }} + echo "Completed ..." + volumes: + - name: generated-config + emptyDir: {} + - name: scripts-volume + configMap: + name: bevel-vault-script + defaultMode: 0777 + - name: openssl-conf + configMap: + name: openssl-conf diff --git a/platforms/r3-corda/charts/corda-network-service/templates/service.yaml b/platforms/r3-corda/charts/corda-network-service/templates/service.yaml new file mode 100644 index 00000000000..0bcb8459b0e --- /dev/null +++ b/platforms/r3-corda/charts/corda-network-service/templates/service.yaml @@ -0,0 +1,171 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: {{ .Release.Name }}-mongodb + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: mongodb-service + app.kubernetes.io/component: database + app.kubernetes.io/part-of: {{ include "corda-network-service.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: mongodb-statefulset + app.kubernetes.io/component: database + app.kubernetes.io/part-of: {{ include "corda-network-service.fullname" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + ports: + - name: mongo-db + protocol: TCP + port: {{ .Values.settings.dbPort }} + targetPort: {{ .Values.settings.dbPort }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ .Release.Name }}-doorman + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: doorman-service + app.kubernetes.io/component: doorman + app.kubernetes.io/part-of: {{ include "corda-network-service.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: doorman-statefulset + app.kubernetes.io/component: doorman + app.kubernetes.io/part-of: {{ include "corda-network-service.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + ports: + - name: doorman + protocol: TCP + port: {{ .Values.doorman.port }} + targetPort: {{ .Values.doorman.port }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ .Release.Name }}-nms + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: networkmap-service + app.kubernetes.io/component: nms + app.kubernetes.io/part-of: {{ include "corda-network-service.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} +spec: + type: ClusterIP + selector: + app.kubernetes.io/component: nms + app.kubernetes.io/part-of: {{ include "corda-network-service.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + ports: + - name: nms + protocol: TCP + port: {{ .Values.nms.port }} + targetPort: {{ .Values.nms.port }} +{{- if eq .Values.global.proxy.provider "ambassador" }} +{{- if .Values.tls.enabled }} +--- +## Host for doorman +apiVersion: getambassador.io/v3alpha1 +kind: Host +metadata: + name: {{ .Release.Name }}-doorman +spec: + hostname: {{ .Release.Name }}-doorman.{{ .Values.global.proxy.externalUrlSuffix }} + acmeProvider: + authority: none + requestPolicy: + insecure: + action: Reject + tlsSecret: + name: doorman-tls-certs + namespace: {{ .Release.Namespace }} +--- +## Host for nms +apiVersion: getambassador.io/v3alpha1 +kind: Host +metadata: + name: {{ .Release.Name }}-nms +spec: + hostname: {{ .Release.Name }}-nms.{{ .Values.global.proxy.externalUrlSuffix }} + acmeProvider: + authority: none + requestPolicy: + insecure: + action: Reject + tlsSecret: + name: nms-tls-certs + namespace: {{ .Release.Namespace }} +{{- end }} +--- +## Mapping for doorman port +apiVersion: getambassador.io/v3alpha1 +kind: Mapping +metadata: + name: {{ .Release.Name }}-mapping + namespace: {{ .Release.Namespace }} +spec: + host: {{ .Release.Name }}-doorman.{{ .Values.global.proxy.externalUrlSuffix }} + prefix: / + service: {{ .Release.Name }}-doorman.{{ .Release.Namespace }}:{{ .Values.doorman.port }} +{{- if .Values.tls.enabled }} + tls: {{ .Release.Name }}-doorman-tlscontext +--- +apiVersion: getambassador.io/v3alpha1 +kind: TLSContext +metadata: + name: {{ .Release.Name }}-doorman-tlscontext + namespace: {{ .Release.Namespace }} +spec: + hosts: + - {{ .Release.Name }}-doorman.{{ .Values.global.proxy.externalUrlSuffix }} + secret: doorman-tls-certs.{{ .Release.Namespace }} + secret_namespacing: true + min_tls_version: v1.2 +{{- end }} +--- +## Mapping for nms port +apiVersion: getambassador.io/v3alpha1 +kind: Mapping +metadata: + name: {{ .Release.Name }}-nms-mapping + namespace: {{ .Release.Namespace }} +spec: + host: {{ .Release.Name }}-nms.{{ .Values.global.proxy.externalUrlSuffix }} + prefix: / + service: {{ .Release.Name }}-nms.{{ .Release.Namespace }}:{{ .Values.nms.port }} +{{- if .Values.tls.enabled }} + tls: {{ .Release.Name }}-nms-tlscontext +--- +apiVersion: getambassador.io/v3alpha1 +kind: TLSContext +metadata: + name: {{ .Release.Name }}-nms-tlscontext + namespace: {{ .Release.Namespace }} +spec: + hosts: + - {{ .Release.Name }}-nms.{{ .Values.global.proxy.externalUrlSuffix }} + secret: nms-tls-certs.{{ .Release.Namespace }} + secret_namespacing: true + min_tls_version: v1.2 +{{- end }} +{{- end }} diff --git a/platforms/r3-corda/charts/corda-network-service/templates/statefulset-doorman.yaml b/platforms/r3-corda/charts/corda-network-service/templates/statefulset-doorman.yaml new file mode 100644 index 00000000000..a69217e6e3b --- /dev/null +++ b/platforms/r3-corda/charts/corda-network-service/templates/statefulset-doorman.yaml @@ -0,0 +1,138 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "corda-network-service.fullname" . }}-doorman + namespace: {{ .Release.Namespace }} + labels: + app: {{ include "corda-network-service.fullname" . 
}} + app.kubernetes.io/name: doorman-statefulset + app.kubernetes.io/component: doorman + app.kubernetes.io/part-of: {{ include "corda-network-service.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} +spec: + replicas: 1 + podManagementPolicy: OrderedReady + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ include "corda-network-service.fullname" . }} + app.kubernetes.io/name: doorman-statefulset + app.kubernetes.io/component: doorman + app.kubernetes.io/part-of: {{ include "corda-network-service.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + serviceName: {{ include "corda-network-service.fullname" . }} + volumeClaimTemplates: + - metadata: + name: data + spec: + storageClassName: storage-{{ .Release.Name }} + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storage.size }} + template: + metadata: + labels: + app: {{ include "corda-network-service.fullname" . }} + app.kubernetes.io/name: doorman-statefulset + app.kubernetes.io/component: doorman + app.kubernetes.io/part-of: {{ include "corda-network-service.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + hostname: {{ .Release.Name }} + imagePullSecrets: + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} + securityContext: + fsGroup: 1000 + containers: + - name: doorman + image: {{ .Values.image.doorman }} + imagePullPolicy: IfNotPresent + env: + - name: DOORMAN_PORT + value: "{{ .Values.doorman.port }}" + - name: DOORMAN_ROOT_CA_NAME + value: {{ .Values.doorman.subject }} + - name: DOORMAN_TLS + value: "{{ .Values.tls.enabled }}" + - name: DOORMAN_DB + value: /opt/doorman/db + - name: DOORMAN_AUTH_USERNAME + value: sa + - name: DB_URL + value: {{ .Release.Name }}-mongodb + - name: DB_PORT + value: "{{ .Values.settings.dbPort }}" + - name: DATABASE + value: admin + - name: DB_USERNAME + value: {{ .Values.doorman.username }} + command: ["sh", "-c"] + args: + - |- + #!/usr/bin/env sh + + export DB_PASSWORD={{ .Values.doorman.dbPassword }} + export DOORMAN_AUTH_PASSWORD={{ .Values.doorman.authPassword }} + # Copy from read-only to read-write dirs + mkdir -p /opt/doorman/db/certs/root + mkdir -p /opt/doorman/db/certs/doorman + + cp /certs/rootcakeystore /opt/doorman/db/certs/root/keys.jks + cp /certs/doormankeystore /opt/doorman/db/certs/doorman/keys.jks + + if [ "$DOORMAN_TLS" = "true" ]; then + cat /certs/mongoCA.crt | base64 -d > /opt/doorman/mongoCA.crt + export DOORMAN_MONGO_CONNECTION_STRING="mongodb://${DB_USERNAME}:${DB_PASSWORD}@${DB_URL}:${DB_PORT}/${DATABASE}?ssl=true&sslInvalidHostNameAllowed=true&streamType=netty" + + # tls certs are mounted via tls secrets + export DOORMAN_TLS_CERT_PATH="/secret/tls.crt" + export DOORMAN_TLS_KEY_PATH="/secret/tls.key" + + # import self signed tls certificate of mongodb, since java only trusts certificate signed by well known CA + yes | keytool -importcert -file /opt/doorman/mongoCA.crt -storepass changeit -alias mongoca -keystore /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts + else + export 
DOORMAN_MONGO_CONNECTION_STRING="mongodb://${DB_USERNAME}:${DB_PASSWORD}@${DB_URL}:${DB_PORT}/${DATABASE}" + fi; + # command to run jar + java -jar /opt/doorman/doorman.jar 2>&1 + ports: + - containerPort: {{ .Values.doorman.port }} + volumeMounts: + - name: data + mountPath: "/opt/doorman/db" + readOnly: false + - name: network-certs + mountPath: "/certs" +{{- if .Values.tls.enabled }} + - name: doorman-certs + mountPath: "/secret" +{{- end }} + volumes: + - name: scripts-volume + configMap: + name: bevel-vault-script + defaultMode: 0777 + - name: network-certs + secret: + secretName: {{ .Release.Name }}-certs +{{- if .Values.tls.enabled }} + - name: doorman-certs + secret: + secretName: doorman-tls-certs +{{- end }} diff --git a/platforms/r3-corda/charts/corda-network-service/templates/statefulset-mongodb.yaml b/platforms/r3-corda/charts/corda-network-service/templates/statefulset-mongodb.yaml new file mode 100644 index 00000000000..1639d3298e9 --- /dev/null +++ b/platforms/r3-corda/charts/corda-network-service/templates/statefulset-mongodb.yaml @@ -0,0 +1,136 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "corda-network-service.fullname" . }}-db + namespace: {{ .Release.Namespace }} + labels: + app: {{ include "corda-network-service.fullname" . }}-db + app.kubernetes.io/name: mongodb-statefulset + app.kubernetes.io/component: database + app.kubernetes.io/part-of: {{ include "corda-network-service.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} +spec: + replicas: 1 + podManagementPolicy: OrderedReady + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ include "corda-network-service.fullname" . }}-db + app.kubernetes.io/name: mongodb-statefulset + app.kubernetes.io/component: database + app.kubernetes.io/part-of: {{ include "corda-network-service.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + serviceName: {{ include "corda-network-service.fullname" . }}-db + volumeClaimTemplates: + - metadata: + name: data-mongodb + spec: + storageClassName: storage-{{ .Release.Name }} + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storage.dbSize }} + template: + metadata: + labels: + app: {{ include "corda-network-service.fullname" . }}-db + app.kubernetes.io/name: mongodb-statefulset + app.kubernetes.io/component: database + app.kubernetes.io/part-of: {{ include "corda-network-service.fullname" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + hostname: {{ .Release.Name }} + imagePullSecrets: + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} + securityContext: + fsGroup: 1000 + containers: + - name: mongodb + image: {{ .Values.image.mongo.repository }}:{{ .Values.image.mongo.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: MONGO_INITDB_ROOT_USERNAME + value: {{ .Values.doorman.username }} + - name: MONGO_INITDB_ROOT_PASSWORD + value: {{ .Values.doorman.dbPassword }} + # The command and lifecycle sections below are rendered only when TLS is enabled + {{- if .Values.tls.enabled }} + command: + - /bin/sh + - -c + - > + if [ -f /data/db/admin-user.lock ]; then + # /data/db/admin-user.lock is created by the postStart hook once the admin user has been added; if it exists, initial setup is complete and mongod can start normally. + echo "KUBERNETES LOG $HOSTNAME- Starting Mongo Daemon" + cat /certs/mongoCA.crt | base64 -d > /data/db/mongoCA.crt + cat /certs/mongodb.pem | base64 -d > /data/db/mongodb.pem + # Keep wiredTigerCacheSizeGB within the container's memory limit. With --sslAllowConnectionsWithoutCertificates only the client validates the server certificate, so clients can confirm they are receiving data from the intended server. + if [ "$HOSTNAME" = "{{ template "corda-network-service.fullname" . }}-db-0" ]; then + # Single-server MongoDB deployment. + echo "check 1" + mongod --wiredTigerCacheSizeGB 0.25 --bind_ip 0.0.0.0 --sslMode requireSSL --sslPEMKeyFile /data/db/mongodb.pem --sslCAFile /data/db/mongoCA.crt --sslAllowConnectionsWithoutCertificates --sslAllowInvalidHostnames --auth; + fi; + else + echo "KUBERNETES LOG $HOSTNAME- Starting Mongo Daemon with setup setting (authMode)" + mongod --auth; + fi; + lifecycle: + postStart: + exec: + command: + - /bin/sh + - -c + - > + if [ ! -f /data/db/admin-user.lock ]; then + echo "KUBERNETES LOG $HOSTNAME no Admin-user.lock file found yet" + # Username and password for the new database user. + DB_PASSWORD={{ .Values.doorman.dbPassword }} + DB_USERNAME={{ .Values.doorman.username }} + # Sleep 20 seconds to give mongod time to accept connections before creating the user. + sleep 20; + touch /data/db/admin-user.lock + # Add the database user to the admin database; only the first (-db-0) pod performs the creation. + if [ "$HOSTNAME" = "{{ template "corda-network-service.fullname" . }}-db-0" ]; then + echo "KUBERNETES LOG $HOSTNAME- creating admin user" + # Create the admin user in the admin database via the mongo shell. 
+ mongo --eval "db = db.getSiblingDB('admin'); db.createUser({ user: '${DB_USERNAME}', pwd: '${DB_PASSWORD}', roles: [{ role: 'root', db: 'admin' }]});" >> /data/db/config.log + fi; + echo "KUBERNETES LOG $HOSTNAME-shutting mongod down for final restart" + mongod --shutdown; + fi; + {{- end }} + ports: + - containerPort: {{ .Values.settings.dbPort }} + volumeMounts: + - name: data-mongodb + mountPath: "/data/db" +{{- if .Values.tls.enabled }} + - name: network-certs + mountPath: "/certs" + readOnly: false +{{- end }} + volumes: + - name: scripts-volume + configMap: + name: bevel-vault-script + defaultMode: 0777 +{{- if .Values.tls.enabled }} + - name: network-certs + secret: + secretName: {{ .Release.Name }}-certs +{{- end }} diff --git a/platforms/r3-corda/charts/corda-network-service/templates/statefulset-nms.yaml b/platforms/r3-corda/charts/corda-network-service/templates/statefulset-nms.yaml new file mode 100644 index 00000000000..7e23c9e0d29 --- /dev/null +++ b/platforms/r3-corda/charts/corda-network-service/templates/statefulset-nms.yaml @@ -0,0 +1,142 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "corda-network-service.fullname" . }}-nms + namespace: {{ .Release.Namespace }} + labels: + app: {{ include "corda-network-service.fullname" . }} + app.kubernetes.io/name: nms-statefulset + app.kubernetes.io/component: nms + app.kubernetes.io/part-of: {{ include "corda-network-service.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} +spec: + replicas: 1 + podManagementPolicy: OrderedReady + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ include "corda-network-service.fullname" . }} + app.kubernetes.io/name: nms-statefulset + app.kubernetes.io/component: nms + app.kubernetes.io/part-of: {{ include "corda-network-service.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + serviceName: {{ include "corda-network-service.fullname" . }} + volumeClaimTemplates: + - metadata: + name: data + spec: + storageClassName: storage-{{ .Release.Name }} + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storage.size }} + template: + metadata: + labels: + app: {{ include "corda-network-service.fullname" . }} + app.kubernetes.io/name: nms-statefulset + app.kubernetes.io/component: nms + app.kubernetes.io/part-of: {{ include "corda-network-service.fullname" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + hostname: {{ .Release.Name }} + imagePullSecrets: + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} + securityContext: + fsGroup: 1000 + containers: + - name: nms + image: {{ .Values.image.nms }} + imagePullPolicy: IfNotPresent + env: + - name: NETWORKMAP_PORT + value: "{{ .Values.nms.port }}" + - name: NETWORKMAP_ROOT_CA_NAME + value: {{ .Values.nms.subject }} + - name: NETWORKMAP_TLS + value: "{{ .Values.tls.enabled }}" + - name: NETWORKMAP_DB + value: /opt/networkmap/db + - name: DOORMAN_AUTH_USERNAME + value: sa + - name: DB_URL + value: {{ .Release.Name }}-mongodb + - name: DB_PORT + value: "{{ .Values.settings.dbPort }}" + - name: DATABASE + value: admin + - name: DB_USERNAME + value: {{ .Values.nms.username }} + - name: NETWORKMAP_CACHE_TIMEOUT + value: 60S + - name: NETWORKMAP_MONGOD_DATABASE + value: networkmap + command: ["sh", "-c"] + args: + - |- + #!/usr/bin/env sh + + export DB_PASSWORD={{ .Values.nms.dbPassword }} + export NETWORKMAP_AUTH_PASSWORD={{ .Values.nms.authPassword }} + # Copy from read-only to read-write dirs + mkdir -p /opt/networkmap/db/certs/root + mkdir -p /opt/networkmap/db/certs/network-map + + cp /certs/rootcakeystore /opt/networkmap/db/certs/root/keys.jks + cp /certs/nmskeystore /opt/networkmap/db/certs/network-map/keys.jks + + if [ "$NETWORKMAP_TLS" = "true" ]; then + cat /certs/mongoCA.crt | base64 -d > /opt/networkmap/mongoCA.crt + export NETWORKMAP_MONGO_CONNECTION_STRING="mongodb://${DB_USERNAME}:${DB_PASSWORD}@${DB_URL}:${DB_PORT}/${DATABASE}?ssl=true&sslInvalidHostNameAllowed=true&streamType=netty" + + # tls certs are mounted via tls secrets + export NETWORKMAP_TLS_CERT_PATH="/secret/tls.crt" + export NETWORKMAP_TLS_KEY_PATH="/secret/tls.key" + + # import self signed tls certificate of mongodb, since java only trusts certificate signed by well known CA + yes | keytool -importcert -file /opt/networkmap/mongoCA.crt -storepass changeit -alias mongoca -keystore /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts + else + export NETWORKMAP_MONGO_CONNECTION_STRING="mongodb://${DB_USERNAME}:${DB_PASSWORD}@${DB_URL}:${DB_PORT}/${DATABASE}" + fi; + # command to run jar + java -jar /opt/networkmap/network-map-service.jar 2>&1 + ports: + - containerPort: {{ .Values.nms.port }} + volumeMounts: + - name: data + mountPath: "/opt/networkmap/db" + readOnly: false + - name: network-certs + mountPath: "/certs" +{{- if .Values.tls.enabled }} + - name: nms-certs + mountPath: "/secret" +{{- end }} + volumes: + - name: scripts-volume + configMap: + name: bevel-vault-script + defaultMode: 0777 + - name: network-certs + secret: + secretName: {{ .Release.Name }}-certs +{{- if .Values.tls.enabled }} + - name: nms-certs + secret: + secretName: nms-tls-certs +{{- end }} diff --git a/platforms/r3-corda/charts/corda-network-service/values.yaml b/platforms/r3-corda/charts/corda-network-service/values.yaml new file mode 100644 index 00000000000..7b5afbf93e5 --- /dev/null +++ b/platforms/r3-corda/charts/corda-network-service/values.yaml @@ -0,0 +1,82 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Default values for nodechart. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +global: + serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # set to true to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) + vault: + type: hashicorp + role: vault-role + address: + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + proxy: + #This will be the proxy/ingress provider. Can have values "ambassador" or "none" + #Eg. provider: "ambassador" + provider: "ambassador" + #This field contains the external URL of the node + #Eg. externalUrlSuffix: test.blockchaincloudpoc.com + externalUrlSuffix: test.blockchaincloudpoc.com + +storage: + #Provide the size for PVC + #Eg. size: 4Gi + size: 1Gi + dbSize: 1Gi + allowedTopologies: + enabled: false + +tls: + enabled: false + settings: + networkServices: true + +image: + #Provide the docker secret name in the namespace + #Eg. pullSecret: regcred + pullSecret: + #Pull policy to be used for the Docker image + #Eg. pullPolicy: IfNotPresent + pullPolicy: IfNotPresent + #Provide a valid image and version for mongodb + mongo: + repository: mongo + tag: 3.6.6 + hooks: + repository: ghcr.io/hyperledger/bevel-build + tag: jdk8-stable + doorman: ghcr.io/hyperledger/bevel-doorman-linuxkit:latest + nms: ghcr.io/hyperledger/bevel-networkmap-linuxkit:latest + +settings: + removeKeysOnDelete: true + rootSubject: "CN=DLT Root CA,OU=DLT,O=DLT,L=New York,C=US" + mongoSubject: "C=US,ST=New York,L=New York,O=Lite,OU=DBA,CN=mongoDB" + #Provide the tcp node port for database + #Eg. dbPort: 27017 + dbPort: 27017 + +doorman: + subject: "CN=Corda Doorman CA,OU=DOORMAN,O=DOORMAN,L=New York,C=US" + username: doorman + authPassword: admin + dbPassword: newdbnm + #Provide the tcp port for node + #Eg. port: 8080 + port: 8080 + +nms: + subject: "CN=Network Map,OU=FRA,O=FRA,L=Berlin,C=DE" + username: networkmap + authPassword: admin + dbPassword: newdbnm + port: 8080 diff --git a/platforms/r3-corda/charts/corda-networkmap-tls/Chart.yaml b/platforms/r3-corda/charts/corda-networkmap-tls/Chart.yaml deleted file mode 100644 index 5d4a785f2de..00000000000 --- a/platforms/r3-corda/charts/corda-networkmap-tls/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-os: Deploys networkmap sevice with TLS." -name: corda-networkmap-tls -version: 1.0.0 diff --git a/platforms/r3-corda/charts/corda-networkmap-tls/README.md b/platforms/r3-corda/charts/corda-networkmap-tls/README.md deleted file mode 100644 index 80d1bfcef42..00000000000 --- a/platforms/r3-corda/charts/corda-networkmap-tls/README.md +++ /dev/null @@ -1,178 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) 
-[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# Nms Deployment - -- [Nms-tls Deployment Helm Chart](#Nms-tls-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Contributing](#contributing) -- [License](#license) - - -## nms-tls Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/develop/platforms/r3-corda/charts/corda-networkmap-tls) deploys a networkmap sevice with TLS enabled. - - -## Prerequisites ---- -Before deploying the chart please ensure you have the following prerequisites: - -- NetworkMap and Node's database up and running. -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm is installed. - - -## Chart Structure ---- -This chart has following structue: - -``` - - ├── nms-tls - │ ├── Chart.yaml - │ ├── templates - │ │ ├── deployment.yaml - │ │ ├── Volume.yaml - │ │ └── service.yaml - │ └── values.yaml -``` - -Type of files used: - -- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed. -- `deployment.yaml` : This file sets up a deployment with multiple containers, mounts volumes, retrieves secrets from Vault, and performs some initialization tasks before starting the main containers. -- `volume.yaml` : These PVCs can be used to provide persistent storage for the network map service deployment. -- `service.yaml` : This file defines a Kubernetes Service with multiple ports for protocols and targets, and supports Ambassador proxy annotations for specific configurations when using the "ambassador" proxy provider. -- `chart.yaml` : Provides metadata about the chart, such as its name, version, and description. -- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the image, storage, service, vault and ambassador. - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/r3-corda/charts/corda-networkmap-tls/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. 
Here are some important configuration options: - -## Parameters ---- - -### Name - -| Name | Description | Default Value | -| -----------| -------------------------------------------------- | ------------- | -| name | Provide the name of the node | network-map | - -### Metadata - -| Name | Description | Default Value | -| ----------------| -------------------------------------------------------- | ------------- | -| namespace | Provide the namespace for the nms Generator | default | - -### Image - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------- | --------------- | -| initContainerName | Provide the alpine utils image, which is used for all init-containers of deployments/jobs | "" | -| containerName | Provide the containerName of image | "" | -| imagePullSecret | Provide the image pull secret of image | regcred | -| env | Provide enviroment variable for container image | "" | - -### Service - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------ | ------------- | -| type | Provide the type of service | "NodePort" | -| port | Provide the NMS service port | "30007" | -| nodePort | Provide the node port for node service to be accessible outside| "30050" | - - -### storage - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ------------- | -| Memory | Provide the memory for node | "4Gi" | - - -### Vault - -| Name | Description | Default Value | -| ------------------------- | --------------------------------------------------------------------------| ------------- | -| address | Address/URL of the Vault server | "" | -| role | Role used for authentication with Vault | vault-role | -| authpath | Authentication path for Vault | cordanms | -| secretprefix | Provide the kubernetes auth backed configured in vault | "" | -| imagesecretname | specify the name of the Kubernetes secret | "" | -| serviceaccountname | To authenticate with the Vault server and retrieve the secrets |vault-auth-issuer| -| ambassador | Provides the suffix to be used in external URL |"" | - - - -## Deployment ---- - -To deploy the nms-tls Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/r3-corda/charts/corda-networkmap-tls/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install, upgrade, verify, delete the chart: - -To install the chart: -```bash -helm repo add bevel https://hyperledger.github.io/bevel/ -helm install ./corda-networkmap-tls -``` - -To upgrade the chart: -```bash -helm upgrade ./corda-networkmap-tls -``` - -To verify the deployment: -```bash -kubectl get jobs -n -``` -Note : Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. - - -To delete the chart: -```bash -helm uninstall -``` -Note : Replace `` with the desired name for the release. - - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Nms-tls Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/develop/platforms/r3-corda/charts/corda-networkmap-tls), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). 
- - -## License - -This chart is licensed under the Apache v2.0 license. - -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -``` diff --git a/platforms/r3-corda/charts/corda-networkmap-tls/templates/deployment.yaml b/platforms/r3-corda/charts/corda-networkmap-tls/templates/deployment.yaml deleted file mode 100644 index c56e290e3e8..00000000000 --- a/platforms/r3-corda/charts/corda-networkmap-tls/templates/deployment.yaml +++ /dev/null @@ -1,365 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Deployment -# Creates the replicated container and manages lifecycle -# TLS certs mounted -# Persistent Volume mounted -# Service points to this deployment (uses labels!) -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} - {{- if .Values.deployment.annotations }} - annotations: -{{ toYaml .Values.deployment.annotations | indent 8 }} - {{- end }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - replicas: {{ .Values.replicas }} - selector: - matchLabels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - template: - metadata: - labels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - hostname: {{ .Values.nodeName }} - securityContext: - fsGroup: 1000 - containers: - - name: {{ .Values.nodeName }}-service - image: {{ .Values.image.containerName }} - imagePullPolicy: Always - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - - # # add permissions to dir - # chmod 777 -R {{ .Values.image.mountPath.basePath }}/; - # chmod 777 -R {{ .Values.image.mountPath.basePath }}-tls/; - # Setting up enviroment variables required for jar - {{- range $.Values.image.env }} - export {{ .name }}="{{ .value }}" - {{- end }} - export NETWORKMAP_TLS_CERT_PATH="{{ .Values.image.mountPath.basePath }}-tls/certs/networkmap.crt" - export NETWORKMAP_TLS_KEY_PATH="{{ .Values.image.mountPath.basePath }}-tls/certs/networkmap.key" - export DB_PASSWORD=`cat /opt/creds/db_root_password` - export NETWORKMAP_MONGO_CONNECTION_STRING="mongodb://${DB_USERNAME}:${DB_PASSWORD}@${DB_URL}:${DB_PORT}/${DATABASE}?ssl=true&sslInvalidHostNameAllowed=true&streamType=netty" - export NETWORKMAP_AUTH_PASSWORD=`cat 
/opt/creds/user_cred` - - # import self signed tls certificate of mongodb, since java only trusts certificate signed by well known CA - yes | keytool -importcert -file {{ .Values.image.mountPath.basePath }}-tls/certs/mongoCA.crt -storepass changeit -alias mongoca -keystore /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts - java -jar {{ .Values.image.mountPath.basePath }}/network-map-service.jar 2>&1 - ports: - - containerPort: {{ .Values.service.targetPort }} - volumeMounts: - - name: {{ .Values.nodeName }}-servicedata - mountPath: "{{ .Values.image.mountPath.basePath }}/db/" - readOnly: false - - name: certs - mountPath: "{{ .Values.image.mountPath.basePath }}/db/certs" - readOnly: false - - name: creds - mountPath: "/opt/creds" - readOnly: false - - name: tls-certs - mountPath: "{{ .Values.image.mountPath.basePath }}-tls/certs/" - readOnly: false - initContainers: - - name: init-certificates - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{.Values.vault.address}} - - name: KUBERNETES_AUTH_PATH - value: {{.Values.vault.authpath}} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: MOUNT_PATH - value: {{ .Values.image.mountPath.basePath }}/db/certs/ - - name: VAULT_NODE_NAME - value: {{ .Values.nodeName }} - - name: SECRET_PREFIX - value: {{.Values.vault.secretprefix}} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - echo "Getting secrets from Vault Server" - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - echo "logged into vault" - OUTPUT_PATH=${MOUNT_PATH} - - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/{{ .Values.vault.certsecretprefix }} | jq -r 'if .errors then . else . 
end') - - validateVaultResponse "${{ .Values.vault.certsecretprefix }})" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - - ROOTCA_KEY=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["rootcakey"]') - mkdir -p ${OUTPUT_PATH}/root; - echo "${ROOTCA_KEY}" | base64 -d > ${OUTPUT_PATH}/root/keys.jks - - NETWORKMAP_KEY=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["networkmap.jks"]') - mkdir -p ${OUTPUT_PATH}/network-map; - echo "${NETWORKMAP_KEY}" | base64 -d > ${OUTPUT_PATH}/network-map/keys.jks - - chmod 777 -R {{ .Values.image.mountPath.basePath }}/db - volumeMounts: - - name: certs - mountPath: "{{ .Values.image.mountPath.basePath }}/db/certs/" - readOnly: false - - name: init-certificates-tls - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{.Values.vault.address}} - - name: KUBERNETES_AUTH_PATH - value: {{.Values.vault.authpath}} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: MOUNT_PATH - value: {{ .Values.image.mountPath.basePath }}-tls/certs/ - - name: SECRET_PREFIX - value: {{.Values.vault.secretprefix}} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - # setting up env to get secrets from vault - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - echo "Getting secrets from Vault Server: ${VAULT_ADDR}" - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - echo "VAULT TOKEN IS : ${VAULT_TOKEN}" - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - - OUTPUT_PATH=${MOUNT_PATH} - - if [ "{{ .Values.image.tlsCertificate }}" == true ] - then - # get networkmap tls cert and key from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/{{ .Values.vault.tlscertsecretprefix }} | jq -r 'if .errors then . else . end') - validateVaultResponse "${{ .Values.vault.tlscertsecretprefix }}" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - NETWORKMAP_CRT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["tlscacerts"]') - NETWORKMAP_KEY=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["tlskey"]') - echo "check-1 $NETWORKMAP_CRT" - echo "check-2 $NETWORKMAP_KEY" - echo "${NETWORKMAP_CRT}" | base64 -d > {{ .Values.image.mountPath.basePath }}-tls/certs/networkmap.crt - echo "${NETWORKMAP_KEY}" | base64 -d > {{ .Values.image.mountPath.basePath }}-tls/certs/networkmap.key - fi - - # get mongo tls cert from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/{{ .Values.vault.dbcertsecretprefix }} | jq -r 'if .errors then . else . 
end') - validateVaultResponse "{{ .Values.vault.dbcertsecretprefix }}" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - CA_CERT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["mongoCA.crt"]') - echo "${CA_CERT}" | base64 -d > ${OUTPUT_PATH}/mongoCA.crt - echo "$CA_CERT" - # add permissions to dir - chmod 777 -R {{ .Values.image.mountPath.basePath }}-tls/certs/ - volumeMounts: - - name: tls-certs - mountPath: "{{ .Values.image.mountPath.basePath }}-tls/certs/" - readOnly: false - - name: init-certificates-cred - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{.Values.vault.address}} - - name: KUBERNETES_AUTH_PATH - value: {{.Values.vault.authpath}} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: MOUNT_PATH - value: /opt/creds - - name: VAULT_NODE_NAME - value: {{ .Values.nodeName }} - - name: DB_CRED_SECRET_PREFIX - value: {{ .Values.vault.dbcredsecretprefix }} - - name: USER_SECRET_PREFIX - value: {{ .Values.vault.secretnetworkmappass }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - echo "Getting secrets from Vault Server" - # Login to Vault and so I can get an approle token - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - - OUTPUT_PATH=${MOUNT_PATH} - - LOOKUP_PWD_RESPONSE_DB_PASS=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${DB_CRED_SECRET_PREFIX} | jq -r 'if .errors then . else . end') - validateVaultResponse "${DB_CRED_SECRET_PREFIX}" "${LOOKUP_PWD_RESPONSE_DB_PASS}" "LOOKUPSECRETRESPONSE" - MONGODB_PASSWORD=$(echo ${LOOKUP_PWD_RESPONSE_DB_PASS} | jq -r '.data.data["mongodbPassword"]') - - echo "${MONGODB_PASSWORD}" >> ${MOUNT_PATH}/db_root_password - - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${USER_SECRET_PREFIX} | jq -r 'if .errors then . else . 
end') - validateVaultResponse "${USER_SECRET_PREFIX}" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - USER_PASSWORD=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["{{ .Values.image.authusername }}"]') - echo "${USER_PASSWORD}" >> ${MOUNT_PATH}/user_cred - - volumeMounts: - - name: creds - mountPath: "/opt/creds" - readOnly: false - - name: changepermissions - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{.Values.vault.address}} - - name: KUBERNETES_AUTH_PATH - value: {{.Values.vault.authpath}} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: MOUNT_PATH - value: {{ .Values.image.mountPath.basePath }}/db/certs - - name: VAULT_NODE_NAME - value: {{ .Values.nodeName }} - - name: SECRET_PREFIX - value: {{.Values.vault.secretprefix}} - command: ["sh", "-c"] - args: - - |- - chmod 777 -R {{ .Values.image.mountPath.basePath }}; - volumeMounts: - - name: {{ .Values.nodeName }}-servicedata - mountPath: "{{ .Values.image.mountPath.basePath }}/db" - readOnly: false - - name: db-healthcheck - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - COUNTER=1 - FLAG=true - while [ "$COUNTER" -le {{ $.Values.healthcheck.readinessthreshold }} ] - do - DB_NODE={{ .Values.healthcheck.dburl }} - STATUS=$(nc -vz $DB_NODE 2>&1 | grep -c open ) - if [ "$STATUS" == 0 ] - then - FLAG=false - else - FLAG=true - echo "DB up and running" - fi - if [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, retrying after {{ $.Values.healthcheck.readinesscheckinterval }} seconds" - COUNTER=`expr "$COUNTER" + 1` - sleep {{ $.Values.healthcheck.readinesscheckinterval }} - else - echo "SUCCESS!" - echo "DB up and running!" - exit 0 - break - fi - done - if [ "$COUNTER" -gt {{ $.Values.healthcheck.readinessthreshold }} ] || [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, no DB up and running. Giving up!" - exit 1 - break - fi - imagePullSecrets: - - name: {{ .Values.image.imagePullSecret }} - volumes: - - name: {{ .Values.nodeName }}-servicedata - persistentVolumeClaim: - claimName: {{ .Values.nodeName }}-pvc - - name: certs - emptyDir: - medium: Memory - - name: creds - emptyDir: - medium: Memory - - name: tls-certs - emptyDir: - medium: Memory diff --git a/platforms/r3-corda/charts/corda-networkmap-tls/templates/service.yaml b/platforms/r3-corda/charts/corda-networkmap-tls/templates/service.yaml deleted file mode 100644 index f3035e4dc20..00000000000 --- a/platforms/r3-corda/charts/corda-networkmap-tls/templates/service.yaml +++ /dev/null @@ -1,70 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.nodeName }} - namespace: {{ $.Values.metadata.namespace }} - annotations: - labels: - run: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - selector: - app: {{ .Values.nodeName }} - type: {{ .Values.service.type }} - ports: - - protocol: TCP - port: {{ .Values.service.port }} - targetPort: {{ .Values.service.targetPort }} - {{- if .Values.service.nodePort }} - nodePort: {{ .Values.service.nodePort }} - {{- end }} -{{ if $.Values.ambassador }} ---- -apiVersion: getambassador.io/v3alpha1 -kind: Host -metadata: - name: {{ .Values.nodeName }}-host -spec: - hostname: {{ .Values.nodeName }}.{{ .Values.ambassador.external_url_suffix }} - acmeProvider: - authority: none - requestPolicy: - insecure: - action: Route - tlsSecret: - name: {{ .Values.nodeName }}-ambassador-certs - namespace: {{ .Values.metadata.namespace }} ---- -apiVersion: getambassador.io/v3alpha1 -kind: Mapping -metadata: - name: {{ .Values.nodeName }}-mapping - namespace: {{ .Values.metadata.namespace }} -spec: - host: {{ .Values.nodeName }}.{{ .Values.ambassador.external_url_suffix }} - prefix: / - service: https://{{ .Values.nodeName }}.{{ .Values.metadata.namespace }}:{{ .Values.service.port }} - tls: {{ .Values.nodeName }}-tlscontext ---- -apiVersion: getambassador.io/v3alpha1 -kind: TLSContext -metadata: - name: {{ .Values.nodeName }}-tlscontext - namespace: {{ .Values.metadata.namespace }} -spec: - hosts: - - {{ .Values.nodeName }}.{{ .Values.ambassador.external_url_suffix }} - secret: {{ .Values.nodeName }}-ambassador-certs.{{ .Values.metadata.namespace }} - secret_namespacing: true - min_tls_version: v1.2 -{{- end }} - diff --git a/platforms/r3-corda/charts/corda-networkmap-tls/templates/volume.yaml b/platforms/r3-corda/charts/corda-networkmap-tls/templates/volume.yaml deleted file mode 100644 index ed662b428c9..00000000000 --- a/platforms/r3-corda/charts/corda-networkmap-tls/templates/volume.yaml +++ /dev/null @@ -1,24 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: {{ .Values.nodeName }}-pvc - namespace: {{ .Values.metadata.namespace }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }}-pvc - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - storageClassName: {{ .Values.storage.name }} - accessModes: - - ReadWriteOnce - resources: - requests: - storage: {{ .Values.storage.memory }} \ No newline at end of file diff --git a/platforms/r3-corda/charts/corda-networkmap-tls/values.yaml b/platforms/r3-corda/charts/corda-networkmap-tls/values.yaml deleted file mode 100644 index 56c681d49a0..00000000000 --- a/platforms/r3-corda/charts/corda-networkmap-tls/values.yaml +++ /dev/null @@ -1,90 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Default values for nmschart. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -#Provide the Name for node to be deployed -#Eg. nodeName: network-map -nodeName: network-map - -metadata: - #Provide the namespace - #Eg. namespace: default - namespace: default - -image: - #Provide the name of image for init container - #Eg. initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - #Provide the containerName of image - containerName: ghcr.io/hyperledger/bevel-networkmap-linuxkit:latest - #Provide the image pull secret of image - #Eg. pullSecret: regcred - imagePullSecret: - #Provide enviroment variable for container image - env: - #Provide rootcaname for the doorman - #Eg. rootcaname: CN=Corda Root CA, OU=FRA, O=FRA, L=London, ST=London, C=BR - rootcaname: - tlscertpath: - tlskeypath: - #Provide whether TLS is enabled or not - #Eg. tls: false - tls: - #Provide whether to enable Corda doorman protocol - #Eg. doorman: true - doorman: - #Provide whether to enable Cordite certman protocol so that nodes can authenticate using a signed TLS cert - #Eg. certman: true - certman: - #Provide database directory for this service - #Eg. database: db - database: - #Provide MongoDB connection string. If set to embed will start its own mongo instance - #Eg. dataSourceUrl: db - dataSourceUrl: - -service: - #Provide the NMS service port - #Eg. port: 30007 - port: 30007 - #Provide the type of service - #Eg. type: NodePort - type: NodePort - #Provide the node port for node service to be accessible outside - #Eg. nodePort: 30050 - nodePort: - -storage: - #Provide the memory for node - #Eg. memory: 4Gi - memory: 4Gi - -vault: - #Provide the vault server address - #Eg. address: http://34.228.219.208:8200 - address: - #Provide the vaultrole - #Eg. role: vault-role - role: vault-role - #Eg. authpath: cordanms - authpath: cordanms - #Provide the kubernetes auth backed configured in vault - #Eg. secretprefix: - secretprefix: - #Eg. imagesecretname: - imagesecretname: - #Eg. 
serviceaccountname: vault-auth-issuer - serviceaccountname: vault-auth-issuer - #Path in vault where tls certificates are present - tlscertsecretprefix: - -ambassador: - #Provides the suffix to be used in external URL - #Eg. external_url_suffix: org1.blockchaincloudpoc.com - external_url_suffix: diff --git a/platforms/r3-corda/charts/corda-networkmap/Chart.yaml b/platforms/r3-corda/charts/corda-networkmap/Chart.yaml deleted file mode 100644 index 75905e9de2f..00000000000 --- a/platforms/r3-corda/charts/corda-networkmap/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-os: Deploys networkmap service without TLS." -name: corda-networkmap -version: 1.0.0 diff --git a/platforms/r3-corda/charts/corda-networkmap/README.md b/platforms/r3-corda/charts/corda-networkmap/README.md deleted file mode 100644 index 0f3a690fee2..00000000000 --- a/platforms/r3-corda/charts/corda-networkmap/README.md +++ /dev/null @@ -1,179 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) -[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# Nms Deployment - -- [Nms Deployment Helm Chart](#Nms-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Contributing](#contributing) -- [License](#license) - - -## nms Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/develop/platforms/r3-corda/charts/corda-networkmap) deploys a networkmap service without TLS. - - -## Prerequisites ---- -Before deploying the chart please ensure you have the following prerequisites: - -- NetworkMap and Node's database up and running. -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm is installed. - - -## Chart Structure ---- -This chart has following structue: - -``` - - ├── nms - │ ├── Chart.yaml - │ ├── templates - │ │ ├── deployment.yaml - │ │ ├── Volume.yaml - │ │ └── service.yaml - │ └── values.yaml -``` - -Type of files used: - -- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed. -- `deployment.yaml` : A Deployment controller provides declarative updates for Pods and ReplicaSets. -- `volume.yaml` : These PVCs can be used to provide persistent storage for the network map service deployment, allowing data to be stored and accessed across the lifecycle of the deployment. -- `service.yaml` : This file defines a Kubernetes Service with multiple ports for protocols and targets, and supports Ambassador proxy annotations for specific configurations when using the "ambassador" proxy provider. -- `chart.yaml` : Provides metadata about the chart, such as its name, version, and description. -- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the image, storage, service and vault. 
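One way to see how these templates expand into concrete Kubernetes manifests, before anything is installed, is to render the chart locally with `helm template`. The release name, chart path, and override values below are illustrative placeholders rather than values required by the chart:

```bash
# Render the chart locally without installing it, so the generated
# deployment.yaml, service.yaml and volume.yaml manifests can be reviewed.
helm template network-map ./corda-networkmap \
  --namespace default \
  --set metadata.namespace=default \
  --set vault.address=http://vault.example.internal:8200 \
  > rendered-manifests.yaml
```

Reviewing the rendered output is a quick way to confirm that the Vault and Ambassador related values resolve as expected before performing a real install.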
- - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/r3-corda/charts/corda-networkmap/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: - -## Parameters ---- - -### Name - -| Name | Description | Default Value | -| -----------| -------------------------------------------------- | ------------- | -| name | Provide the name of the node | network-map | - -### Metadata - -| Name | Description | Default Value | -| ----------------| ---------------------------------------------------------------------------- | ------------- | -| namespace | Provide the namespace for the nms Generator | default | - -### Image - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------- | --------------- | -| initContainerName | Provide the alpine utils image, which is used for all init-containers of deployments/jobs | "" | -| containerName | Provide the containerName of image | "" | -| imagePullSecret | Provide the image pull secret of image | regcred | -| env | Provide enviroment variable for container image | "" | - -### Service - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------ | ------------- | -| type | Provide the type of service | "NodePort" | -| port | Provide the NMS service port | "30007" | -| nodePort | Provide the node port for node service to be accessible outside| "32001" | - - -### storage - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ------------- | -| Memory | Provide the memory for node | "4Gi" | - - -### Vault - -| Name | Description | Default Value | -| ------------------------- | --------------------------------------------------------------------------| ------------- | -| address | Address/URL of the Vault server | "" | -| role | Role used for authentication with Vault | vault-role | -| authpath | Authentication path for Vault | cordanms | -| secretprefix | Provide the kubernetes auth backed configured in vault | "" | -| imagesecretname | specify the name of the Kubernetes secret | "" | -| serviceaccountname | To authenticate with the Vault server and retrieve the secrets |vault-auth-issuer| -| ambassador | Provides the suffix to be used in external URL |"" | - - - - -## Deployment ---- - -To deploy the nms Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/r3-corda/charts/corda-networkmap/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install, upgrade, verify, delete the chart: - -To install the chart: -```bash -helm repo add bevel https://hyperledger.github.io/bevel/ -helm install ./corda-networkmap -``` - -To upgrade the chart: -```bash -helm upgrade ./corda-networkmap -``` - -To verify the deployment: -```bash -kubectl get jobs -n -``` -Note : Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. - - -To delete the chart: -```bash -helm uninstall -``` -Note : Replace `` with the desired name for the release. 
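As a worked example, a complete install, upgrade, and cleanup cycle might look like the following. The release name `network-map`, the namespace `bevel-nms`, and the override file `nms-values.yaml` are assumed placeholders; substitute the values used in your own network:

```bash
# Install the chart under an explicit release name and namespace,
# supplying customised values from a local override file.
helm install network-map ./corda-networkmap \
  --namespace bevel-nms \
  --values nms-values.yaml

# Roll out configuration changes to the same release.
helm upgrade network-map ./corda-networkmap \
  --namespace bevel-nms \
  --values nms-values.yaml

# Inspect the resources created by the release.
kubectl get deployments,svc,pvc -n bevel-nms

# Remove the release when it is no longer needed.
helm uninstall network-map -n bevel-nms
```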
- - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [Nms Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/develop/platforms/r3-corda/charts/corda-networkmap), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - -## License - -This chart is licensed under the Apache v2.0 license. - -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -``` diff --git a/platforms/r3-corda/charts/corda-networkmap/templates/deployment.yaml b/platforms/r3-corda/charts/corda-networkmap/templates/deployment.yaml deleted file mode 100644 index 74cb9553ef3..00000000000 --- a/platforms/r3-corda/charts/corda-networkmap/templates/deployment.yaml +++ /dev/null @@ -1,312 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Deployment -# Creates the replicated container and manages lifecycle -# TLS certs mounted -# Persistent Volume mounted -# Service points to this deployment (uses labels!) 
-apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ .Values.nodeName }} - namespace: {{ .Values.metadata.namespace }} - {{- if .Values.deployment.annotations }} - annotations: -{{ toYaml .Values.deployment.annotations | indent 8 }} - {{- end }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - replicas: {{ .Values.replicas }} - selector: - matchLabels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - template: - metadata: - labels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - hostname: {{ .Values.nodeName }} - securityContext: - fsGroup: 1000 - containers: - - name: {{ .Values.nodeName }}-service - image: {{ .Values.image.containerName }} - imagePullPolicy: Always - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - {{- range $.Values.image.env }} - export {{ .name }}="{{ .value }}" - {{- end }} - export DB_PASSWORD=`cat /opt/creds/db_root_password` - export NETWORKMAP_MONGO_CONNECTION_STRING="mongodb://${DB_USERNAME}:${DB_PASSWORD}@${DB_URL}:${DB_PORT}/${DATABASE}" - export NETWORKMAP_AUTH_PASSWORD=`cat /opt/creds/user_cred` - java -jar {{ .Values.image.mountPath.basePath }}/network-map-service.jar 2>&1 - ports: - - containerPort: {{ .Values.service.targetPort }} - volumeMounts: - - name: {{ .Values.nodeName }}-servicedata - mountPath: "{{ .Values.image.mountPath.basePath }}/db" - readOnly: false - - name: {{ .Values.nodeName }}-logs - mountPath: "{{ .Values.image.mountPath.basePath }}/logs" - readOnly: false - - name: certs - mountPath: "{{ .Values.image.mountPath.basePath }}/db/certs" - readOnly: false - - name: creds - mountPath: "/opt/creds" - readOnly: false - - name: logs - image: "{{ .Values.image.initContainerName }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: ["sh", "-c"] - args: - - |- - cd /opt/networkmap/ - COUNTER=1 - NETWORK_ROOT_TRUST=/opt/networkmap/network-map-truststore.jks - while [ "$COUNTER" -le {{ $.Values.healthcheck.readinessthreshold }} ] - do - curl http://localhost:8080/network-map/truststore --output network-map-truststore.jks - if [ -f "$NETWORK_ROOT_TRUST" ] - then - echo "SUCCESS!" 
- echo "NMS running and fetched truststore" - break - else - COUNTER=`expr "$COUNTER" + 1` - sleep {{ $.Values.healthcheck.readinesscheckinterval }} - fi - done - if ["$COUNTER" -gt {{ $.Values.healthcheck.readinessthreshold }} ] - then - exit 1 - fi - while true; do tail -f ./logs/*.log 2>/dev/null; sleep 5; done - volumeMounts: - - name: {{ .Values.nodeName }}-logs - mountPath: "{{ .Values.image.mountPath.basePath }}/logs" - initContainers: - - name: init-certificates - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{.Values.vault.address}} - - name: KUBERNETES_AUTH_PATH - value: {{.Values.vault.authpath}} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: MOUNT_PATH - value: {{ .Values.image.mountPath.basePath }}/db/certs/ - - name: VAULT_NODE_NAME - value: {{ .Values.nodeName }} - - name: SECRET_PREFIX - value: {{.Values.vault.secretprefix}} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - echo "Getting secrets from Vault Server" - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - echo "logged into vault" - OUTPUT_PATH=${MOUNT_PATH} - - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/{{ .Values.vault.certsecretprefix }} | jq -r 'if .errors then . else . 
end') - - validateVaultResponse "${{ .Values.vault.certsecretprefix }}" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - - ROOTCA_KEY=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["rootcakey"]') - mkdir -p ${OUTPUT_PATH}/root; - echo "${ROOTCA_KEY}" | base64 -d > ${OUTPUT_PATH}/root/keys.jks - - NETWORKMAP_KEY=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["networkmap.jks"]') - mkdir -p ${OUTPUT_PATH}/network-map; - echo "${NETWORKMAP_KEY}" | base64 -d > ${OUTPUT_PATH}/network-map/keys.jks - - chmod 777 -R {{ .Values.image.mountPath.basePath }}/db - volumeMounts: - - name: certs - mountPath: "{{ .Values.image.mountPath.basePath }}/db/certs/" - readOnly: false - - name: init-certificates-cred - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{.Values.vault.address}} - - name: KUBERNETES_AUTH_PATH - value: {{.Values.vault.authpath}} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: MOUNT_PATH - value: /opt/creds - - name: VAULT_NODE_NAME - value: {{ .Values.nodeName }} - - name: DB_CRED_SECRET_PREFIX - value: {{ .Values.vault.dbcredsecretprefix }} - - name: USER_SECRET_PREFIX - value: {{ .Values.vault.secretnetworkmappass }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - echo "Getting secrets from Vault Server" - # Login to Vault and so I can get an approle token - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - echo "logged into vault" - OUTPUT_PATH=${MOUNT_PATH} - - LOOKUP_PWD_RESPONSE_DB_PASS=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${DB_CRED_SECRET_PREFIX} | jq -r 'if .errors then . else . end') - validateVaultResponse "${DB_CRED_SECRET_PREFIX}" "${LOOKUP_PWD_RESPONSE_DB_PASS}" "LOOKUPSECRETRESPONSE" - MONGODB_PASSWORD=$(echo ${LOOKUP_PWD_RESPONSE_DB_PASS} | jq -r '.data.data["mongodbPassword"]') - - echo "${MONGODB_PASSWORD}" >> ${MOUNT_PATH}/db_root_password - - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${USER_SECRET_PREFIX} | jq -r 'if .errors then . else . 
end') - validateVaultResponse "secret (${CERTS_SECRET_PREFIX})" "${LOOKUP_SECRET_RESPONSE}" - USER_PASSWORD=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["{{ .Values.image.authusername }}"]') - echo "${USER_PASSWORD}" >> ${MOUNT_PATH}/user_cred - - volumeMounts: - - name: creds - mountPath: "/opt/creds" - readOnly: false - - name: changepermissions - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{.Values.vault.address}} - - name: KUBERNETES_AUTH_PATH - value: {{.Values.vault.authpath}} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: MOUNT_PATH - value: {{ .Values.image.mountPath.basePath }}/db/certs - - name: VAULT_NODE_NAME - value: {{ .Values.nodeName }} - - name: SECRET_PREFIX - value: {{.Values.vault.secretprefix}} - command: ["sh", "-c"] - args: - - |- - chmod 777 -R {{ .Values.image.mountPath.basePath }}; - volumeMounts: - - name: {{ .Values.nodeName }}-servicedata - mountPath: "{{ .Values.image.mountPath.basePath }}/db" - readOnly: false - - name: db-healthcheck - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - COUNTER=1 - FLAG=true - while [ "$COUNTER" -le {{ $.Values.healthcheck.readinessthreshold }} ] - do - DB_NODE={{ .Values.healthcheck.dburl }} - STATUS=$(nc -vz $DB_NODE 2>&1 | grep -c open ) - if [ "$STATUS" == 0 ] - then - FLAG=false - else - FLAG=true - echo "DB up and running" - fi - if [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, retrying after {{ $.Values.healthcheck.readinesscheckinterval }} seconds" - COUNTER=`expr "$COUNTER" + 1` - sleep {{ $.Values.healthcheck.readinesscheckinterval }} - else - echo "SUCCESS!" - echo "DB up and running!" - exit 0 - break - fi - done - if [ "$COUNTER" -gt {{ $.Values.healthcheck.readinessthreshold }} ] || [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, no DB up and running. Giving up!" - exit 1 - break - fi - imagePullSecrets: - - name: {{ .Values.image.imagePullSecret }} - volumes: - - name: {{ .Values.nodeName }}-servicedata - persistentVolumeClaim: - claimName: {{ .Values.nodeName }}-pvc - - name: {{ .Values.nodeName }}-logs - persistentVolumeClaim: - claimName: {{ .Values.nodeName }}-pvc-logs - - name: certs - emptyDir: - medium: Memory - - name: creds - emptyDir: - medium: Memory diff --git a/platforms/r3-corda/charts/corda-networkmap/templates/service.yaml b/platforms/r3-corda/charts/corda-networkmap/templates/service.yaml deleted file mode 100644 index 93833c487df..00000000000 --- a/platforms/r3-corda/charts/corda-networkmap/templates/service.yaml +++ /dev/null @@ -1,43 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.nodeName }} - namespace: {{ $.Values.metadata.namespace }} - annotations: - labels: - run: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - selector: - app: {{ .Values.nodeName }} - type: {{ .Values.service.type }} - ports: - - protocol: TCP - port: {{ .Values.service.port }} - targetPort: {{ .Values.service.targetPort }} - {{- if .Values.service.nodePort }} - nodePort: {{ .Values.service.nodePort}} - {{- end }} - -{{ if $.Values.ambassador }} ---- -apiVersion: getambassador.io/v3alpha1 -kind: Mapping -metadata: - name: {{ .Values.nodeName }}-mapping - namespace: {{ .Values.metadata.namespace }} -spec: - hostname: {{ .Values.nodeName }}.{{ .Values.ambassador.external_url_suffix }} - prefix: / - service: {{ .Values.nodeName }}.{{ .Values.metadata.namespace }}:{{ .Values.service.port }} -{{ end }} - diff --git a/platforms/r3-corda/charts/corda-networkmap/templates/volume.yaml b/platforms/r3-corda/charts/corda-networkmap/templates/volume.yaml deleted file mode 100644 index 1d487cfe4e5..00000000000 --- a/platforms/r3-corda/charts/corda-networkmap/templates/volume.yaml +++ /dev/null @@ -1,42 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: {{ .Values.nodeName }}-pvc - namespace: {{ .Values.metadata.namespace }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }}-pvc - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - storageClassName: {{ .Values.storage.name }} - accessModes: - - ReadWriteOnce - resources: - requests: - storage: {{ .Values.storage.memory }} ---- -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: {{ .Values.nodeName }}-pvc-logs - namespace: {{ .Values.metadata.namespace }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }}-pvc-logs - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - storageClassName: {{ .Values.storage.name }} - accessModes: - - ReadWriteOnce - resources: - requests: - storage: {{ .Values.storage.memory }} \ No newline at end of file diff --git a/platforms/r3-corda/charts/corda-networkmap/values.yaml b/platforms/r3-corda/charts/corda-networkmap/values.yaml deleted file mode 100644 index c1a73197c0a..00000000000 --- a/platforms/r3-corda/charts/corda-networkmap/values.yaml +++ /dev/null @@ -1,87 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# Default values for nmschart. -# This is a YAML-formatted file. 
-# Declare variables to be passed into your templates. - -#Provide the Name for node to be deployed -#Eg. nodeName: network-map -nodeName: network-map - -metadata: - #Provide the namespace - #Eg. namespace: default - namespace: default - -image: - #Provide the name of image for init container - #Eg. initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - #Provide the containerName of image - containerName: ghcr.io/hyperledger/bevel-networkmap-linuxkit:latest - #Provide the image pull secret of image - #Eg. pullSecret: regcred - imagePullSecret: regcred - #Provide enviroment variable for container image - env: - #Provide rootcaname for the doorman - #Eg. rootcaname: CN=Corda Root CA, OU=FRA, O=FRA, L=London, ST=London, C=BR - rootcaname: - tlscertpath: - tlskeypath: - #Provide whether TLS is enabled or not - #Eg. tls: false - tls: - #Provide whether to enable Corda doorman protocol - #Eg. doorman: true - doorman: - #Provide whether to enable Cordite certman protocol so that nodes can authenticate using a signed TLS cert - #Eg. certman: true - certman: - #Provide database directory for this service - #Eg. database: db - database: - #Provide MongoDB connection string. If set to embed will start its own mongo instance - #Eg. dataSourceUrl: db - dataSourceUrl: - -service: - #Provide the NMS service port - #Eg. port: 30007 - port: 30007 - #Provide the type of service - #Eg. type: NodePort - type: NodePort - #Provide the node port for node service to be accessible outside - #Eg. nodePort: 30050 - nodePort: - -storage: - #Provide the memory for node - #Eg. memory: 4Gi - memory: 4Gi - -vault: - #Provide the vault server address - #Eg. address: http://34.228.219.208:8200 - address: - #Provide the vaultrole - #Eg. role: vault-role - role: vault-role - #Eg. authpath: cordanms - authpath: cordanms - #Provide the kubernetes auth backed configured in vault - #Eg. secretprefix: - secretprefix: - #Eg. imagesecretname: - imagesecretname: - #Eg. serviceaccountname: vault-auth-issuer - serviceaccountname: vault-auth-issuer -ambassador: - #Provides the suffix to be used in external URL - #Eg. external_url_suffix: org1.blockchaincloudpoc.com - external_url_suffix: diff --git a/platforms/r3-corda/charts/corda-node-initial-registration/Chart.yaml b/platforms/r3-corda/charts/corda-node-initial-registration/Chart.yaml deleted file mode 100644 index 6ec236e2889..00000000000 --- a/platforms/r3-corda/charts/corda-node-initial-registration/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-os: Job for initial node registration." -name: corda-node-initial-registration -version: 1.0.0 diff --git a/platforms/r3-corda/charts/corda-node-initial-registration/README.md b/platforms/r3-corda/charts/corda-node-initial-registration/README.md deleted file mode 100644 index a9f0ec710a8..00000000000 --- a/platforms/r3-corda/charts/corda-node-initial-registration/README.md +++ /dev/null @@ -1,231 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) 
-[//]: # (SPDX-License-Identifier: Apache-2.0)
-[//]: # (##############################################################################################)
-
-
-# Node Deployment
-
-- [Node-initial-registration Deployment Helm Chart](#Node-initial-registration-deployment-helm-chart)
-- [Prerequisites](#prerequisites)
-- [Chart Structure](#chart-structure)
-- [Configuration](#configuration)
-- [Deployment](#deployment)
-- [Contributing](#contributing)
-- [License](#license)
-
-
-
-## node-initial-registration Deployment Helm Chart
----
-This [Helm chart](https://github.com/hyperledger/bevel/tree/develop/platforms/r3-corda/charts/corda-node-initial-registration) helps to deploy the job that registers the R3 Corda node.
-
-
-## Prerequisites
----
-Before deploying the chart, please ensure you have the following prerequisites:
-
-- Node's database up and running.
-- Kubernetes cluster up and running.
-- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication.
-- The Vault is unsealed and initialized.
-- Helm is installed.
-
-
-## Chart Structure
----
-This chart has the following structure:
-```
-  .
-  ├── node-initial-registration
-  │   ├── templates
-  │   │   ├── _helpers.tpl
-  │   │   └── job.yaml
-  │   ├── Chart.yaml
-  │   └── values.yaml
-```
-
-Type of files used:
-
-- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed.
-- `job.yaml` : This file defines the Kubernetes Job that performs the node's initial registration, including its containers, init containers, volume mounts, and environment variables.
-- `chart.yaml` : Provides metadata about the chart, such as its name, version, and description.
-- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the image, nodeConf, credentials, storage, service, vault, etc.
-- `_helpers.tpl` : A template file used for defining custom labels and ports for the metrics in the Helm chart.
-
-
-## Configuration
----
-The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/r3-corda/charts/corda-node-initial-registration/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements.
Here are some important configuration options: - -## Parameters ---- - -### Name - -| Name | Description | Default Value | -| -----------| -------------------------------------------------- | ------------- | -| name | Provide the name of the node | bank1 | - -### Metadata - -| Name | Description | Default Value | -| ----------------| ---------------------------------------------------------------------------- | ------------- | -| namespace | Provide the namespace for the Node-initial-registration Generator | default | -| labels | Provide any additional labels for the Node-initial-registration Generator | "" | - -### Image - -| Name | Description | Default Value | -| ------------------------ | --------------------------------------------------------------------------------------- | --------------- | -| initContainerName | Provide the alpine utils image, which is used for all init-containers of deployments/jobs | "" | -| containerName | Provide the containerName of image | "" | -| imagePullSecret | Provide the image pull secret of image | regcred | -| privateCertificate | Provide true or false if private certificate to be added | "true" | -| doormanCertAlias | Provide true or false if private certificate to be added | "" | -| networkmapCertAlias | Provide true or false if private certificate to be added | "" | - -### NodeConf - -| Name | Description | Default Value | -| ------------------------ | -------------------------------------------------------------------------------------- | --------------- | -| p2p | The host and port on which the node is available for protocol operations over ArtemisMQ | "" | -| ambassadorAddress | Specify ambassador host:port which will be advertised in addition to p2paddress | "" | -| legalName | Provide the legalName for node | "" | -| dbUrl | Provide the h2Url for node | "bank1h2" | -| dbPort | Provide the h2Port for node | "9101" | -| networkMapURL | Provide the nms for node | "" | -| doormanURL | Provide the doorman for node | "" | -| jarVersion | Provide the jar Version for corda jar and finanace jar | "3.3-corda" | -| devMode | Provide the devMode for corda node | "true" | -| env | Provide the enviroment variables to be set | "" | - -### credentials - -| Name | Description | Default Value | -| ----------------| ----------------------------------------------| ------------- | -| dataSourceUser | Provide the dataSourceUser for corda node | "" | -| rpcUser | Provide the rpcUser for corda node | bank1operations| - -### Volume - -| Name | Description | Default Value | -| -----------------| -----------------------| ------------- | -| baseDir | Base directory | /home/bevel | - -### Resources - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------- | --------------- | -| limits | Provide the limit memory for node | "1Gi" | -| requests | Provide the requests memory for node | "1Gi" | - -### storage - -| Name | Description | Default Value | -| --------------------- | -------------------------------------------------------- | ------------- | -| provisioner | Provide the provisioner for node | "" | -| name | Provide the name for node | bank1nodesc | -| memory | Provide the memory for node | "4Gi" | -| type | Provide the type for node | "gp2" | -| encrypted | Provide whether the EBS volume should be encrypted or not | "true" | -| annotations | Provide the annotation of the node | "" | - -### Service - -| Name | Description | Default Value | -| --------------------- | 
------------------------------------------| ------------- | -| type | Provide the type of service | NodePort | -| p2p port | Provide the tcp port for node | 10007 | -| p2p nodePort | Provide the p2p nodeport for node | 30007 | -| p2p targetPort | Provide the p2p targetPort for node | 30007 | -| rpc port | Provide the tpc port for node | 10008 | -| rpc targetPort | Provide the rpc targetport for node | 10003 | -| rpc nodePort | Provide the rpc nodePort for node | 30007 | -| rpcadmin port | Provide the rpcadmin port for node | 10108 | -| rpcadmin targetPort | Provide the rpcadmin targetport for node | 10005 | -| rpcadmin nodePort | Provide the rpcadmin nodePort for node | 30007 | - -### Vault - -| Name | Description | Default Value | -| ------------------------- | --------------------------------------------------------------------------| ------------------------- | -| address | Address/URL of the Vault server. | "" | -| role | Role used for authentication with Vault | vault-role | -| authpath | Authentication path for Vault | cordabank1 | -| serviceAccountName | Provide the already created service account name autheticated to vault | vault-auth-issuer | -| certSecretPrefix | Provide the vault path where the certificates are stored | bank1/certs | -| dbsecretprefix | Provide the secretprefix | bank1/credentials/database | -| rpcusersecretprefix | Provide the secretprefix | bank1/credentials/rpcusers | -| keystoresecretprefix | Provide the secretprefix | bank1/credentials/keystore | -| retires | Provide the no of retires | "" | - -### Healthcheck - -| Name | Description | Default Value | -| ----------------------------| ------------------------------------------------------------------------------| ------------- | -| readinesscheckinterval | Provide the interval in seconds you want to iterate till db to be ready | 5 | -| readinessthreshold | Provide the threshold till you want to check if specified db up and running | 2 | - - - -## Deployment ---- - -To deploy the node-initial-registration Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/r3-corda/charts/corda-node-initial-registration/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install, upgrade,verify, delete the chart: - -To install the chart: -```bash -helm repo add bevel https://hyperledger.github.io/bevel/ -helm install ./corda-node-initial-registration -``` - -To upgrade the chart: -```bash -helm upgrade ./corda-node-initial-registration -``` - -To verify the deployment: -```bash -kubectl get jobs -n -``` -Note : Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. - -To delete the chart: -```bash -helm uninstall -``` -Note : Replace `` with the desired name for the release. - - - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [node-initial-registration Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/develop/platforms/r3-corda/charts/corda-node-initial-registration), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel). - - - -## License - -This chart is licensed under the Apache v2.0 license. 
- -Copyright © 2023 Accenture - -### Attribution - -This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: - -``` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -``` diff --git a/platforms/r3-corda/charts/corda-node-initial-registration/templates/_helpers.tpl b/platforms/r3-corda/charts/corda-node-initial-registration/templates/_helpers.tpl deleted file mode 100644 index 7bf5f530a8e..00000000000 --- a/platforms/r3-corda/charts/corda-node-initial-registration/templates/_helpers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} \ No newline at end of file diff --git a/platforms/r3-corda/charts/corda-node-initial-registration/templates/job.yaml b/platforms/r3-corda/charts/corda-node-initial-registration/templates/job.yaml deleted file mode 100644 index 178d93fe9c7..00000000000 --- a/platforms/r3-corda/charts/corda-node-initial-registration/templates/job.yaml +++ /dev/null @@ -1,545 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: batch/v1 -kind: Job -metadata: - name: {{ .Values.nodeName }}-registration - namespace: {{ .Values.metadata.namespace }} - labels: - app: {{ .Values.nodeName }}-registration - app.kubernetes.io/name: {{ .Values.nodeName }}-registration - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . 
| nindent 2 }} -spec: - backoffLimit: 6 - template: - metadata: - labels: - app: {{ .Values.nodeName }}-initial-registration - app.kubernetes.io/name: {{ .Values.nodeName }}-registration - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - restartPolicy: "OnFailure" - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - hostname: {{ .Values.nodeName }} - securityContext: - fsGroup: 1000 - containers: - - name: node-initial-registration - image: {{ .Values.image.containerName }} - imagePullPolicy: Always - env: - - name: JAVA_OPTIONS - value: -Xmx512m - - name: CORDA_HOME - value: /opt/corda - - name: BASE_DIR - value: {{ $.Values.volume.baseDir }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - rm -rf ${BASE_DIR}/certificates/done.txt - - # Setting up enviroment variables - export DEFAULT_TRUSTSTORE_PASSWORD=`cat /opt/node/creds/default_truststore_cred` - export KEYSTORE_PASSWORD=`cat /opt/node/creds/keystore_cred` - export TRUSTSTORE_PASSWORD=`cat /opt/node/creds/truststore_cred` - export DEFAULT_KEYSTORE_PASSWORD=`cat /opt/node/creds/default_keystore_cred` - - # import self signed tls certificate of doorman and networkmap, since java only trusts certificate signed by well known CA - {{- if .Values.image.privateCertificate }} - yes | keytool -importcert -file {{ $.Values.volume.baseDir }}/certificates/networkmap/networkmap.crt -storepass changeit -alias {{ $.Values.image.networkmapCertAlias }} -keystore /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts - yes | keytool -importcert -file {{ $.Values.volume.baseDir }}/certificates/doorman/doorman.crt -storepass changeit -alias {{ $.Values.image.doormanCertAlias }} -keystore /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts - {{- end }} - - # command to run corda jar and perform initial-registration - java $JAVA_OPTIONS -jar ${CORDA_HOME}/corda.jar initial-registration --network-root-truststore-password ${DEFAULT_TRUSTSTORE_PASSWORD} --network-root-truststore ${BASE_DIR}/certificates/network-map-truststore.jks --base-directory=${BASE_DIR} - - #changing password of keystore. - keytool -storepasswd -new ${KEYSTORE_PASSWORD} -keystore ${BASE_DIR}/certificates/nodekeystore.jks -storepass ${DEFAULT_KEYSTORE_PASSWORD} - keytool -storepasswd -new ${KEYSTORE_PASSWORD} -keystore ${BASE_DIR}/certificates/sslkeystore.jks -storepass ${DEFAULT_KEYSTORE_PASSWORD} - keytool -storepasswd -new ${TRUSTSTORE_PASSWORD} -keystore ${BASE_DIR}/certificates/truststore.jks -storepass ${DEFAULT_TRUSTSTORE_PASSWORD} - - #changing password of nodekeystore.jks certificate. - keytool -keypasswd -alias cordaclientca -keypass ${DEFAULT_KEYSTORE_PASSWORD} -new ${KEYSTORE_PASSWORD} -keystore ${BASE_DIR}/certificates/nodekeystore.jks -storepass ${KEYSTORE_PASSWORD} - keytool -keypasswd -alias identity-private-key -keypass ${DEFAULT_KEYSTORE_PASSWORD} -new ${KEYSTORE_PASSWORD} -keystore ${BASE_DIR}/certificates/nodekeystore.jks -storepass ${KEYSTORE_PASSWORD} - - #changing password of sslkeystore.jks certificate. 
- keytool -keypasswd -alias cordaclienttls -keypass ${DEFAULT_KEYSTORE_PASSWORD} -new ${KEYSTORE_PASSWORD} -keystore ${BASE_DIR}/certificates/sslkeystore.jks -storepass ${KEYSTORE_PASSWORD} - - # create dummy file to perform check if last line of the container is executed or not - touch ${BASE_DIR}/certificates/done.txt - volumeMounts: - - name: node-volume - mountPath: "{{ $.Values.volume.baseDir }}" - readOnly: false - - name: certificates - mountPath: "{{ $.Values.volume.baseDir }}/certificates" - readOnly: false - - name: nodeconf - mountPath: "{{ $.Values.volume.baseDir }}/node.conf" - subPath: "node.conf" - readOnly: false - - name: creds - mountPath: "/opt/node/creds" - readOnly: false - - name: store-certs - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - - name: JAVA_OPTIONS - value: -Xmx512m - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - - OUTPUT_PATH=${BASE_DIR} - - # setting up env to get secrets from vault - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - - # perform check if certificates are ready or not, and upload certificate into vault when ready - COUNTER=1 - cd ${BASE_DIR}/certificates - while [ "$COUNTER" -lt {{ $.Values.healthcheck.readinessthreshold }} ] - do - if [ -e nodekeystore.jks ] && [ -e sslkeystore.jks ] && [ -e truststore.jks ] && [ -e done.txt ] - then - echo "found certificates, performing vault put" - (echo '{"data": {"nodekeystore.jks": "'; base64 ${BASE_DIR}/certificates/nodekeystore.jks; echo '"}}') | curl -H "X-Vault-Token: ${VAULT_TOKEN}" -d @- ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/nodekeystore - (echo '{"data": {"sslkeystore.jks": "'; base64 ${BASE_DIR}/certificates/sslkeystore.jks; echo '"}}') | curl -H "X-Vault-Token: ${VAULT_TOKEN}" -d @- ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/sslkeystore - (echo '{"data": {"truststore.jks": "'; base64 ${BASE_DIR}/certificates/truststore.jks; echo '"}}') | curl -H "X-Vault-Token: ${VAULT_TOKEN}" -d @- ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/truststore - # get nodekeystore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/nodekeystore | jq -r 'if .errors then . else . end') - TLS_NODEKEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "nodekeystore.jks" ]' 2>&1) - # get sslkeystore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/sslkeystore | jq -r 'if .errors then . else . end') - TLS_SSLKEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "sslkeystore.jks" ]' 2>&1) - # get truststore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/truststore | jq -r 'if .errors then . else . 
end') - TLS_TRUSTSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "truststore.jks" ]' 2>&1) - if [ "$TLS_NODEKEYSTORE" == "null" ] || [ "$TLS_SSLKEYSTORE" == "null" ] || [ "$TLS_TRUSTSTORE" == "null" ] || [[ "$TLS_NODEKEYSTORE" == "parse error"* ]] || [[ "$TLS_SSLKEYSTORE" == "parse error"* ]] || [[ "$TLS_TRUSTSTORE" == "parse error"* ]] - then - echo "certificates write or read fail" - sleep {{ $.Values.healthcheck.readinessthreshold }} - if [ "$COUNTER" -ge {{ $.Values.vault.retries }} ] - then - echo "Retry attempted $COUNTER times, certificates have not been saved" - exit 1 - fi - fi - COUNTER=`expr "$COUNTER" + 1` - fi - done - volumeMounts: - - name: node-volume - mountPath: "{{ $.Values.volume.baseDir }}" - readOnly: false - - name: certificates - mountPath: "{{ $.Values.volume.baseDir }}/certificates" - readOnly: false - - name: nodeconf - mountPath: "{{ $.Values.volume.baseDir }}/node.conf" - subPath: "node.conf" - readOnly: false - initContainers: - - name: init-nodeconf - image : {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.volume.baseDir }} - - name: KS_SECRET_PREFIX - value: {{ .Values.vault.keystoresecretprefix }} - - name: DB_SECRET_PREFIX - value: {{ .Values.vault.dbsecretprefix }} - - name: RPCUSER_SECRET_PREFIX - value: {{ .Values.vault.rpcusersecretprefix }} - command: ["/bin/sh","-c"] - args: - - |- - #!/bin/bash - # delete previously created node.conf, and create a new node.conf - rm -f ${BASE_DIR}/node.conf; - touch ${BASE_DIR}/node.conf; - - # setting up env to get secrets from vault - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - echo "Getting secrets from Vault Server" - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - - # save keyStorePassword & trustStorePassword from vault - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${KS_SECRET_PREFIX} | jq -r 'if .errors then . else . end') - CONF_KEYSTOREPASSWORD=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["defaultKeyStorePassword"]') - CONF_TRUSTSTOREPASSWORD=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["defaultTrustStorePassword"]') - - # save dataSourceUserPassword from vault - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${DB_SECRET_PREFIX} | jq -r 'if .errors then . else . end') - CONF_DATASOURCEPASSWORD=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["{{ .Values.credentials.dataSourceUser }}"]') - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${RPCUSER_SECRET_PREFIX} | jq -r 'if .errors then . else . 
end') - - #For more information for node.Conf fields please refer to: https://docs.corda.r3.com/releases/4.0/corda-configuration-file.html - cat << EOF > ${BASE_DIR}/node.conf - p2pAddress : "{{ .Values.nodeConf.p2p.url }}:{{ .Values.nodeConf.p2p.port }}" - myLegalName : "{{ .Values.nodeConf.legalName }}" - keyStorePassword : "${CONF_KEYSTOREPASSWORD}" - trustStorePassword : "${CONF_TRUSTSTOREPASSWORD}" - transactionCacheSizeMegaBytes : {{ .Values.nodeConf.transactionCacheSizeMegaBytes }} - attachmentContentCacheSizeMegaBytes : {{ .Values.nodeConf.attachmentContentCacheSizeMegaBytes }} - detectPublicIp = {{ .Values.nodeConf.detectPublicIp }} - additionalP2PAddresses = ["{{ .Values.nodeConf.ambassadorAddress }}"] - devMode : {{ .Values.nodeConf.devMode }} - dataSourceProperties = { - dataSourceClassName = "{{ .Values.nodeConf.dataSourceClassName }}" - dataSource.url = "{{ .Values.nodeConf.dataSourceUrl }}" - dataSource.user = {{ .Values.credentials.dataSourceUser }} - dataSource.password = "${CONF_DATASOURCEPASSWORD}" - } - database = { - exportHibernateJMXStatistics = {{ .Values.nodeConf.database.exportHibernateJMXStatistics }} - } - jarDirs = ["{{ .Values.nodeConf.jarPath }}"] - EOF - - if [ -z "{{ .Values.nodeConf.compatibilityZoneURL }}" ] - then - echo 'networkServices = { - doormanURL = "{{ .Values.nodeConf.doormanURL }}" - networkMapURL = "{{ .Values.nodeConf.networkMapURL }}" - }' >> ${BASE_DIR}/node.conf - else - echo 'compatibilityZoneURL : "{{ .Values.nodeConf.compatibilityZoneURL }}"' >> ${BASE_DIR}/node.conf - fi - - if [ -z "{{ .Values.nodeConf.jvmArgs }}" ] - then - echo 'jvmArgs is not configured' - else - echo 'jvmArgs = "{{ .Values.nodeConf.jvmArgs }}" ' >> ${BASE_DIR}/node.conf - fi - - if [ -z "{{ .Values.nodeConf.sshd.port }}" ] - then - echo 'sshd port is not configured' - else - echo 'sshd { port = {{ .Values.nodeConf.sshd.port }} } ' >> ${BASE_DIR}/node.conf - fi - - if [ -z "{{ .Values.nodeConf.systemProperties }}" ] - then - echo 'systemProperties is not configured' - else - echo 'systemProperties = {{ .Values.nodeConf.systemProperties }} ' >> ${BASE_DIR}/node.conf - fi - - if [ -z "{{ .Values.nodeConf.exportJMXTo }}" ] - then - echo 'exportJMXTo is not configured' - else - echo 'exportJMXTo = {{ .Values.nodeConf.exportJMXTo }} ' >> ${BASE_DIR}/node.conf - fi - - if [ -z "{{ .Values.nodeConf.messagingServerAddress }}" ] - then - echo 'The address of the ArtemisMQ broker instance is not configured' - else - echo 'messagingServerAddress : "{{ .Values.nodeConf.messagingServerAddress }}" ' >> ${BASE_DIR}/node.conf - fi - - if [ -z "{{ .Values.credentials.rpcUser }}" ] - then - echo 'rpc useer is not configured' - else - echo 'rpcUsers : [' >> ${BASE_DIR}/node.conf - {{- range $.Values.credentials.rpcUser }} - echo '{ username={{ .name }} ,permissions={{ .permissions }} , ' >> ${BASE_DIR}/node.conf - echo " password=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["{{ .name }}"]') }" >> ${BASE_DIR}/node.conf - {{- end }} - echo ']' >> ${BASE_DIR}/node.conf - fi - - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${KS_SECRET_PREFIX} | jq -r 'if .errors then . else . 
end') - if [ "{{ .Values.nodeConf.rpcSettings.useSsl }}" == true ] - then - echo "rpcSettings { - standAloneBroker = {{ .Values.nodeConf.rpcSettings.standAloneBroker }} - address = "{{ .Values.nodeConf.rpcSettings.address }}" - adminAddress = "{{ .Values.nodeConf.rpcSettings.adminAddress }}" - useSsl = {{ .Values.nodeConf.rpcSettings.useSsl }} - ssl = { - keyStorePassword = $(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["sslkeyStorePassword"]') - trustStorePassword = $(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["ssltrustStorePassword"]') - certificatesDirectory = ${BASE_DIR}/{{ .Values.nodeConf.rpcSettings.ssl.certificatesDirectory }} - sslKeystore = ${BASE_DIR}/{{ .Values.nodeConf.rpcSettings.ssl.certificatesDirectory }}/{{ .Values.nodeConf.rpcSettings.ssl.sslKeystoreFileName }} - trustStoreFile = ${BASE_DIR}/{{ .Values.nodeConf.rpcSettings.ssl.certificatesDirectory }}/{{ .Values.nodeConf.rpcSettings.ssl.trustStoreFileName }} - } - }" >> ${BASE_DIR}/node.conf - else - echo 'rpcSettings { - standAloneBroker = {{ .Values.nodeConf.rpcSettings.standAloneBroker }} - address = "{{ .Values.nodeConf.rpcSettings.address }}" - adminAddress = "{{ .Values.nodeConf.rpcSettings.adminAddress }}" - }' >> ${BASE_DIR}/node.conf - fi - echo "node.conf created in ${BASE_DIR}" - volumeMounts: - - name: nodeconf - mountPath: "{{ $.Values.volume.baseDir }}" - - name: init-certificates - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{.Values.vault.certsecretprefix}} - - name: H2SSL_SECRET_PREFIX - value: {{ .Values.vault.h2sslsecretprefix }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - # To check if custom nodekeystore is retrived from vault, if yes then store it in nodekeystore.jks - validateVaultResponseCustomnodeKeystore () { - if echo ${2} | grep "errors"; - then - echo "custom nodekeystore.jks is not provided and new one will be created." - else - echo "Found custom nodekeystore.jks" - echo "${NODE_KEY}" | base64 -d > ${OUTPUT_PATH}/nodekeystore.jks - fi - } - - # To check if certificates are already present in vault or not - validateVaultResponseKeystore () { - if echo ${2} | grep "errors"; - then - echo "Initial registration will create keystore ${1}" - else - echo "Initial registration was performed before." - exit 1 - fi - } - - # setting up env to get secrets from vault - echo "Getting secrets from Vault Server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . 
else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - - OUTPUT_PATH=${BASE_DIR} - - # get customnodekeystore from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/customnodekeystore ) - NODE_KEY=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["nodekeystore.jks"]') - validateVaultResponseCustomnodeKeystore "secret (${CERTS_SECRET_PREFIX}/customnodekeystore)" "${LOOKUP_SECRET_RESPONSE}" - - # get network-map-truststore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/networkmaptruststore | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/networkmaptruststore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TLS_NMS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["network-map-truststore"]') - echo "${TLS_NMS}" | base64 -d > ${OUTPUT_PATH}/network-map-truststore.jks - - # To check if sslkeystore,nodekeystore,truststore are present in vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/nodekeystore | jq -r 'if .errors then . else . end') - validateVaultResponseKeystore "secret on (${CERTS_SECRET_PREFIX}/nodekeystore)" "${LOOKUP_SECRET_RESPONSE}" - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/sslkeystore | jq -r 'if .errors then . else . end') - validateVaultResponseKeystore "secret on (${CERTS_SECRET_PREFIX}/sslkeystore)" "${LOOKUP_SECRET_RESPONSE}" - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/truststore | jq -r 'if .errors then . else . end') - validateVaultResponseKeystore "secret on (${CERTS_SECRET_PREFIX}/truststore)" "${LOOKUP_SECRET_RESPONSE}" - - # when using doorman and networkmap in TLS: true, and using private certificate then download certificate - if [ "{{ .Values.image.privateCertificate }}" == true ] - then - mkdir -p ${OUTPUT_PATH}/networkmap - mkdir -p ${OUTPUT_PATH}/doorman - - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/networkmap | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/networkmap" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - NETWORKMAP_CRT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["networkmap.crt"]') - echo "${NETWORKMAP_CRT}" | base64 -d > ${OUTPUT_PATH}/networkmap/networkmap.crt - - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/doorman | jq -r 'if .errors then . else . 
end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/doorman" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - DOORMAN_CRT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["doorman.crt"]') - echo "${DOORMAN_CRT}" | base64 -d > ${OUTPUT_PATH}/doorman/doorman.crt - fi - chmod 777 -R ${BASE_DIR}/; - echo "Done" - volumeMounts: - - name: certificates - mountPath: {{ $.Values.volume.baseDir }} - - name: init-credential - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{.Values.vault.address}} - - name: KUBERNETES_AUTH_PATH - value: {{.Values.vault.authpath}} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: BASE_DIR - value: /opt/node/creds - - name: KS_SECRET_PREFIX - value: {{ .Values.vault.keystoresecretprefix }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - # setting up env to get secrets from vault - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - echo "Getting secrets from Vault Server" - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - - OUTPUT_PATH=${BASE_DIR} - - # get keystore passwords from vault - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${KS_SECRET_PREFIX} | jq -r 'if .errors then . else . 
end') - validateVaultResponse "${KS_SECRET_PREFIX}" "${LOOKUP_PWD_RESPONSE}" "LOOKUPSECRETRESPONSE" - DEFAULT_TRUSTSTOREPASSWORD=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["defaultTrustStorePassword"]') - DEFAULT_KEYSTOREPASSWORD=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["defaultKeyStorePassword"]') - KEYSTOREPASSWORD=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["keyStorePassword"]') - TRUSTSTOREPASSWORD=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["trustStorePassword"]') - echo "${DEFAULT_TRUSTSTOREPASSWORD}" >> ${BASE_DIR}/default_truststore_cred - echo "${KEYSTOREPASSWORD}" >> ${BASE_DIR}/keystore_cred - echo "${TRUSTSTOREPASSWORD}" >> ${BASE_DIR}/truststore_cred - echo "${DEFAULT_KEYSTOREPASSWORD}" >> ${BASE_DIR}/default_keystore_cred - - echo "Done" - volumeMounts: - - name: creds - mountPath: "/opt/node/creds" - readOnly: false - - name: db-healthcheck - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - COUNTER=1 - FLAG=true - # perform health check if db is up and running before starting corda node - while [ "$COUNTER" -le {{ $.Values.healthcheck.readinessthreshold }} ] - do - DB_NODE={{ .Values.nodeConf.dbUrl }}:{{ .Values.nodeConf.dbPort }} - STATUS=$(nc -vz $DB_NODE 2>&1 | grep -c open ) - - if [ "$STATUS" == 0 ] - then - FLAG=false - else - FLAG=true - echo "DB up and running" - fi - - if [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, retrying after {{ $.Values.healthcheck.readinesscheckinterval }} seconds" - COUNTER=`expr "$COUNTER" + 1` - sleep {{ $.Values.healthcheck.readinesscheckinterval }} - else - echo "SUCCESS!" - echo "DB up and running!" - exit 0 - break - fi - done - - if [ "$COUNTER" -gt {{ $.Values.healthcheck.readinessthreshold }} ] || [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, no DB up and running. Giving up!" - exit 1 - break - fi - imagePullSecrets: - - name: {{ .Values.image.imagePullSecret }} - volumes: - - name: node-volume - emptyDir: - medium: Memory - - name: certificates - emptyDir: - medium: Memory - - name: nodeconf - emptyDir: - medium: Memory - - name: creds - emptyDir: - medium: Memory - diff --git a/platforms/r3-corda/charts/corda-node-initial-registration/values.yaml b/platforms/r3-corda/charts/corda-node-initial-registration/values.yaml deleted file mode 100644 index 6884c027bbc..00000000000 --- a/platforms/r3-corda/charts/corda-node-initial-registration/values.yaml +++ /dev/null @@ -1,232 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -#Provide the nodeName for node -#Eg. nodeName: bank1 -nodeName: bank1 - -#Provide the replica set for node deployed -#Eg. replicas: 1 -replicas: 1 - -metadata: - #Provide the namespace - #Eg. namespace: default - namespace: default - #Provide the custom labels - #NOTE: Provide labels other than name, release name , release service, chart version , chart name , app. - #Eg. labels: - # role: create_channel - labels: - -image: - #Provide the containerName of image - #Eg. containerName: ghcr.io/hyperledger/bevel-corda:4.9 - containerName: ghcr.io/hyperledger/bevel-corda:4.9 - #Provide the name of image for init container - #Eg. 
name: ghcr.io/hyperledger/bevel-alpine:latest - initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - #Provide the image pull secret of image - #Eg. pullSecret: regcred - imagePullSecret: regcred - #Provide true or false if private certificate to be added - #Eg. privateCertificate: true - privateCertificate: true - #Provide true or false if private certificate to be added - #Eg. doormanCertAlias: doorman.fracordakubetest7.com - doormanCertAlias: doorman.fracordakubetest7.com - #Provide true or false if private certificate to be added - #Eg. networkmapCertAlias: networkmap.fracordakubetest7.com - networkmapCertAlias: networkmap.fracordakubetest7.com - - -#For more information for node.Conf fields please refer to: https://docs.corda.net/releases/release-V3.3/corda-configuration-file.html -nodeConf: - #The host and port on which the node is available for protocol operations over ArtemisMQ. - p2p: - url: - port: - #Specify the ambassador host:port which will be advertised in addition to p2paddress - ambassadorAddress: - rpcSettings: - useSsl: - standAloneBroker: - address: - adminAddress: - ssl: - certificatesDirectory: - sslKeystorePath: - trustStoreFilePath: - #Provide the legalName for node - #Eg. legalName: "O=Bank1,L=London,C=GB,CN=Bank1" - legalName: - messagingServerAddress: - jvmArgs: - systemProperties: - sshd: - port: - exportJMXTo: - transactionCacheSizeMegaBytes: - attachmentContentCacheSizeMegaBytes: - notary: - validating: - detectPublicIp: - database: - exportHibernateJMXStatistics: - #Provide the h2Url for node - #Eg. h2Url: bank1h2 - dbUrl: bank1h2 - #Provide the h2Port for node - #Eg. h2Port: 9101 - dbPort: 9101 - dataSourceClassName: - dataSourceUrl: - jarPath: - #Provide the nms for node - #Eg. nms: "http://rp-elb-fra-corda-kube-cluster7-2016021309.us-west-1.elb.amazonaws.com:30050" - networkMapURL: - doormanURL: - compatibilityZoneURL: - webAddress: - #Provide the jar Version for corda jar and finanace jar - #Eg. jarVersion: 3.3-corda - jarVersion: 3.3-corda - #Provide the devMode for corda node - #Eg. devMode: true - devMode: true - #Provide the enviroment variables to be set - env: - - name: JAVA_OPTIONS - value: - - name: CORDA_HOME - value: - - name: BASE_DIR - value: - -credentials: - #Provide the dataSourceUser for corda node - #Eg. dataSourceUser: - dataSourceUser: - #Provide the rpcUser for corda node - rpcUser: - - name: bank1operations - permissions: [ALL] - -volume: - #Provide the base path - #Eg. mountPath: "/opt/h2-data" - baseDir: - -resources: - #Provide the limit memory for node - #Eg. limits: "1Gi" - limits: "1Gi" - #Provide the requests memory for node - #Eg. requests: "1Gi" - requests: "1Gi" - -storage: - #Provide the provisioner for node - #Eg. provisioner: kubernetes.io/aws-ebs - provisioner: - #Provide the name for node - #Eg. name: bank1nodesc - name: bank1nodesc - #Provide the memory for node - #Eg. memory: 4Gi - memory: 4Gi - parameters: - #Provide the type for node - #Eg. type: gp2 - type: gp2 - # Provide whether the EBS volume should be encrypted or not - #Eg. encrypted: "true" - encrypted: "true" - # annotations: - # key: "value" - annotations: - - -service: -# Note: Target ports are dependent on image being used. Please change them accordingly -# nodePort should be kept empty while using service type as ClusterIP ( Values.service.type ) - #Provide the type of service - #Eg. type: NodePort or LoadBalancer etc - type: NodePort - p2p: - #Provide the p2p port for node - #Eg. 
port: 10007 - port: 10007 - #Provide the p2p node port for node - #Eg. port: 30007 - nodePort: - #Provide the p2p targetPort for node - #Eg. targetPort: 30007 - targetPort: 30007 - rpc: - #Provide the rpc port for node - #Eg. port: 10008 - port: 10008 - #Provide the rpc targetPort for node - #Eg. targetPort: 10003 - targetPort: 10003 - #Provide the rpc node port for node - #Eg. nodePort: 30007 - nodePort: - rpcadmin: - #Provide the rpcadmin port for node - #Eg. port: 10108 - port: 10108 - #Provide the rpcadmin targetPort for node - #Eg. targetPort: 10005 - targetPort: 10005 - #Provide the rpcadmin node port for node - #Eg. nodePort: 30007 - nodePort: - # annotations: - # key: "value" - annotations: - -pvc: - # annotations: - # key: "value" - annotations: - -vault: - #Provide the vault server address - #Eg. address: http://54.226.163.39:8200 - address: - #Provide the vaultrole - #Eg. role: vault-role - role: vault-role - #Provide the authpath - #Eg. authpath: cordabank1 - authpath: cordabank1 - #Provide the serviceaccountname - #Eg. serviceaccountname: vault-auth-issuer - serviceaccountname: vault-auth-issuer - #Provide the secretprefix - #Eg. dbsecretprefix: bank1/credentials/database - dbsecretprefix: bank1/credentials/database - #Provide the secretprefix - #Eg. rpcusersecretprefix: bank1/credentials/rpcusers - rpcusersecretprefix: bank1/credentials/rpcusers - #Provide the secretprefix - #Eg. keystoresecretprefix: bank1/credentials/keystore - keystoresecretprefix: bank1/credentials/keystore - #Provide the secretprefix - #Eg. certsecretprefix: bank1/certs - certsecretprefix: bank1/certs - # Number of retries to check contents from vault -  retries: - -healthcheck: - #Provide the interval in seconds you want to iterate till db to be ready - #Eg. readinesscheckinterval: 5 - readinesscheckinterval: 5 - #Provide the threshold till you want to check if specified db up and running - #Eg. readinessthreshold: 2 - readinessthreshold: 2 diff --git a/platforms/r3-corda/charts/corda-node/Chart.yaml b/platforms/r3-corda/charts/corda-node/Chart.yaml index 54ec97a8dc2..cd2a82bcf2e 100644 --- a/platforms/r3-corda/charts/corda-node/Chart.yaml +++ b/platforms/r3-corda/charts/corda-node/Chart.yaml @@ -5,7 +5,21 @@ ############################################################################################## apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-os: Deploys the r3corda node." name: corda-node -version: 1.0.0 +description: "R3 Corda: Deploys the Corda Open-source node." 
+version: 1.0.2 +appVersion: "latest" +keywords: + - bevel + - corda + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/r3-corda/charts/corda-node/README.md b/platforms/r3-corda/charts/corda-node/README.md index 18d3fb5b88b..c254aeea2ba 100644 --- a/platforms/r3-corda/charts/corda-node/README.md +++ b/platforms/r3-corda/charts/corda-node/README.md @@ -3,236 +3,166 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) - -# Node Deployment - -- [Node Deployment Helm Chart](#Node-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Contributing](#contributing) -- [License](#license) - - -## node Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/develop/platforms/r3-corda/charts/corda-node) helps to delpoy the r3corda node. - - -## Prerequisites ---- -Before deploying the chart please ensure you have the following prerequisites: +# corda-node -- Node's database up and running. -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm is installed. +This chart is a component of Hyperledger Bevel. The corda-node chart deploys an R3 Corda Open-source node that can be configured either as a notary or as a regular node. If enabled, the keys are stored in the configured Vault and also saved as Kubernetes secrets. See [Bevel documentation](https://hyperledger-bevel.readthedocs.io/en/latest/) for details. +## TL;DR -This chart has following structue: ``` . ├── node │ ├── Chart.yaml │ ├── templates │ │ ├── deployment.yaml │ │ ├── _helpers.tpl │ │ ├── pvc.yaml │ │ └── service.yaml │ └── values.yaml +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install notary bevel/corda-node ``` -Type of files used: +## Prerequisites -- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed. -- `deployment.yaml`: This file is a configuration file for deployement in Kubernetes.It creates a deployment file with a specified number of replicas and defines various settings for the deployment.It includes an init container for initializing the retrieves secrets from Vault and checks if node registration is complete, and a main container for running the r3corda node.It also specifies volume mounts for storing certificates and data. -- `pvc.yaml` : A PersistentVolumeClaim (PVC) is a request for storage by a user. -- `service.yaml` : This file defines a Kubernetes Service with multiple ports for protocols and targets, and supports Ambassador proxy annotations for specific configurations when using the "ambassador" proxy provider. -- `chart.yaml` : Provides metadata about the chart, such as its name, version, and description. -- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the image, nodeconfig, credenatials, storage, service , vault, etc. -- `_helpers.tpl` : A template file used for defining custom labels and ports for the metrics in the Helm chart.
+- Kubernetes 1.19+ +- Helm 3.2.0+ - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/r3-corda/charts/corda-node/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options: +If Hashicorp Vault is used, then +- HashiCorp Vault Server 1.13.1+ -## Parameters ---- +> **Important**: Ensure the `corda-init` chart has been installed before installing this. Also check the dependent charts. -### Name +## Installing the Chart -| Name | Description | Default Value | -| -----------| -------------------------------------------------- | ------------- | -| name | Provide the name of the node | bank1 | +To install the chart with the release name `notary`: -### Metadata +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install notary bevel/corda-node +``` -| Name | Description | Default Value | -| ----------------| -----------------------------------------------------------------| ------------- | -| namespace | Provide the namespace for the Node Generator | default | -| labels | Provide any additional labels for the Node Generator | "" | +The command deploys the chart on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. -### Image +> **Tip**: List all releases using `helm list` -| Name | Description | Default Value | -| ------------------------ | --------------------------------------------------------------------------------------- | --------------- | -| initContainerName | Provide the alpine utils image, which is used for all init-containers of deployments/jobs | "" | -| containerName | Provide the containerName of image | "" | -| imagePullSecret | Provide the image pull secret of image | regcred | -| gitContainerName | Provide the name of image for git clone container | "" | -| privateCertificate | Provide true or false if private certificate to be added | "true" | -| doormanCertAlias | Provide true or false if private certificate to be added | "" | -| networkmapCertAlias | Provide true or false if private certificate to be added | "" | - -### NodeConf - -| Name | Description | Default Value | -| ------------------------ | -------------------------------------------------------------------------------------- | --------------- | -| p2p | The host and port on which the node is available for protocol operations over ArtemisMQ | "" | -| ambassadorAddress | Specify ambassador host:port which will be advertised in addition to p2paddress | "" | -| legalName | Provide the legalName for node | "" | -| dbUrl | Provide the h2Url for node | "bank1h2" | -| dbPort | Provide the h2Port for node | "9101" | -| networkMapURL | Provide the nms for node | "" | -| doormanURL | Provide the doorman for node | "" | -| jarVersion | Provide the jar Version for corda jar and finanace jar | "3.3-corda" | -| devMode | Provide the devMode for corda node | "true" | -| useHTTPS | Provide the useHTTPS for corda node | "false" | -| env | Provide the enviroment variables to be set | "" | - -### credentials - -| Name | Description | Default Value | -| ----------------| ----------------------------------------------| ------------- | -| dataSourceUser | Provide the dataSourceUser for corda node | "" | -| rpcUser | Provide the rpcUser for corda node | bank1operations| - -### Volume - -| Name | Description | Default Value | -| 
-----------------| -----------------------| ------------- | -| baseDir | Base directory | /home/bevel | +## Uninstalling the Chart -### Resources +To uninstall/delete the `notary` deployment: -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------- | --------------- | -| limits | Provide the limit memory for node | "1Gi" | -| requests | Provide the requests memory for node | "1Gi" | - -### storage - -| Name | Description | Default Value | -| --------------------- | -------------------------------------------------------- | ------------- | -| provisioner | Provide the provisioner for node | "" | -| name | Provide the name for node | bank1nodesc | -| memory | Provide the memory for node | "4Gi" | -| type | Provide the type for node | "gp2" | -| encrypted | Provide whether the EBS volume should be encrypted or not | "true" | -| annotations | Provide the annotation of the node | "" | - -### Service - -| Name | Description | Default Value | -| --------------------- | ------------------------------------------| ------------- | -| type | Provide the type of service | NodePort | -| p2p port | Provide the tcp port for node | 10007 | -| p2p nodePort | Provide the p2p nodeport for node | 30007 | -| p2p targetPort | Provide the p2p targetPort for node | 30007 | -| rpc port | Provide the tpc port for node | 10008 | -| rpc targetPort | Provide the rpc targetport for node | 10003 | -| rpc nodePort | Provide the rpc nodePort for node | 30007 | -| rpcadmin port | Provide the rpcadmin port for node | 10108 | -| rpcadmin targetPort | Provide the rpcadmin targetport for node | 10005 | -| rpcadmin nodePort | Provide the rpcadmin nodePort for node | 30007 | - -### Vault - -| Name | Description | Default Value | -| ------------------------- | --------------------------------------------------------------------------| ------------------------- | -| address | Address/URL of the Vault server. 
| "" | -| role | Role used for authentication with Vault | vault-role | -| authpath | Authentication path for Vault | cordabank1 | -| serviceAccountName | Provide the already created service account name autheticated to vault | vault-auth-issuer | -| certSecretPrefix | Provide the vault path where the certificates are stored | bank1/certs | -| dbsecretprefix | Provide the secretprefix | bank1/credentials/database | -| rpcusersecretprefix | Provide the secretprefix | bank1/credentials/rpcusers | -| keystoresecretprefix | Provide the secretprefix | bank1/credentials/keystore | -| cordappsreposecretprefix | Provide the secretprefix | bank1/credentials/cordapps | - -### cordapps - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------- | --------------- | -| getcordapps | Provide if you want to provide jars in cordapps | "" | -| repository | Provide the repository of cordapps | "" | -| jars url | Provide url to download the jar using wget cmd | "" | - -### Healthcheck - -| Name | Description | Default Value | -| ----------------------------| ------------------------------------------------------------------------------| ------------- | -| readinesscheckinterval | Provide the interval in seconds you want to iterate till db to be ready | 5 | -| readinessthreshold | Provide the threshold till you want to check if specified db up and running | 2 | - -### ambassador - -| Name | Description | Default Value | -| ------------------------ | ------------------------------------------------------- | -------------------------- | -| component_name | Provides component name | node | -| external_url_suffix | Provides the suffix to be used in external URL | org1.blockchaincloudpoc.com | -| p2p_ambassador | Provide the p2p port for ambassador | 10007 | - - - - -## Deployment ---- - -To deploy the node Helm chart, follow these steps: - -1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/r3-corda/charts/corda-node/values.yaml) file to set the desired configuration values. -2. Run the following Helm command to install, upgrade,verify, delete the chart: - -To install the chart: ```bash -helm repo add bevel https://hyperledger.github.io/bevel/ -helm install ./corda-node +helm uninstall notary ``` -To upgrade the chart: -```bash -helm upgrade ./corda-node -``` +The command removes all the Kubernetes components associated with the chart and deletes the release. -To verify the deployment: -```bash -kubectl get jobs -n -``` -Note : Replace `` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods. +## Parameters -To delete the chart: -```bash -helm uninstall -``` -Note : Replace `` with the desired name for the release. +### Global parameters +These parameters are refered to as same in each parent or child chart +| Name | Description | Default Value | +|--------|---------|-------------| +|`global.serviceAccountName` | The serviceaccount name that will be used for Vault Auth management| `vault-auth` | +| `global.cluster.provider` | Kubernetes cluster provider like AWS EKS or minikube. 
Currently ony `aws` and `minikube` is tested | `aws` | +| `global.cluster.cloudNativeServices` | only `false` is implemented, `true` to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) is for future | `false` | +| `global.vault.type` | Type of Vault to support other providers. Currently, only `hashicorp` is supported. | `hashicorp` | +| `global.vault.role` | Role used for authentication with Vault | `vault-role` | +| `global.vault.address`| URL of the Vault server. | `""` | +| `global.vault.authPath` | Authentication path for Vault | `supplychain` | +| `global.vault.secretEngine` | The value for vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | The value for vault secret prefix which must start with `data/` | `data/supplychain` | +| `global.proxy.provider` | The proxy or Ingress provider. Can be `none` or `ambassador` | `ambassador` | +| `global.proxy.externalUrlSuffix` | The External URL suffix at which the Corda P2P service will be available | `test.blockchaincloudpoc.com` | +| `global.proxy.p2p` | The external port at which the Corda P2P service will be available. This port must be unique for a single cluster and enabled on Ambassador. | `15010` | + +### Storage + +| Name | Description | Default Value | +|--------|---------|-------------| +| `storage.size` | Size of the Volume needed for Corda node | `1Gi` | +| `storage.dbSize` | Size of the Volume needed for H2 Database node | `2Gi` | +| `storage.allowedTopologies.enabled` | Check [bevel-storageclass](../../../shared/charts/bevel-storageclass/README.md) for details | `false` | + +### TLS +This is where you can override the values for the [corda-certs-gen subchart](../corda-certs-gen/README.md). + +| Name | Description | Default Value | +|--------|---------|-------------| +| `tls.enabled` | Flag to enable TLS and certificate generation | `true` | + +### Image +| Name | Description | Default Value | +| -------------| ---------- | --------- | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | +| `image.pullPolicy` | Pull policy to be used for the Docker images | `IfNotPresent` | +| `image.h2` | H2 DB image repository and tag | `ghcr.io/hyperledger/h2:2018`| +| `image.corda.repository` | Corda Image repository | `ghcr.io/hyperledger/bevel-corda`| +| `image.corda.tag` | Corda image tag as per version of Corda | `4.9`| +| `image.initContainer` | Image repository and tag for alpine container | `ghcr.io/hyperledger/bevel-alpine:latest` | +| `image.hooks.repository` | Corda hooks image repository | `ghcr.io/hyperledger/bevel-build` | +| `image.hooks.tag` | Corda hooks image tag | `jdk8-stable` | + + +### Corda nodeConf + +This contains all the parameters for the Corda node. Please read [R3 Corda documentation](https://docs.r3.com/en/platform/corda/4.9/community/corda-configuration-fields.html) for detailed explanation of each parameter. 
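As a minimal sketch of how a handful of these parameters are typically overridden at install time (the full parameter table follows below; the Vault address, namespace, and override file name here are illustrative placeholders, while the remaining values mirror the defaults documented in the tables):

```bash
# Illustrative only: vault.example.com, the namespace and the file name are
# placeholders; the other values mirror this chart's documented defaults.
cat > notary-overrides.yaml <<'EOF'
global:
  vault:
    address: http://vault.example.com:8200
    authPath: supplychain
  proxy:
    externalUrlSuffix: test.blockchaincloudpoc.com
nodeConf:
  legalName: "O=Notary,OU=Notary,L=London,C=GB"
  notary:
    enabled: true
    validating: false
  doormanURL: https://supplychain-doorman.supplychain-ns
  networkMapURL: https://supplychain-nms.supplychain-ns
EOF

# Install the node as a non-validating notary with the overrides above.
helm install notary bevel/corda-node \
  --namespace supplychain-ns --create-namespace \
  --values notary-overrides.yaml
```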
+ +| Name | Description | Default Value | +| ----------------| ----------- | ------------- | +| `nodeConf.defaultKeystorePassword` | Default Keystore password, do not change this | `cordacadevpass` | +| `nodeConf.defaultTruststorePassword` | Default Truststore password, do not change this | `trustpass` | +| `nodeConf.keystorePassword` | New keystore password which will be set after initialisation | `newpass` | +| `nodeConf.truststorePassword` | New truststore password which will be set after initialisation | `newtrustpass` | +| `nodeConf.sslkeyStorePassword` | SSL keystore password which will be set after initialisation | `sslpass` | +| `nodeConf.ssltrustStorePassword` | SSL truststore password which will be set after initialisation | `ssltrustpass` | +| `nodeConf.removeKeysOnDelete` | Flag to delete the keys when the release is uninstalled | `true` | +| `nodeConf.rpcUser` | Array of RPC Users that you want to create at initialization | `- name: nodeoperations`<br>`password: nodeoperationsAdmin`<br>`permissions: [ALL]` | +| `nodeConf.p2pPort` | P2P Port for Corda Node | `10002` | +| `nodeConf.rpcPort` | RPC Port for Corda Node | `10003` | +| `nodeConf.rpcadminPort` | RPC Admin Port for Corda Node | `10005` | +| `nodeConf.rpcSettings.useSsl` | Use SSL for RPC | `false` | +| `nodeConf.rpcSettings.standAloneBroker` | Standalone Broker setting for RPC | `false` | +| `nodeConf.rpcSettings.address` | Address for RPC Service | `"0.0.0.0:10003"` | +| `nodeConf.rpcSettings.adminAddress` | Address for RPC Admin Service | `"0.0.0.0:10005"` | +| `nodeConf.rpcSettings.ssl.certificatesDirectory` | SSL Certificate directory when useSsl is `true` | `na-ssl-false` | +| `nodeConf.rpcSettings.ssl.sslKeystorePath` | SSL Keystore path when useSsl is `true` | `na-ssl-false` | +| `nodeConf.rpcSettings.ssl.trustStoreFilePath` | SSL Truststore path when useSsl is `true` | `na-ssl-false` | +| `nodeConf.legalName` | X.509 Subject for Corda Node Identity. Must be unique for different nodes in a network | `"O=Notary,OU=Notary,L=London,C=GB"` | +| `nodeConf.messagingServerAddress` | Messaging Server Address | `""` | +| `nodeConf.jvmArgs` | Additional JVM Args | `""` | +| `nodeConf.systemProperties` | Additional System properties | `""` | +| `nodeConf.sshd.port` | SSHD Admin port | `""` | +| `nodeConf.exportJMXTo` | JMX Reporter Address | `""` | +| `nodeConf.transactionCacheSizeMegaBytes` | Specify how much memory should be used for caching of ledger transactions in memory (in MB) | `8` | +| `nodeConf.attachmentContentCacheSizeMegaBytes` | Specify how much memory should be used to cache attachment contents in memory (in MB) | `10` | +| `nodeConf.notary.enabled` | Enable this Corda node as a Notary | `true` | +| `nodeConf.notary.validating` | Flag to set up a validating or non-validating notary | `true` | +| `nodeConf.notary.serviceLegalName` | Specify the legal name of the notary cluster or node | `"O=Notary Service,OU=Notary,L=London,C=GB"` | +| `nodeConf.detectPublicIp` | Flag to detect public IP | `false` | +| `nodeConf.database.exportHibernateJMXStatistics` | Whether to export Hibernate JMX statistics | `false` | +| `nodeConf.dbPort` | Database port | `9101` | +| `nodeConf.dataSourceUser` | Database username | `sa` | +| `nodeConf.dataSourcePassword` | Database user password | `admin` | +| `nodeConf.dataSourceClassName` | JDBC Data Source class name | `"org.h2.jdbcx.JdbcDataSource"` | +| `nodeConf.jarPath` | Additional Jar path | `"/data/corda-workspace/h2/bin"` | +| `nodeConf.networkMapURL` | Root address of the network map service. | `https://supplychain-nms.supplychain-ns` | +| `nodeConf.doormanURL` | Root address of the doorman service | `https://supplychain-doorman.supplychain-ns` | +| `nodeConf.devMode` | Flag to set the node to run in development mode. | `false` | +| `nodeConf.javaOptions` | Additional JAVA_OPTIONS for Corda | `"-Xmx512m"` | + +### CordApps + +| Name | Description | Default Value | +|--------|---------|-------------| +| `cordApps.getCordApps` | Flag to download CordApps from the URLs provided | `false` | +| `cordApps.jars` | List of `url`s from where the CordApps will be downloaded | `- url: ""` | +### Resources - -## Contributing ---- -If you encounter any bugs, have suggestions, or would like to contribute to the [node Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/develop/platforms/r3-corda/charts/corda-node), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel).
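If the defaults in the table below prove too small for a busy node, they can be raised on an existing release; a minimal sketch, assuming the `notary` release from the examples above and purely illustrative sizes:

```bash
# Illustrative only: bump the Corda pod memory settings documented below
# on an already-installed release, keeping all other values unchanged.
helm upgrade notary bevel/corda-node \
  --reuse-values \
  --set resources.node.memLimit=4G \
  --set resources.node.memRequest=2G \
  --set resources.db.memLimit=2G
```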
+| Name | Description | Default Value | +|--------|---------|-------------| +| `resources.db.memLimit` | Kubernetes Memory limit for H2 Database pod | `1G` | +| `resources.db.memRequest` | Kubernetes Memory request for H2 Database pod | `512M` | +| `resources.node.memLimit` | Kubernetes Memory limit for Corda pod | `2G` | +| `resources.node.memRequest` | Kubernetes Memory request for Corda pod | `1G` | - ## License This chart is licensed under the Apache v2.0 license. -Copyright © 2023 Accenture +Copyright © 2024 Accenture ### Attribution diff --git a/platforms/r3-corda/charts/corda-node/requirements.yaml b/platforms/r3-corda/charts/corda-node/requirements.yaml new file mode 100644 index 00000000000..35059a61d0d --- /dev/null +++ b/platforms/r3-corda/charts/corda-node/requirements.yaml @@ -0,0 +1,14 @@ +dependencies: + - name: bevel-storageclass + alias: storage + repository: "file://../../../shared/charts/bevel-storageclass" + tags: + - storage + version: ~1.0.0 + - name: corda-certs-gen + alias: tls + repository: "file://../corda-certs-gen" + tags: + - bevel + version: ~1.0.0 + condition: tls.enabled diff --git a/platforms/r3-corda/charts/corda-node/templates/_helpers.tpl b/platforms/r3-corda/charts/corda-node/templates/_helpers.tpl index 592feeaa311..e05e77e8efa 100644 --- a/platforms/r3-corda/charts/corda-node/templates/_helpers.tpl +++ b/platforms/r3-corda/charts/corda-node/templates/_helpers.tpl @@ -1,10 +1,44 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} - -{{- define "application.labels" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} \ No newline at end of file +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "corda-node.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "corda-node.fullname" -}} +{{- $name := default .Chart.Name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "corda-node.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "corda-node.doormanDomain" -}} +{{- $url := .Values.nodeConf.doormanURL -}} +{{- $urlParts := splitList "//" $url -}} +{{- $protocol := index $urlParts 0 -}} +{{- $domainParts := splitList "/" (index $urlParts 1) -}} +{{- index $domainParts 0 -}} +{{- end -}} + +{{- define "corda-node.nmsDomain" -}} +{{- $url := .Values.nodeConf.networkMapURL -}} +{{- $urlParts := splitList "//" $url -}} +{{- $protocol := index $urlParts 0 -}} +{{- $domainParts := splitList "/" (index $urlParts 1) -}} +{{- index $domainParts 0 -}} +{{- end -}} diff --git a/platforms/r3-corda/charts/corda-node/templates/deployment.yaml b/platforms/r3-corda/charts/corda-node/templates/deployment.yaml deleted file mode 100644 index ed6aa47b0a6..00000000000 --- a/platforms/r3-corda/charts/corda-node/templates/deployment.yaml +++ /dev/null @@ -1,539 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ .Values.nodeName }} - {{- if .Values.deployment.annotations }} - annotations: -{{ toYaml .Values.deployment.annotations | indent 8 }} - {{- end }} - namespace: {{ .Values.metadata.namespace }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . 
| nindent 2 }} -spec: - replicas: {{ .Values.replicas }} - selector: - matchLabels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - strategy: - type: Recreate - rollingUpdate: null - template: - metadata: - labels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - hostname: {{ .Values.nodeName }} - securityContext: - fsGroup: 1000 - containers: - - name: corda-node - image: {{ .Values.image.containerName }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - - # Setting up enviroment variables required for corda jar - {{- range $.Values.nodeConf.env }} - export {{ .name }}="{{ .value }}" - {{- end }} - - # import self signed tls certificate of doorman and networkmap, since java only trusts certificate signed by well known CA - {{- if .Values.image.privateCertificate }} - yes | keytool -importcert -file {{ $.Values.volume.baseDir }}/certificates/networkmap/networkmap.crt -storepass changeit -alias {{ $.Values.image.networkmapCertAlias }} -keystore /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts - yes | keytool -importcert -file {{ $.Values.volume.baseDir }}/certificates/doorman/doorman.crt -storepass changeit -alias {{ $.Values.image.doormanCertAlias }} -keystore /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts - {{- end }} - - # to clean network-parameters on every restart - rm -rf ${BASE_DIR}/network-parameters - - # Run schema migration scripts for corDApps - java -Djavax.net.ssl.keyStore=${BASE_DIR}/certificates/sslkeystore.jks -Djavax.net.ssl.keyStorePassword=newpass $JAVA_OPTIONS -jar ${CORDA_HOME}/corda.jar run-migration-scripts --core-schemas --app-schemas --base-directory=${BASE_DIR} - # command to run corda jar, we are setting javax.net.ssl.keyStore as ${BASE_DIR}/certificates/sslkeystore.jks since keystore gets reset when using h2 ssl - java -Djavax.net.ssl.keyStore=${BASE_DIR}/certificates/sslkeystore.jks -Djavax.net.ssl.keyStorePassword=newpass $JAVA_OPTIONS -jar ${CORDA_HOME}/corda.jar --base-directory=${BASE_DIR} 2>&1 - resources: - limits: - memory: {{ .Values.resources.limits }} - requests: - memory: {{ .Values.resources.requests }} - ports: - - containerPort: {{ .Values.service.p2p.targetPort }} - name: p2p - - containerPort: {{ .Values.service.rpc.targetPort }} - name: rpc - - containerPort: {{ .Values.service.rpcadmin.targetPort }} - name: rpcadmin - volumeMounts: - - name: node-volume - mountPath: "{{ $.Values.volume.baseDir }}" - readOnly: false - - name: certificates - mountPath: "{{ $.Values.volume.baseDir }}/certificates" - readOnly: false - - name: nodeconf - mountPath: "{{ $.Values.volume.baseDir }}/node.conf" - subPath: "node.conf" - readOnly: false - livenessProbe: - tcpSocket: - port: {{ .Values.service.p2p.targetPort }} - initialDelaySeconds: 65 - periodSeconds: 30 - - name: corda-logs - image: {{ .Values.image.initContainerName }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - {{- range $.Values.nodeConf.env }} - export {{ .name }}="{{ .value }}" - {{- end }} - COUNTER=0 - if [ -e ${BASE_DIR}/logs/node-{{ .Values.nodeName }}.log ] - then - clear - tail -f ${BASE_DIR}/logs/node-{{ .Values.nodeName }}.log - else - echo "waiting for corda to generate log, sleeping for 10s" - sleep {{ $.Values.healthcheck.readinesscheckinterval }} - fi - volumeMounts: - - name: node-volume - mountPath: "{{ 
$.Values.volume.baseDir }}" - readOnly: false - initContainers: - - name: init-checkregistration - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - # setting up env to get secrets from vault - echo "Getting secrets from Vault Server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - echo "logged into vault" - - COUNTER=1 - while [ "$COUNTER" -lt {{ $.Values.healthcheck.readinessthreshold }} ] - do - # get truststore from vault to see if registration is done or not - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/truststore | jq -r 'if .errors then . else . end') - if echo ${LOOKUP_SECRET_RESPONSE} | grep "errors" - then - sleep {{ $.Values.healthcheck.readinesscheckinterval }} - else - break - fi - COUNTER=`expr "$COUNTER" + 1` - done - - if [ "$COUNTER" -ge {{ $.Values.healthcheck.readinessthreshold }} ] - then - # printing number of trial done before giving up - echo "$COUNTER" - echo "Node registration might not have been done." - exit 1 - fi - echo "Done" - - name: init-nodeconf - image : {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.volume.baseDir }} - - name: KS_SECRET_PREFIX - value: {{ .Values.vault.keystoresecretprefix }} - - name: DB_SECRET_PREFIX - value: {{ .Values.vault.dbsecretprefix }} - - name: RPCUSER_SECRET_PREFIX - value: {{ .Values.vault.rpcusersecretprefix }} - command: ["/bin/sh","-c"] - args: - - |- - #!/bin/bash - # delete previously created node.conf, and create a new node.conf - rm -f ${BASE_DIR}/node.conf; - touch ${BASE_DIR}/node.conf; - - # setting up env to get secrets from vault - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - echo "Getting secrets from Vault Server" - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . 
else .auth.client_token end') - - # save keyStorePassword & trustStorePassword from vault - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${KS_SECRET_PREFIX} | jq -r 'if .errors then . else . end') - CONF_KEYSTOREPASSWORD=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["keyStorePassword"]') - CONF_TRUSTSTOREPASSWORD=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["trustStorePassword"]') - - # save dataSourceUserPassword from vault - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${DB_SECRET_PREFIX} | jq -r 'if .errors then . else . end') - CONF_DATASOURCEPASSWORD=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["{{ .Values.credentials.dataSourceUser }}"]') - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${RPCUSER_SECRET_PREFIX} | jq -r 'if .errors then . else . end') - - #For more information for node.Conf fields please refer to: https://docs.corda.r3.com/releases/4.0/corda-configuration-file.html - cat << EOF > ${BASE_DIR}/node.conf - p2pAddress : "{{ .Values.nodeConf.p2p.url }}:{{ .Values.nodeConf.p2p.port }}" - myLegalName : "{{ .Values.nodeConf.legalName }}" - keyStorePassword : "${CONF_KEYSTOREPASSWORD}" - trustStorePassword : "${CONF_TRUSTSTOREPASSWORD}" - transactionCacheSizeMegaBytes : {{ .Values.nodeConf.transactionCacheSizeMegaBytes }} - attachmentContentCacheSizeMegaBytes : {{ .Values.nodeConf.attachmentContentCacheSizeMegaBytes }} - detectPublicIp = {{ .Values.nodeConf.detectPublicIp }} - additionalP2PAddresses = ["{{ .Values.nodeConf.ambassadorAddress }}"] - devMode : {{ .Values.nodeConf.devMode }} - dataSourceProperties = { - dataSourceClassName = "{{ .Values.nodeConf.dataSourceClassName }}" - dataSource.url = "{{ .Values.nodeConf.dataSourceUrl }}" - dataSource.user = {{ .Values.credentials.dataSourceUser }} - dataSource.password = "${CONF_DATASOURCEPASSWORD}" - } - database = { - exportHibernateJMXStatistics = {{ .Values.nodeConf.database.exportHibernateJMXStatistics }} - } - jarDirs = ["{{ .Values.nodeConf.jarPath }}"] - EOF - if [ -z "{{ .Values.nodeConf.compatibilityZoneURL }}" ] - then - echo 'networkServices = { - doormanURL = "{{ .Values.nodeConf.doormanURL }}" - networkMapURL = "{{ .Values.nodeConf.networkMapURL }}" - }' >> ${BASE_DIR}/node.conf - else - echo 'compatibilityZoneURL : "{{ .Values.nodeConf.compatibilityZoneURL }}"' >> ${BASE_DIR}/node.conf - fi - - if [ -z "{{ .Values.nodeConf.jvmArgs }}" ] - then - echo 'jvmArgs is not configured' - else - echo 'jvmArgs = "{{ .Values.nodeConf.jvmArgs }}" ' >> ${BASE_DIR}/node.conf - fi - - if [ -z "{{ .Values.nodeConf.sshd.port }}" ] - then - echo 'sshd port is not configured' - else - echo 'sshd { port = {{ .Values.nodeConf.sshd.port }} } ' >> ${BASE_DIR}/node.conf - fi - - if [ -z "{{ .Values.nodeConf.systemProperties }}" ] - then - echo 'systemProperties is not configured' - else - echo 'systemProperties = {{ .Values.nodeConf.systemProperties }} ' >> ${BASE_DIR}/node.conf - fi - - if [ -z "{{ .Values.nodeConf.exportJMXTo }}" ] - then - echo 'exportJMXTo is not configured' - else - echo 'exportJMXTo = {{ .Values.nodeConf.exportJMXTo }} ' >> ${BASE_DIR}/node.conf - fi - - if [ -z "{{ .Values.nodeConf.messagingServerAddress }}" ] - then - echo 'The address of the ArtemisMQ broker instance is not configured' - else - echo 'messagingServerAddress : "{{ .Values.nodeConf.messagingServerAddress }}" ' >> ${BASE_DIR}/node.conf - fi - - if [ -z "{{ .Values.credentials.rpcUser }}" ] - then - echo 
'rpc useer is not configured' - else - echo 'rpcUsers : [' >> ${BASE_DIR}/node.conf - {{- range $.Values.credentials.rpcUser }} - echo '{ username={{ .name }} ,permissions={{ .permissions }}, ' >> ${BASE_DIR}/node.conf - echo " password=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["{{ .name }}"]') }" >> ${BASE_DIR}/node.conf - {{- end }} - echo ']' >> ${BASE_DIR}/node.conf - fi - - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${KS_SECRET_PREFIX} | jq -r 'if .errors then . else . end') - if [ "{{ .Values.nodeConf.rpcSettings.useSsl }}" == true ] - then - echo "rpcSettings { - standAloneBroker = {{ .Values.nodeConf.rpcSettings.standAloneBroker }} - address = "{{ .Values.nodeConf.rpcSettings.address }}" - adminAddress = "{{ .Values.nodeConf.rpcSettings.adminAddress }}" - useSsl = {{ .Values.nodeConf.rpcSettings.useSsl }} - ssl = { - keyStorePassword = $(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["sslkeyStorePassword"]') - trustStorePassword = $(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["ssltrustStorePassword"]') - certificatesDirectory = ${BASE_DIR}/{{ .Values.nodeConf.rpcSettings.ssl.certificatesDirectory }} - sslKeystore = ${BASE_DIR}/{{ .Values.nodeConf.rpcSettings.ssl.certificatesDirectory }}/{{ .Values.nodeConf.rpcSettings.ssl.sslKeystoreFileName }} - trustStoreFile = ${BASE_DIR}/{{ .Values.nodeConf.rpcSettings.ssl.certificatesDirectory }}/{{ .Values.nodeConf.rpcSettings.ssl.trustStoreFileName }} - } - }" >> ${BASE_DIR}/node.conf - else - echo 'rpcSettings { - standAloneBroker = {{ .Values.nodeConf.rpcSettings.standAloneBroker }} - address = "{{ .Values.nodeConf.rpcSettings.address }}" - adminAddress = "{{ .Values.nodeConf.rpcSettings.adminAddress }}" - }' >> ${BASE_DIR}/node.conf - fi - echo "node.conf created in ${BASE_DIR}" - volumeMounts: - - name: nodeconf - mountPath: "{{ $.Values.volume.baseDir }}" - - name: init-certificates - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - # setting up env to get secrets from vault - echo "Getting secrets from Vault Server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . 
else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - echo "logged into vault" - - OUTPUT_PATH=${BASE_DIR} - - # get nodekeystore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/nodekeystore | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/nodekeystore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TLS_NODEKEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["nodekeystore.jks"]') - echo "${TLS_NODEKEYSTORE}" | base64 -d > ${OUTPUT_PATH}/nodekeystore.jks - - # get sslkeystore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/sslkeystore | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/sslkeystore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TLS_SSLKEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["sslkeystore.jks"]') - echo "${TLS_SSLKEYSTORE}" | base64 -d > ${OUTPUT_PATH}/sslkeystore.jks - - # get truststore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/truststore | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/truststore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TLS_TRUSTSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["truststore.jks"]') - echo "${TLS_TRUSTSTORE}" | base64 -d > ${OUTPUT_PATH}/truststore.jks - - # get network-map-truststore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/networkmaptruststore | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/networkmaptruststore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TLS_NMS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["network-map-truststore"]') - echo "${TLS_NMS}" | base64 -d > ${OUTPUT_PATH}/network-map-truststore.jks - - # when using doorman and networkmap in TLS: true, and using private certificate then download certificate - if [ "{{ .Values.image.privateCertificate }}" == true ] - then - mkdir -p ${OUTPUT_PATH}/networkmap - mkdir -p ${OUTPUT_PATH}/doorman - - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/networkmap | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/networkmap" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - NETWORKMAP_CRT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["networkmap.crt"]') - echo "${NETWORKMAP_CRT}" | base64 -d > ${OUTPUT_PATH}/networkmap/networkmap.crt - - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/doorman | jq -r 'if .errors then . else . 
end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/doorman" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - DOORMAN_CRT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["doorman.crt"]') - echo "${DOORMAN_CRT}" | base64 -d > ${OUTPUT_PATH}/doorman/doorman.crt - fi - - # when using custom sslKeystore while setting in node.conf - if [ "{{ .Values.nodeConf.rpcSettings.useSsl }}" == true ] - then - mkdir -p ${OUTPUT_PATH}/${SSL_CERT_PATH} - chmod -R ${OUTPUT_PATH}/${SSL_CERT_PATH} - SSL_CERT_PATH={{ .Values.nodeConf.rpcSettings.ssl.certificatesDirectory }} - SSL_KEYSTORE_FILE_NAME_KEY={{ .Values.nodeConf.rpcSettings.ssl.sslKeystoreFileName }} - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/${SSL_KEYSTORE_FILE_NAME_KEY} | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/${SSL_KEYSTORE_FILE_NAME_KEY}" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - SSLKEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["SSL_KEYSTORE_FILE_NAME_KEY"]') - echo "${SSLKEYSTORE}" | base64 -d > ${OUTPUT_PATH}/${SSL_CERT_PATH}/${SSL_KEYSTORE_FILE_NAME_KEY} - TRUSTKEYSTORE_FILE_NAME_KEY={{ .Values.nodeConf.rpcSettings.ssl.sslKeystoreFileName }} - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/${TRUSTKEYSTORE_FILE_NAME_KEY} | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/${TRUSTKEYSTORE_FILE_NAME_KEY}" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TRUSTSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["TRUSTKEYSTORE_FILE_NAME_KEY"]') - echo "${TRUSTSTORE}" | base64 -d > ${OUTPUT_PATH}/${SSL_CERT_PATH}/${TRUSTKEYSTORE_FILE_NAME_KEY} - else - echo "" - fi - - echo "Done" - volumeMounts: - - name: certificates - mountPath: {{ $.Values.volume.baseDir }} - - name: init-healthcheck - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - COUNTER=1 - FLAG=true - # perform health check if db is up and running before starting corda node - while [ "$COUNTER" -le {{ $.Values.healthcheck.readinessthreshold }} ] - do - DB_NODE={{ .Values.nodeConf.dbUrl }}:{{ .Values.nodeConf.dbPort }} - STATUS=$(nc -vz $DB_NODE 2>&1 | grep -c open ) - if [ "$STATUS" == 0 ] - then - FLAG=false - else - FLAG=true - echo "DB up and running" - fi - if [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, retrying after {{ $.Values.healthcheck.readinesscheckinterval }} seconds" - COUNTER=`expr "$COUNTER" + 1` - sleep {{ $.Values.healthcheck.readinesscheckinterval }} - else - echo "SUCCESS!" - echo "DB up and running!" - exit 0 - break - fi - done - if [ "$COUNTER" -gt {{ $.Values.healthcheck.readinessthreshold }} ] || [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, no DB up and running. Giving up!" 
- exit 1 - break - fi - - name: init-cordapps - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - command: ["sh", "-c"] - env: - - name: BASE_DIR - value: {{ $.Values.volume.baseDir }} - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: SECRET_PREFIX - value: {{ $.Values.vault.cordappsreposecretprefix }} - args: - - |- - # crearting cordapps dir in volume to keep jars - mkdir -p {{ .Values.volume.baseDir }}/cordapps - {{- if .Values.cordapps.getcordapps }} - mkdir -p /tmp/downloaded-jars - # setting up env to get secrets from vault - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - echo "Getting secrets from Vault Server" - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - - # Save CorDapps repository login password from vault - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${SECRET_PREFIX} | jq -r 'if .errors then . else . end') - REPO_USER_PASS=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["repo_password"]') - REPO_USER=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["repo_username"]') - - # Downloading official corda provided jars using curl - {{- range .Values.cordapps.jars }} - cd /tmp/downloaded-jars && curl -u $REPO_USER:$REPO_USER_PASS -O -L {{ .url }} - {{- end }} - cp -ar /tmp/downloaded-jars/* {{ $.Values.volume.baseDir }}/cordapps - {{- end }} - volumeMounts: - - name: node-volume - mountPath: "{{ $.Values.volume.baseDir }}" - imagePullSecrets: - - name: {{ .Values.image.imagePullSecret }} - volumes: - - name: node-volume - persistentVolumeClaim: - claimName: {{ .Values.pvc.name }} - - name: certificates - emptyDir: - medium: Memory - - name: nodeconf - emptyDir: - medium: Memory - - name: nodeprops - emptyDir: - medium: Memory diff --git a/platforms/r3-corda/charts/corda-node/templates/hooks-pre-delete.yaml b/platforms/r3-corda/charts/corda-node/templates/hooks-pre-delete.yaml new file mode 100644 index 00000000000..2f987b6a44b --- /dev/null +++ b/platforms/r3-corda/charts/corda-node/templates/hooks-pre-delete.yaml @@ -0,0 +1,71 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "corda-node.fullname" . }}-pre-delete-hook + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/hook: pre-delete + helm.sh/hook-weight: "0" + helm.sh/hook-delete-policy: "hook-succeeded" + labels: + app.kubernetes.io/name: pre-delete-hook + app.kubernetes.io/component: cleanup + app.kubernetes.io/part-of: {{ include "corda-node.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +spec: + backoffLimit: 3 + completions: 1 + template: + metadata: + labels: + app.kubernetes.io/name: pre-delete-hook + app.kubernetes.io/component: cleanup + app.kubernetes.io/part-of: {{ include "corda-node.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + restartPolicy: "Never" + containers: + - name: {{ template "corda-node.fullname" . 
}}-cleanup + image: "{{ .Values.image.hooks.repository }}:{{ .Values.image.hooks.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - /bin/bash + - -c + args: + - | + + echo "{{ template "corda-node.fullname" . }} pre-delete-hook ..." + +{{- if and (ne .Values.global.cluster.provider "minikube") (.Values.global.cluster.cloudNativeServices) }} + # placeholder for cloudNative deleteSecret function +{{- else }} + + function deleteSecret { + key=$1 + kubectl delete secret ${key} --namespace {{ .Release.Namespace }} + } + +{{- end }} + +{{- if .Values.nodeConf.removeKeysOnDelete }} + +{{- if and (ne .Values.global.cluster.provider "minikube") (.Values.global.cluster.cloudNativeServices) }} + deleteSecret {{.Release.Name }}-nmskeystore + deleteSecret {{.Release.Name }}-doormankeystore + deleteSecret {{.Release.Name }}-rootcakeystore + deleteSecret {{.Release.Name }}-rootcacert + deleteSecret {{.Release.Name }}-rootcakey + deleteSecret {{.Release.Name }}-dbcert + deleteSecret {{.Release.Name }}-dbcacert +{{- else }} + deleteSecret {{.Release.Name }}-certs +{{- end }} + +{{- end }} + echo "Completed" diff --git a/platforms/r3-corda/charts/corda-node/templates/hooks-pre-install.yaml b/platforms/r3-corda/charts/corda-node/templates/hooks-pre-install.yaml new file mode 100644 index 00000000000..4192dcb5642 --- /dev/null +++ b/platforms/r3-corda/charts/corda-node/templates/hooks-pre-install.yaml @@ -0,0 +1,165 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "corda-node.fullname" . }}-pre-install-hook + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install + "helm.sh/hook-weight": "0" + "helm.sh/hook-delete-policy": "before-hook-creation" + labels: + app.kubernetes.io/name: pre-install-hook + app.kubernetes.io/component: certgen + app.kubernetes.io/part-of: {{ include "corda-node.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +spec: + backoffLimit: 1 + completions: 1 + template: + metadata: + labels: + app.kubernetes.io/name: pre-install-hook + app.kubernetes.io/component: certgen + app.kubernetes.io/part-of: {{ include "corda-node.fullname" . }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + restartPolicy: "OnFailure" + containers: + - name: corda-certgen + image: {{ .Values.image.hooks.repository }}:{{ .Values.image.hooks.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + securityContext: + runAsUser: 0 + volumeMounts: + - name: generated-config + mountPath: /home + - name: scripts-volume + mountPath: /scripts/bevel-vault.sh + subPath: bevel-vault.sh + {{- if (eq .Values.global.vault.type "hashicorp") }} + env: + - name: VAULT_ADDR + value: "{{ .Values.global.vault.address }}" + - name: VAULT_SECRET_ENGINE + value: "{{ .Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ .Values.global.vault.secretPrefix }}" + - name: KUBERNETES_AUTH_PATH + value: "{{ .Values.global.vault.authPath }}" + - name: VAULT_APP_ROLE + value: "{{ .Values.global.vault.role }}" + - name: VAULT_TYPE + value: "{{ .Values.global.vault.type }}" + {{- end }} + command: + - /bin/bash + - -c + args: + - | +{{- if (eq .Values.global.vault.type "hashicorp") }} + . /scripts/bevel-vault.sh + echo "Getting vault Token..." 
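+ # bevel-vault.sh (mounted from the bevel-vault-script ConfigMap) wraps the Vault HTTP API.
+ # As used in this chart: "init" logs in via the Kubernetes auth method and obtains a client token,
+ # "readJson" <path> reads a secret and sets SECRETS_AVAILABLE (yes/no) plus VAULT_SECRET (its JSON data),
+ # and "write" <path> <payload-file> stores a JSON payload at that path.
+ # With the default values (secretEngine: secretsv2, secretPrefix: data/supplychain), the node
+ # certificates end up at e.g. secretsv2/data/supplychain/<release-name>-certs.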
+ vaultBevelFunc "init" + #Read if secret exists in Vault + vaultBevelFunc 'readJson' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/{{ .Release.Name }}-certs" + function safeWriteSecret { + key=$1 + fpath=$2 + if [ "$SECRETS_AVAILABLE" == "yes" ] + then + # Get secret from Vault and create the k8s secret if it does not exist + kubectl get secret ${key}-certs --namespace {{ .Release.Namespace }} -o json > /dev/null 2>&1 + if [ $? -ne 0 ]; then + NMS_STORE=$(echo ${VAULT_SECRET} | jq -r '.["nmstruststore_base64"]') + NODE_CERTS=$(echo ${VAULT_SECRET} | jq -r '.["nodecert_base64"]') + NODE_KEY=$(echo ${VAULT_SECRET} | jq -r '.["nodekey_base64"]') + NODE_STORE=$(echo ${VAULT_SECRET} | jq -r '.["nodekeystore_base64"]') + echo $NMS_STORE | base64 -d > /tmp/nmstruststore.jks + echo $NODE_STORE | base64 -d > /tmp/nodekeystore.jks + echo $NODE_CERTS > /tmp/node.cer + echo $NODE_KEY > /tmp/node.key + kubectl create secret generic ${key}-certs --namespace {{ .Release.Namespace }} \ + --from-file=network-map-truststore.jks=/tmp/nmstruststore.jks --from-file=nodekeystore.jks=/tmp/nodekeystore.jks \ + --from-file=node.crt=/tmp/node.cer --from-file=node.key=/tmp/node.key + fi + else + # Save keystores/truststores to Vault + # Use -w0 to get single line base64 -w0 + NMS_STORE=$(cat ${fpath}/network-map-truststore.jks | base64 -w0) + NODE_STORE=$(cat ${fpath}/nodekeystore.jks | base64 -w0) + NODE_CERTS=$(cat ${fpath}/node.cer | base64 -w0) + NODE_KEY=$(cat ${fpath}/node.key | base64 -w0) + # create a JSON file for the data related to node crypto + echo " + { + \"data\": + { + \"nmstruststore_base64\": \"${NMS_STORE}\", + \"nodekeystore_base64\": \"${NODE_STORE}\", + \"nodecert_base64\": \"${NODE_CERTS}\", + \"nodekey_base64\": \"${NODE_KEY}\" + } + }" > payload.json + vaultBevelFunc 'write' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/${key}-certs" 'payload.json' + rm payload.json + # Also create the k8s secret + kubectl create secret generic ${key}-certs --namespace {{ .Release.Namespace }} \ + --from-file=network-map-truststore.jks=${fpath}/network-map-truststore.jks --from-file=nodekeystore.jks=${fpath}/nodekeystore.jks \ + --from-literal=node.crt=${NODE_CERTS} --from-literal=node.key=${NODE_KEY} + fi + } +{{- else }} + function safeWriteSecret { + key=$1 + fpath=$2 + kubectl get secret ${key}-certs --namespace {{ .Release.Namespace }} -o json > /dev/null 2>&1 + if [ $? -ne 0 ]; then + kubectl create secret generic ${key}-certs --namespace {{ .Release.Namespace }} \ + --from-file=network-map-truststore.jks=${fpath}/network-map-truststore.jks --from-file=nodekeystore.jks=${fpath}/nodekeystore.jks \ + --from-file=node.crt=<(base64 -w0 ${fpath}/node.cer) --from-file=node.key=<(base64 -w0 ${fpath}/node.key) + fi + } +{{- end }} + if [ "$SECRETS_AVAILABLE" == "yes" ] + then + echo "Certificates found for {{ .Release.Name }} ..." + else + echo "Creating certificates for {{ .Release.Name }} ..." 
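+ # The generation branch below leaves four artifacts in ${CA_PATH} that the rest of the chart expects:
+ #   node.key / node.cer        - self-signed key pair whose subject is built from nodeConf.legalName
+ #   nodekeystore.jks           - the same pair converted through a PKCS12 store, protected with nodeConf.defaultKeystorePassword
+ #   network-map-truststore.jks - fetched from the network map service at nodeConf.networkMapURL for initial registration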
+ # Generate node certs + CA_PATH=/home/certificates/node + + mkdir -p ${CA_PATH} + DEFAULT_KEYSTORE_PASS={{ .Values.nodeConf.defaultKeystorePassword }} + + cd ${CA_PATH} + openssl genrsa -out node.key 3072 + openssl req -new -x509 -key node.key -out node.cer -days 365 -subj '/{{ .Values.nodeConf.legalName | replace "," "/" }}' + openssl dgst -sha256 -sign node.key node.cer | base64 | cat node.cer + openssl pkcs12 -export -in node.cer -inkey node.key -out testkeystore.p12 -passin pass:${DEFAULT_KEYSTORE_PASS} -passout pass:${DEFAULT_KEYSTORE_PASS} + eval "yes | keytool -importkeystore -srckeystore testkeystore.p12 -srcstoretype pkcs12 -srcstorepass ${DEFAULT_KEYSTORE_PASS} -destkeystore nodekeystore.jks -deststorepass ${DEFAULT_KEYSTORE_PASS} -deststoretype JKS" + # Get networkmap-truststore + wget --no-check-certificate {{ .Values.nodeConf.networkMapURL }}/network-map/truststore -O network-map-truststore.jks + fi; + echo "Creating {{ .Release.Name }}-certs secrets in k8s ..." +{{- if and (ne .Values.global.cluster.provider "minikube") (.Values.global.cluster.cloudNativeServices) }} + safeWriteSecret {{ .Release.Name }}-network-map-truststore.jks $CA_PATH/network-map-truststore.jks + safeWriteSecret {{ .Release.Name }}-cert $CA_PATH/node.cer + safeWriteSecret {{ .Release.Name }}-key $CA_PATH/node.key + safeWriteSecret {{ .Release.Name }}-nodekeystore.jks $CA_PATH/nodekeystore.jks +{{- else }} + safeWriteSecret {{ .Release.Name }} ${CA_PATH} +{{- end }} + echo "Completed ..." + volumes: + - name: generated-config + emptyDir: {} + - name: scripts-volume + configMap: + name: bevel-vault-script + defaultMode: 0777 diff --git a/platforms/r3-corda/charts/corda-node/templates/pvc.yaml b/platforms/r3-corda/charts/corda-node/templates/pvc.yaml deleted file mode 100644 index 95f123ad10a..00000000000 --- a/platforms/r3-corda/charts/corda-node/templates/pvc.yaml +++ /dev/null @@ -1,29 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: {{ .Values.pvc.name }} - {{- if .Values.pvc.annotations }} - annotations: -{{ toYaml .Values.pvc.annotations | indent 8 }} - {{- end }} - namespace: {{ .Values.metadata.namespace }} - labels: - app.kubernetes.io/name: {{ .Values.pvc.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . 
| nindent 2 }} -spec: - storageClassName: {{ .Values.pvc.storageClassName }} - accessModes: - - ReadWriteOnce - resources: - requests: - storage: {{ .Values.pvc.memory }} \ No newline at end of file diff --git a/platforms/r3-corda/charts/corda-node/templates/service.yaml b/platforms/r3-corda/charts/corda-node/templates/service.yaml index 43341c2dd23..5bde0e4d1d8 100644 --- a/platforms/r3-corda/charts/corda-node/templates/service.yaml +++ b/platforms/r3-corda/charts/corda-node/templates/service.yaml @@ -3,89 +3,113 @@ # # SPDX-License-Identifier: Apache-2.0 ############################################################################################## - apiVersion: v1 kind: Service metadata: - name: {{ .Values.service.name }} - namespace: {{ .Values.metadata.namespace }} - annotations: + name: {{ .Release.Name }}-db + namespace: {{ .Release.Namespace }} labels: - run: {{ .Values.service.name }} - app.kubernetes.io/name: {{ .Values.service.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + app.kubernetes.io/name: h2-service + app.kubernetes.io/component: database + app.kubernetes.io/part-of: "{{ include "corda-node.fullname" . }}" app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} spec: - type: {{ .Values.service.type }} - selector: - app: {{ .Values.nodeName }} - ports: - # for p2p communication among corda node - - name: p2p - protocol: TCP - port: {{ .Values.service.p2p.port }} - targetPort: {{ .Values.service.p2p.targetPort }} - {{- if .Values.service.p2p.nodePort }} - nodePort: {{ .Values.service.p2p.nodePort}} - {{- end }} - # for rpc communication between corda node and webserver - - name: rpc - protocol: TCP - port: {{ .Values.service.rpc.port }} - targetPort: {{ .Values.service.rpc.targetPort }} - {{- if .Values.service.rpc.nodePort }} - nodePort: {{ .Values.service.rpc.nodePort}} - {{- end }} - # for rpc admin communication - - name: rpcadmin - protocol: TCP - port: {{ .Values.service.rpcadmin.port }} - targetPort: {{ .Values.service.rpcadmin.targetPort }} - {{- if .Values.service.rpcadmin.nodePort }} - nodePort: {{ .Values.service.rpcadmin.nodePort}} - {{- end }} - -{{ if $.Values.ambassador }} + type: ClusterIP + selector: + app.kubernetes.io/component: database + app.kubernetes.io/part-of: {{ include "corda-node.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + ports: + - name: tcp + protocol: TCP + port: {{ .Values.nodeConf.dbPort }} + targetPort: 1521 + - name: web + protocol: TCP + port: 8080 + targetPort: 81 +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: p2p-service + app.kubernetes.io/component: corda + app.kubernetes.io/part-of: {{ include "corda-node.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} +spec: + type: ClusterIP + selector: + app.kubernetes.io/component: corda + app.kubernetes.io/part-of: {{ include "corda-node.fullname" . 
}} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + ports: + # for p2p communication among corda node + - name: p2p + protocol: TCP + port: {{ .Values.nodeConf.p2pPort }} + targetPort: {{ .Values.nodeConf.p2pPort }} + # for rpc communication between corda node and webserver + - name: rpc + protocol: TCP + port: {{ .Values.nodeConf.rpcPort }} + targetPort: {{ .Values.nodeConf.rpcPort }} + # for rpc admin communication + - name: rpcadmin + protocol: TCP + port: {{ .Values.nodeConf.rpcadminPort }} + targetPort: {{ .Values.nodeConf.rpcadminPort }} +{{- if eq .Values.global.proxy.provider "ambassador" }} +{{- if .Values.tls.enabled }} --- apiVersion: getambassador.io/v3alpha1 kind: Host metadata: - name: {{ .Values.ambassador.component_name }}-host - namespace: {{ .Values.metadata.namespace }} + name: {{ .Release.Name }}-host + namespace: {{ .Release.Namespace }} spec: - hostname: {{ .Values.ambassador.component_name }}.{{ .Values.ambassador.external_url_suffix }} + hostname: {{ .Release.Name }}.{{ .Values.global.proxy.externalUrlSuffix }} acmeProvider: authority: none requestPolicy: insecure: action: Route tlsSecret: - name: {{ .Values.ambassador.component_name }}-ambassador-certs - namespace: {{ .Values.metadata.namespace }} + name: {{ .Release.Name }}-tls-certs + namespace: {{ .Release.Namespace }} +{{- end }} --- apiVersion: getambassador.io/v3alpha1 -kind: TLSContext +kind: Mapping metadata: - name: {{ .Values.ambassador.component_name }}-context - namespace: {{ .Values.metadata.namespace }} + name: {{ .Release.Name }}-p2p-mapping + namespace: {{ .Release.Namespace }} spec: - hosts: - - {{ .Values.ambassador.component_name }}.{{ .Values.ambassador.external_url_suffix }} - secret: {{ .Values.ambassador.component_name }}-ambassador-certs.{{ .Values.metadata.namespace }} - secret_namespacing: true - min_tls_version: v1.2 + host: {{ .Release.Name }}.{{ .Values.global.proxy.externalUrlSuffix }} + prefix: / + service: {{ .Release.Name }}.{{ .Release.Namespace }}:{{ .Values.nodeConf.p2pPort }} +{{- if .Values.tls.enabled }} + tls: {{ .Release.Name }}-tlscontext --- apiVersion: getambassador.io/v3alpha1 -kind: Mapping +kind: TLSContext metadata: - name: {{ .Values.ambassador.component_name }}-p2p-mapping - namespace: {{ .Values.metadata.namespace }} + name: {{ .Release.Name }}-tlscontext + namespace: {{ .Release.Namespace }} spec: - host: {{ .Values.ambassador.component_name }}.{{ .Values.ambassador.external_url_suffix }} - prefix: / - service: https://{{ .Values.ambassador.component_name }}.{{ .Values.metadata.namespace }}:{{ .Values.nodeConf.p2p.port }} - tls: {{ .Values.ambassador.component_name }}-context -{{ end }} - + hosts: + - {{ .Release.Name }}.{{ .Values.global.proxy.externalUrlSuffix }} + secret: {{ .Release.Name }}-tls-certs.{{ .Release.Namespace }} + secret_namespacing: true + min_tls_version: v1.2 +{{- end }} +{{- end }} diff --git a/platforms/r3-corda/charts/corda-node/templates/statefulset-db.yaml b/platforms/r3-corda/charts/corda-node/templates/statefulset-db.yaml new file mode 100644 index 00000000000..c880c8ded9a --- /dev/null +++ b/platforms/r3-corda/charts/corda-node/templates/statefulset-db.yaml @@ -0,0 +1,85 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "corda-node.fullname" . }}-db + namespace: {{ .Release.Namespace }} + labels: + app: {{ include "corda-node.fullname" . }} + app.kubernetes.io/name: h2-statefulset + app.kubernetes.io/component: database + app.kubernetes.io/part-of: {{ include "corda-node.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} +spec: + replicas: 1 + podManagementPolicy: OrderedReady + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ include "corda-node.fullname" . }} + app.kubernetes.io/name: h2-statefulset + app.kubernetes.io/component: database + app.kubernetes.io/part-of: {{ include "corda-node.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + serviceName: {{ include "corda-node.fullname" . }} + volumeClaimTemplates: + - metadata: + name: data-h2 + spec: + storageClassName: storage-{{ .Release.Name }} + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storage.dbSize }} + template: + metadata: + labels: + app: {{ include "corda-node.fullname" . }} + app.kubernetes.io/name: h2-statefulset + app.kubernetes.io/component: database + app.kubernetes.io/part-of: {{ include "corda-node.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + spec: + hostname: {{ .Release.Name }}db + imagePullSecrets: + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} + securityContext: + fsGroup: 1000 + containers: + - name: database + image: {{ .Values.image.h2 }} + resources: + limits: + memory: {{ .Values.resources.db.memLimit }} + requests: + memory: {{ .Values.resources.db.memRequest }} + ports: + - containerPort: 1521 + name: p2p + - containerPort: 81 + name: web + env: + - name: JAVA_OPTIONS + value: -Xmx512m + volumeMounts: + - name: data-h2 + mountPath: "/opt/h2-data" + readOnly: false + livenessProbe: + tcpSocket: + port: 1521 + initialDelaySeconds: 15 + periodSeconds: 20 diff --git a/platforms/r3-corda/charts/corda-node/templates/statefulset-node.yaml b/platforms/r3-corda/charts/corda-node/templates/statefulset-node.yaml new file mode 100644 index 00000000000..b371b55a173 --- /dev/null +++ b/platforms/r3-corda/charts/corda-node/templates/statefulset-node.yaml @@ -0,0 +1,621 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "corda-node.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ include "corda-node.fullname" . }} + app.kubernetes.io/name: node-statefulset + app.kubernetes.io/component: corda + app.kubernetes.io/part-of: {{ include "corda-node.fullname" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} +spec: + replicas: 1 + podManagementPolicy: OrderedReady + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ include "corda-node.fullname" . }} + app.kubernetes.io/name: node-statefulset + app.kubernetes.io/component: corda + app.kubernetes.io/part-of: {{ include "corda-node.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + serviceName: {{ include "corda-node.fullname" . }} + volumeClaimTemplates: + - metadata: + name: node-volume + spec: + storageClassName: storage-{{ .Release.Name }} + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storage.size }} + template: + metadata: + labels: + app: {{ include "corda-node.fullname" . }} + app.kubernetes.io/name: node-statefulset + app.kubernetes.io/component: corda + app.kubernetes.io/part-of: {{ include "corda-node.fullname" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + spec: + serviceAccountName: {{ .Values.global.serviceAccountName }} + hostname: {{ .Release.Name }} + imagePullSecrets: + {{- if .Values.image.pullSecret }} + - name: {{ .Values.image.pullSecret }} + {{- end }} + securityContext: + fsGroup: 1000 + initContainers: + - name: db-healthcheck + image: {{ .Values.image.initContainer }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: ["sh", "-c"] + args: + - |- + #!/usr/bin/env sh + COUNTER=1 + FLAG=true + # Check if db is up and running before starting corda node + while [ "$COUNTER" -le 10 ] + do + DB_NODE={{ .Release.Name }}-db:{{ .Values.nodeConf.dbPort }} + STATUS=$(nc -vz $DB_NODE 2>&1 | grep -c open ) + if [ "$STATUS" == 0 ] + then + FLAG=false + else + FLAG=true + echo "DB up and running" + fi + if [ "$FLAG" == false ] + then + echo "Retry attempted $COUNTER times, retrying after 5 seconds" + COUNTER=`expr "$COUNTER" + 1` + sleep 5 + else + echo "SUCCESS!" + echo "DB up and running!" + exit 0 + break + fi + done + if [ "$COUNTER" -gt 10 ] || [ "$FLAG" == false ] + then + echo "Retry attempted $COUNTER times, no DB up and running. Giving up!" + exit 1 + break + fi + - name: init-nodeconf + image : {{ .Values.image.initContainer }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: BASE_DIR + value: "/base/corda" + {{- if (eq .Values.global.vault.type "hashicorp") }} + - name: VAULT_ADDR + value: {{ .Values.global.vault.address }} + - name: VAULT_SECRET_ENGINE + value: "{{ .Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ .Values.global.vault.secretPrefix }}" + - name: KUBERNETES_AUTH_PATH + value: {{ .Values.global.vault.authPath }} + - name: VAULT_APP_ROLE + value: {{ .Values.global.vault.role }} + - name: VAULT_TYPE + value: "{{ .Values.global.vault.type }}" + {{- end }} + command: ["/bin/sh","-c"] + args: + - |- + #!/bin/bash + {{- if (eq .Values.global.vault.type "hashicorp") }} + . /scripts/bevel-vault.sh + echo "Getting vault Token..." 
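+ # checkSecret (defined below) restores previously issued registration keystores
+ # (nodekeystore.jks, sslkeystore.jks, truststore.jks) from <release>-registrationcerts in Vault
+ # into the shared certificates volume, so an already-registered node can reuse its keys across
+ # pod restarts; node.conf itself is rendered later in this script.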
+ vaultBevelFunc "init" + #Read if secret exists in Vault + function checkSecret { + key=$1 + fpath=$2 + mkdir -p ${fpath} + vaultBevelFunc 'readJson' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/${key}-registrationcerts" + if [ "$SECRETS_AVAILABLE" == "yes" ] + then + # Get secret from Vault and store in fpath + SSL_STORE=$(echo ${VAULT_SECRET} | jq -r '.["sslkeystore_base64"]') + SSL_TRUST=$(echo ${VAULT_SECRET} | jq -r '.["ssltruststore_base64"]') + NODE_STORE=$(echo ${VAULT_SECRET} | jq -r '.["nodekeystore_base64"]') + echo $SSL_STORE | base64 -d > ${fpath}/sslkeystore.jks + echo $SSL_TRUST | base64 -d > ${fpath}/truststore.jks + echo $NODE_STORE | base64 -d > ${fpath}/nodekeystore.jks + fi + } + {{- else }} + function checkSecret { + key=$1 + fpath=$2 + #Do nothing as certs are not stored as k8s secrets as of now + } + {{- end }} + # delete previously created node.conf, and create a new node.conf + rm -f ${BASE_DIR}/node.conf; + touch ${BASE_DIR}/node.conf; + + #For more information for node.Conf fields please refer to: https://docs.corda.r3.com/releases/4.0/corda-configuration-file.html + cat << 'EOF' > ${BASE_DIR}/node.conf + p2pAddress : "{{ .Release.Name }}.{{ .Release.Namespace }}:{{ .Values.nodeConf.p2pPort }}" + myLegalName : {{ .Values.nodeConf.legalName | quote }} + keyStorePassword : ${CONF_KEYSTOREPASSWORD} + trustStorePassword : ${CONF_TRUSTSTOREPASSWORD} + transactionCacheSizeMegaBytes : {{ .Values.nodeConf.transactionCacheSizeMegaBytes }} + attachmentContentCacheSizeMegaBytes : {{ .Values.nodeConf.attachmentContentCacheSizeMegaBytes }} + {{- if .Values.nodeConf.notary.enabled }} + notary : { + serviceLegalName : "{{ .Values.nodeConf.notary.serviceLegalName }}" + validating : {{ .Values.nodeConf.notary.validating }} + } + {{- end }} + detectPublicIp = {{ .Values.nodeConf.detectPublicIp }} + additionalP2PAddresses = ["{{ .Release.Name }}.{{ .Values.global.proxy.externalUrlSuffix }}:{{ .Values.global.proxy.p2p }}"] + devMode : {{ .Values.nodeConf.devMode }} + dataSourceProperties = { + dataSourceClassName = "{{ .Values.nodeConf.dataSourceClassName }}" + dataSource.url = "jdbc:h2:tcp://{{ .Release.Name }}-db:{{ .Values.nodeConf.dbPort }}/persistence;DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=10000;WRITE_DELAY=100;AUTO_RECONNECT=TRUE;" + dataSource.user = {{ .Values.nodeConf.dataSourceUser }} + dataSource.password = ${CONF_DATASOURCEPASSWORD} + } + database = { + exportHibernateJMXStatistics = {{ .Values.nodeConf.database.exportHibernateJMXStatistics }} + } + jarDirs = [{{ .Values.nodeConf.jarPath }}] + networkServices = { + doormanURL = "{{ .Values.nodeConf.doormanURL }}" + networkMapURL = "{{ .Values.nodeConf.networkMapURL }}" + } + EOF + + if [ -z "{{ .Values.nodeConf.jvmArgs }}" ] + then + echo 'jvmArgs is not configured' + else + echo 'jvmArgs = "{{ .Values.nodeConf.jvmArgs }}" ' >> ${BASE_DIR}/node.conf + fi + + if [ -z "{{ .Values.nodeConf.sshd.port }}" ] + then + echo 'sshd port is not configured' + else + echo 'sshd { port = {{ .Values.nodeConf.sshd.port }} } ' >> ${BASE_DIR}/node.conf + fi + + if [ -z "{{ .Values.nodeConf.systemProperties }}" ] + then + echo 'systemProperties is not configured' + else + echo 'systemProperties = {{ .Values.nodeConf.systemProperties }} ' >> ${BASE_DIR}/node.conf + fi + + if [ -z "{{ .Values.nodeConf.exportJMXTo }}" ] + then + echo 'exportJMXTo is not configured' + else + echo 'exportJMXTo = {{ .Values.nodeConf.exportJMXTo }} ' >> ${BASE_DIR}/node.conf + fi + + if [ -z "{{ .Values.nodeConf.messagingServerAddress }}" ] + then + echo 'The 
address of the ArtemisMQ broker instance is not configured' + else + echo 'messagingServerAddress : "{{ .Values.nodeConf.messagingServerAddress }}" ' >> ${BASE_DIR}/node.conf + fi + + if [ -z "{{ .Values.nodeConf.rpcUser }}" ] + then + echo 'rpc user is not configured' + else + echo 'rpcUsers : [' >> ${BASE_DIR}/node.conf + {{- range $.Values.nodeConf.rpcUser }} + echo '{ username={{ .name }} ,permissions={{ .permissions }}, ' >> ${BASE_DIR}/node.conf + echo " password={{ .password }} }" >> ${BASE_DIR}/node.conf + {{- end }} + echo ']' >> ${BASE_DIR}/node.conf + fi + + if [ "{{ .Values.nodeConf.rpcSettings.useSsl }}" == true ] + then + echo 'rpcSettings { + standAloneBroker = {{ .Values.nodeConf.rpcSettings.standAloneBroker }} + address = "{{ .Values.nodeConf.rpcSettings.address }}" + adminAddress = "{{ .Values.nodeConf.rpcSettings.adminAddress }}" + useSsl = {{ .Values.nodeConf.rpcSettings.useSsl }} + ssl = { + keyStorePassword = {{ .Values.nodeConf.sslkeyStorePassword }} + trustStorePassword = {{ .Values.nodeConf.ssltrustStorePassword }} + certificatesDirectory = ${BASE_DIR}/{{ .Values.nodeConf.rpcSettings.ssl.certificatesDirectory }} + sslKeystore = ${BASE_DIR}/{{ .Values.nodeConf.rpcSettings.ssl.certificatesDirectory }}/{{ .Values.nodeConf.rpcSettings.ssl.sslKeystoreFileName }} + trustStoreFile = ${BASE_DIR}/{{ .Values.nodeConf.rpcSettings.ssl.certificatesDirectory }}/{{ .Values.nodeConf.rpcSettings.ssl.trustStoreFileName }} + } + }' >> ${BASE_DIR}/node.conf + else + echo 'rpcSettings { + standAloneBroker = {{ .Values.nodeConf.rpcSettings.standAloneBroker }} + address = "{{ .Values.nodeConf.rpcSettings.address }}" + adminAddress = "{{ .Values.nodeConf.rpcSettings.adminAddress }}" + }' >> ${BASE_DIR}/node.conf + fi + echo "node.conf created in ${BASE_DIR}" + checkSecret {{ .Release.Name }} ${BASE_DIR}/certificates + volumeMounts: + - name: nodeconf + mountPath: "/base/corda" + readOnly: false + - name: certificates + mountPath: "/base/corda/certificates" + readOnly: false + - name: scripts-volume + mountPath: /scripts/bevel-vault.sh + subPath: bevel-vault.sh + - name: init-registration + image: {{ .Values.image.corda.repository }}:{{ .Values.image.corda.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + volumeMounts: + - name: node-volume + mountPath: "/base/corda" + readOnly: false + - name: certificates + mountPath: "/base/corda/certificates" + readOnly: false + - name: node-certs + mountPath: "/opt/corda/certificates" + - name: nodeconf + mountPath: "/base/corda/node.conf" + subPath: "node.conf" + readOnly: false + {{- if .Values.tls.enabled }} + - name: nms-certs + mountPath: "/certs/nms" + - name: doorman-certs + mountPath: "/certs/doorman" + {{- end }} + env: + - name: BASE_DIR + value: /base/corda + - name: CORDA_HOME + value: /opt/corda + - name: JAVA_OPTIONS + value: {{ .Values.nodeConf.javaOptions | quote }} + - name: CONF_KEYSTOREPASSWORD + value: {{ .Values.nodeConf.defaultKeystorePassword }} + - name: CONF_TRUSTSTOREPASSWORD + value: {{ .Values.nodeConf.defaultTruststorePassword }} + - name: CONF_DATASOURCEPASSWORD + value: {{ .Values.nodeConf.dataSourcePassword }} + - name: KEYSTORE_PASSWORD + value: {{ .Values.nodeConf.keystorePassword }} + - name: TRUSTSTORE_PASSWORD + value: {{ .Values.nodeConf.truststorePassword }} + command: ["sh", "-c"] + args: + - |- + #!/usr/bin/env sh + if [ -e /opt/corda/certificates/truststore.jks ] + then + cp /opt/corda/certificates/* ${BASE_DIR}/certificates/ + echo "Initial Registration already complete for {{ .Release.Name }} ..." 
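+ # truststore.jks only appears in the {{ .Release.Name }}-certs secret after a previous registration
+ # has been persisted by the corda-logs container, so its presence is treated as the
+ # "already registered" marker and the stored keystores are reused as-is.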
+ else + echo "Initial registration start ..." + cp /opt/corda/certificates/* ${BASE_DIR}/certificates/ + chmod +w ${BASE_DIR}/certificates/nodekeystore.jks + + # import self signed tls certificate of doorman and networkmap, since java only trusts certificate signed by well known CA + {{- if .Values.tls.enabled }} + eval "yes | keytool -importcert -file /certs/nms/tls.crt -storepass changeit -alias {{ include "corda-node.nmsDomain" . }} -keystore /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts" + eval "yes | keytool -importcert -file /certs/doorman/tls.crt -storepass changeit -alias {{ include "corda-node.doormanDomain" . }} -keystore /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts" + {{- end }} + + # command to run corda jar and perform initial-registration + java $JAVA_OPTIONS -jar ${CORDA_HOME}/corda.jar initial-registration --network-root-truststore-password ${CONF_TRUSTSTOREPASSWORD} --network-root-truststore ${BASE_DIR}/certificates/network-map-truststore.jks --base-directory=${BASE_DIR} + + #changing password of keystore. + keytool -storepasswd -new ${KEYSTORE_PASSWORD} -keystore ${BASE_DIR}/certificates/nodekeystore.jks -storepass ${CONF_KEYSTOREPASSWORD} + if [ $? -ne 0 ]; then + echo "Error in initial-registration" + exit 1 + fi + keytool -storepasswd -new ${KEYSTORE_PASSWORD} -keystore ${BASE_DIR}/certificates/sslkeystore.jks -storepass ${CONF_KEYSTOREPASSWORD} + keytool -storepasswd -new ${TRUSTSTORE_PASSWORD} -keystore ${BASE_DIR}/certificates/truststore.jks -storepass ${CONF_TRUSTSTOREPASSWORD} + + #changing password of nodekeystore.jks certificate. + keytool -keypasswd -alias cordaclientca -keypass ${CONF_KEYSTOREPASSWORD} -new ${KEYSTORE_PASSWORD} -keystore ${BASE_DIR}/certificates/nodekeystore.jks -storepass ${KEYSTORE_PASSWORD} + keytool -keypasswd -alias identity-private-key -keypass ${CONF_KEYSTOREPASSWORD} -new ${KEYSTORE_PASSWORD} -keystore ${BASE_DIR}/certificates/nodekeystore.jks -storepass ${KEYSTORE_PASSWORD} + {{- if .Values.nodeConf.notary.enabled }} + keytool -keypasswd -alias distributed-notary-private-key -keypass ${CONF_KEYSTOREPASSWORD} -new ${KEYSTORE_PASSWORD} -keystore ${BASE_DIR}/certificates/nodekeystore.jks -storepass ${KEYSTORE_PASSWORD} + {{- end }} + #changing password of sslkeystore.jks certificate. 
+ keytool -keypasswd -alias cordaclienttls -keypass ${CONF_KEYSTOREPASSWORD} -new ${KEYSTORE_PASSWORD} -keystore ${BASE_DIR}/certificates/sslkeystore.jks -storepass ${KEYSTORE_PASSWORD} + + echo "Initial Registration Complete" + fi; + - name: init-cordapps + image: {{ .Values.image.initContainer }} + imagePullPolicy: Always + command: ["sh", "-c"] + args: + - |- + # crearting cordapps dir in volume to keep jars + mkdir -p /base/corda/cordapps + {{- if .Values.cordApps.getCordApps }} + mkdir -p /tmp/downloaded-jars + REPO_USER_PASS=$(cat /secret/password) + REPO_USER=$(cat /secret/username) + + # Downloading official corda provided jars using curl + {{- range .Values.cordApps.jars }} + cd /tmp/downloaded-jars && curl -u $REPO_USER:$REPO_USER_PASS -O -L {{ .jar.url }} + {{- end }} + cp -ar /tmp/downloaded-jars/* /base/corda/cordapps + {{- end }} + volumeMounts: + - name: node-volume + mountPath: "/base/corda" + - name: maven-secrets + mountPath: "/secret" + containers: + - name: node + image: {{ .Values.image.corda.repository }}:{{ .Values.image.corda.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: BASE_DIR + value: /base/corda + - name: CORDA_HOME + value: /opt/corda + - name: JAVA_OPTIONS + value: {{ .Values.nodeConf.javaOptions | quote }} + - name: CONF_KEYSTOREPASSWORD + value: {{ .Values.nodeConf.keystorePassword }} + - name: CONF_TRUSTSTOREPASSWORD + value: {{ .Values.nodeConf.truststorePassword }} + - name: CONF_DATASOURCEPASSWORD + value: {{ .Values.nodeConf.dataSourcePassword }} + command: ["sh", "-c"] + args: + - |- + #!/usr/bin/env sh + + # import self signed tls certificate of doorman and networkmap, since java only trusts certificate signed by well known CA + {{- if .Values.tls.enabled }} + eval "yes | keytool -importcert -file /certs/nms/tls.crt -storepass changeit -alias {{ include "corda-node.nmsDomain" . }} -keystore /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts" + eval "yes | keytool -importcert -file /certs/doorman/tls.crt -storepass changeit -alias {{ include "corda-node.doormanDomain" . 
}} -keystore /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts" + {{- end }} + + # to clean network-parameters on every restart + rm -rf ${BASE_DIR}/network-parameters + # Run schema migration scripts for corDApps + java -Djavax.net.ssl.keyStore=${BASE_DIR}/certificates/sslkeystore.jks -Djavax.net.ssl.keyStorePassword=$CONF_KEYSTOREPASSWORD $JAVA_OPTIONS -jar ${CORDA_HOME}/corda.jar run-migration-scripts --core-schemas --app-schemas --base-directory=${BASE_DIR} + + # command to run corda jar, we are setting javax.net.ssl.keyStore as ${BASE_DIR}/certificates/sslkeystore.jks since keystore gets reset when using h2 ssl + java -Djavax.net.ssl.keyStore=${BASE_DIR}/certificates/sslkeystore.jks -Djavax.net.ssl.keyStorePassword=$CONF_KEYSTOREPASSWORD $JAVA_OPTIONS -jar ${CORDA_HOME}/corda.jar --base-directory=${BASE_DIR} + resources: + limits: + memory: {{ .Values.resources.node.memLimit }} + requests: + memory: {{ .Values.resources.node.memRequest }} + ports: + - containerPort: {{ .Values.nodeConf.p2pPort }} + name: p2p + - containerPort: {{ .Values.nodeConf.rpcPort }} + name: rpc + - containerPort: {{ .Values.nodeConf.rpcadminPort }} + name: rpcadmin + volumeMounts: + - name: node-volume + mountPath: "/base/corda" + readOnly: false + - name: certificates + mountPath: "/base/corda/certificates" + readOnly: false + - name: nodeconf + mountPath: "/base/corda/node.conf" + subPath: "node.conf" + readOnly: false + {{- if .Values.tls.enabled }} + - name: nms-certs + mountPath: "/certs/nms" + - name: doorman-certs + mountPath: "/certs/doorman" + {{- end }} + livenessProbe: + tcpSocket: + port: {{ .Values.nodeConf.p2pPort }} + initialDelaySeconds: 65 + periodSeconds: 30 + - name: corda-logs + image: {{ .Values.image.initContainer }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: BASE_DIR + value: /base/corda + - name: CORDA_HOME + value: /opt/corda + - name: JAVA_OPTIONS + value: {{ .Values.nodeConf.javaOptions | quote }} + {{- if (eq .Values.global.vault.type "hashicorp") }} + - name: VAULT_ADDR + value: {{ .Values.global.vault.address }} + - name: VAULT_SECRET_ENGINE + value: "{{ .Values.global.vault.secretEngine }}" + - name: VAULT_SECRET_PREFIX + value: "{{ .Values.global.vault.secretPrefix }}" + - name: KUBERNETES_AUTH_PATH + value: {{ .Values.global.vault.authPath }} + - name: VAULT_APP_ROLE + value: {{ .Values.global.vault.role }} + - name: VAULT_TYPE + value: "{{ .Values.global.vault.type }}" + {{- end }} + command: ["sh", "-c"] + args: + - |- + #!/usr/bin/env sh + {{- if (eq .Values.global.vault.type "hashicorp") }} + . /scripts/bevel-vault.sh + echo "Getting vault Token..." 
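+ # Besides tailing the node log, this sidecar persists the keystores produced by initial-registration:
+ # safeWriteSecret (below) writes nodekeystore.jks, sslkeystore.jks and truststore.jks to
+ # <release>-registrationcerts in Vault (when Vault is used) and patches them into the
+ # {{ .Release.Name }}-certs secret; for notaries it also uploads the node-info to the network map.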
+ vaultBevelFunc "init" + function safeWriteSecret { + key=$1 + fpath=$2 + #Read if secret exists in Vault + vaultBevelFunc 'readJson' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/${key}-registrationcerts" + if [ "$SECRETS_AVAILABLE" == "yes" ] + then + echo "Registration Secrets already stored on Vault" + else + # Save secrets to Vault + # Use -w0 to get single line base64 -w0 + NODE_STORE=$(cat ${fpath}/nodekeystore.jks | base64 -w0) + SSL_STORE=$(cat ${fpath}/sslkeystore.jks | base64 -w0) + SSL_TRUST=$(cat ${fpath}/truststore.jks | base64 -w0) + # create a JSON file for the data related to node crypto + echo " + { + \"data\": + { + \"nodekeystore_base64\": \"${NODE_STORE}\", + \"sslkeystore_base64\": \"${SSL_STORE}\", + \"ssltruststore_base64\": \"${SSL_TRUST}\" + } + }" > payload.json + vaultBevelFunc 'write' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/${key}-registrationcerts" 'payload.json' + kubectl patch secret ${key}-certs -p "{\"data\":{\"nodekeystore.jks\":\"$NODE_STORE\", \"sslkeystore.jks\":\"$SSL_STORE\", \"truststore.jks\":\"$SSL_TRUST\" }}" + rm payload.json + fi + } + {{- else }} + function safeWriteSecret { + key=$1 + fpath=$2 + # Use -w0 to get single line base64 -w0 + NODE_STORE=$(cat ${fpath}/nodekeystore.jks | base64 -w0) + SSL_STORE=$(cat ${fpath}/sslkeystore.jks | base64 -w0) + SSL_TRUST=$(cat ${fpath}/truststore.jks | base64 -w0) + kubectl patch secret ${key}-certs -p "{\"data\":{\"nodekeystore.jks\":\"$NODE_STORE\", \"sslkeystore.jks\":\"$SSL_STORE\", \"truststore.jks\":\"$SSL_TRUST\" }}" + } + {{- end }} + {{- if .Values.nodeConf.notary.enabled }} + if [ -e ${BASE_DIR}/notaryregistered ] + then + echo "Notary already registered" + else + NMS_USER_ID={{ .Values.nodeConf.dataSourceUser }} + NMS_USER_PASSWORD={{ .Values.nodeConf.dataSourcePassword }} + + STATUS=0 + while [ "$STATUS" -ne 1 ] + do + # get node-info file name + cd ${BASE_DIR} + NOTARYNODEINFOFILENAME=$(ls ${BASE_DIR}/ | grep nodeInfo | awk '{print $1}'); + echo "NOTARYNODEINFOFILENAME=$NOTARYNODEINFOFILENAME" + if [ -z $NOTARYNODEINFOFILENAME ] + then + echo "node-info file not ready, sleeping for 10s" + sleep 10 + STATUS=0 + else + # get url for registration + url={{ .Values.nodeConf.networkMapURL }} + # check if notary type is validating or non validating, and form url accordingly + if [ {{ .Values.nodeConf.notary.validating }} == "true" ] + then + section=/admin/api/notaries/validating + else + section=/admin/api/notaries/nonValidating + fi + + # get one time login token from networkmap + token=$(curl -k --silent --show-error -X POST "$url/admin/api/login" -H "accept: text/plain" -H "Content-Type: application/json" -d "{ \"user\": \"${NMS_USER_ID}\", \"password\": \"${NMS_USER_PASSWORD}\"}" | awk '{print $1}'); + # curl command to register notary, if resonse is okay then registration is sucessfull + cd ${BASE_DIR} + response=$(curl -k --silent --show-error -X POST -H "Authorization: Bearer ${token}" -H "accept: text/plain" -H "Content-Type: application/octet-stream" --data-binary @${NOTARYNODEINFOFILENAME} ${url}${section} | awk '{print $1}') + echo "responsevar=$response" + if [ $response = "OK" ] + then + echo "Response is OK"; + echo "Registered notary with Networkmap successfully" + touch ${BASE_DIR}/notaryregistered + else + echo "Response from NMS is not ok"; + echo "Something went wrong" + fi + STATUS=1 + break + fi + done + fi + {{- end }} + {{- if and (ne .Values.global.cluster.provider "minikube") (.Values.global.cluster.cloudNativeServices) }} + safeWriteSecret {{ .Release.Name 
}}-sslkeystore ${BASE_DIR}/certificates/sslkeystore.jks + safeWriteSecret {{ .Release.Name }}-ssltruststore ${BASE_DIR}/certificates/truststore.jks + safeWriteSecret {{ .Release.Name }}-nodekeystore ${BASE_DIR}/certificates/nodekeystore.jks + {{- else }} + safeWriteSecret {{ .Release.Name }} ${BASE_DIR}/certificates + {{- end }} + echo "Completed ..." + if [ -e ${BASE_DIR}/logs/node-{{ include "corda-node.fullname" . }}-0.log ] + then + tail -f ${BASE_DIR}/logs/node-{{ include "corda-node.fullname" . }}-0.log + else + echo "waiting for corda to generate log, sleeping for 10s" + sleep 10 + fi + volumeMounts: + - name: node-volume + mountPath: "/base/corda" + readOnly: false + - name: certificates + mountPath: "/base/corda/certificates" + readOnly: false + - name: scripts-volume + mountPath: /scripts/bevel-vault.sh + subPath: bevel-vault.sh + volumes: + - name: scripts-volume + configMap: + name: bevel-vault-script + defaultMode: 0777 + - name: nodeconf + emptyDir: + medium: Memory + - name: certificates + emptyDir: + medium: Memory + - name: node-certs + secret: + secretName: {{ .Release.Name }}-certs +{{- if .Values.tls.enabled }} + - name: nms-certs + secret: + secretName: nms-tls-certs + - name: doorman-certs + secret: + secretName: doorman-tls-certs +{{- end }} +{{- if .Values.cordApps.mavenSecret }} + - name: maven-secrets + secret: + secretName: {{ .Values.cordApps.mavenSecret }} +{{- end }} diff --git a/platforms/r3-corda/charts/corda-node/values.yaml b/platforms/r3-corda/charts/corda-node/values.yaml index e590a2d72b3..ed029fc02fa 100644 --- a/platforms/r3-corda/charts/corda-node/values.yaml +++ b/platforms/r3-corda/charts/corda-node/values.yaml @@ -3,262 +3,140 @@ # # SPDX-License-Identifier: Apache-2.0 ############################################################################################## +global: + serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws | azure | gcp + cloudNativeServices: false # set to true to use Cloud Native Services (SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure) + vault: + type: hashicorp + role: vault-role + address: + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + proxy: + #This will be the proxy/ingress provider. Can have values "ambassador" or "none" + #Eg. provider: "ambassador" + provider: "ambassador" + #This field contains the external URL of the node + #Eg. externalUrlSuffix: test.blockchaincloudpoc.com + externalUrlSuffix: test.blockchaincloudpoc.com + p2p: 15010 -#Provide the nodeName for node -#Eg. nodeName: bank1 -nodeName: bank1 - -#Provide the replica set for node deployed -#Eg. replicas: 1 -replicas: 1 +storage: + #Provide the storage for node + #Eg. size: 1Gi + size: 1Gi + dbSize: 2Gi + allowedTopologies: + enabled: false +tls: + enabled: true -metadata: - #Provide the namespace - #Eg. namespace: default - namespace: - #Provide the custom labels - #NOTE: Provide labels other than name, release name , release service, chart version , chart name , app. - #Eg. labels: - # role: create_channel - labels: - image: - #Provide the containerName of image - #Eg. containerName: ghcr.io/hyperledger/bevel-corda:4.9 - containerName: ghcr.io/hyperledger/bevel-corda:4.9 - #Provide the name of image for init container - #Eg. name: ghcr.io/hyperledger/bevel-alpine:latest - initContainerName: ghcr.io/hyperledger/bevel-alpine:latest #Provide the image pull secret of image #Eg. 
pullSecret: regcred - imagePullSecret: regcred - #Provide true or false if private certificate to be added - #Eg. privateCertificate: true - privateCertificate: true - #Provide true or false if private certificate to be added - #Eg. doormanCertAlias: doorman.fracordakubetest7.com - doormanCertAlias: doorman.fracordakubetest7.com - #Provide true or false if private certificate to be added - #Eg. networkmapCertAlias: networkmap.fracordakubetest7.com - networkmapCertAlias: networkmap.fracordakubetest7.com - + pullSecret: + pullPolicy: IfNotPresent + h2: ghcr.io/hyperledger/h2:2018 + #Provide the containerName of image + #Eg. corda: ghcr.io/hyperledger/bevel-corda:4.9 + corda: + repository: ghcr.io/hyperledger/bevel-corda + tag: 4.9 + #Provide the name of image for init container + #Eg. initContainer: ghcr.io/hyperledger/bevel-alpine:latest + initContainer: ghcr.io/hyperledger/bevel-alpine:latest + hooks: + repository: ghcr.io/hyperledger/bevel-build + tag: jdk8-stable -#For more information for node.Conf fields please refer to: https://docs.corda.net/releases/release-V3.3/corda-configuration-file.html +#For more information for node.Conf fields please refer to: https://docs.r3.com/en/platform/corda/4.9/community/corda-configuration-fields.html nodeConf: + defaultKeystorePassword: cordacadevpass + defaultTruststorePassword: trustpass + keystorePassword: newpass + truststorePassword: newtrustpass + sslkeyStorePassword: sslpass + ssltrustStorePassword: ssltrustpass + removeKeysOnDelete: true + #Provide the rpcUser for corda node + rpcUser: + - name: nodeoperations + password: nodeoperationsAdmin + permissions: [ALL] #The host and port on which the node is available for protocol operations over ArtemisMQ. - p2p: - url: - port: - #Specify the ambassador host:port which will be advertised in addition to p2paddress - ambassadorAddress: + p2pPort: 10002 + rpcPort: 10003 + rpcadminPort: 10005 rpcSettings: - useSsl: - standAloneBroker: - address: - adminAddress: + useSsl: false + standAloneBroker: false + address: "0.0.0.0:10003" + adminAddress: "0.0.0.0:10005" ssl: - certificatesDirectory: - sslKeystorePath: - trustStoreFilePath: + certificatesDirectory: na-ssl-false + sslKeystorePath: na-ssl-false + trustStoreFilePath: na-ssl-false #Provide the legalName for node - #Eg. legalName: "O=Bank1,L=London,C=GB,CN=Bank1" - legalName: + #Eg. legalName: "O=Notary,OU=Notary,L=London,C=GB" + legalName: "O=Notary,OU=Notary,L=London,C=GB" messagingServerAddress: jvmArgs: systemProperties: sshd: port: exportJMXTo: - transactionCacheSizeMegaBytes: - attachmentContentCacheSizeMegaBytes: + transactionCacheSizeMegaBytes: 8 + attachmentContentCacheSizeMegaBytes: 10 notary: - validating: - detectPublicIp: - database: - exportHibernateJMXStatistics: - #Provide the h2Url for node - #Eg. h2Url: bank1h2 - dbUrl: bank1h2 - #Provide the h2Port for node - #Eg. h2Port: 9101 + enabled: true + validating: true + serviceLegalName: "O=Notary Service,OU=Notary,L=London,C=GB" + detectPublicIp: false + database: + exportHibernateJMXStatistics: false + #Provide the database port + #Eg. dbPort: 9101 dbPort: 9101 - dataSourceClassName: - dataSourceUrl: - jarPath: + dataSourceUser: sa + dataSourcePassword: admin + dataSourceClassName: "org.h2.jdbcx.JdbcDataSource" + jarPath: "/data/corda-workspace/h2/bin" #Provide the nms for node - #Eg. 
nms: "http://rp-elb-fra-corda-kube-cluster7-2016021309.us-west-1.elb.amazonaws.com:30050" - networkMapURL: - doormanURL: - compatibilityZoneURL: - webAddress: - #Provide the jar Version for corda jar and finanace jar - #Eg. jarVersion: 3.3-corda - jarVersion: 3.3-corda + #Eg. networkMapURL: "https://supplychain-nms.supplychain-ns" + networkMapURL: https://supplychain-nms.supplychain-ns + doormanURL: https://supplychain-doorman.supplychain-ns #Provide the devMode for corda node #Eg. devMode: true - devMode: true - #Provide the useHTTPS for corda node - #Eg. useHTTPS: false - useHTTPS: false - #Provide the enviroment variables to be set - env: - - name: JAVA_OPTIONS - value: - - name: CORDA_HOME - value: - - name: BASE_DIR - value: - -credentials: - #Provide the dataSourceUser for corda node - #Eg. dataSourceUser: - dataSourceUser: - #Provide the rpcUser for corda node - rpcUser: - - name: bank1operations - permissions: [ALL] - -volume: - #Provide the base path - #Eg. mountPath: "/opt/h2-data" - baseDir: - -resources: - #Provide the limit memory for node - #Eg. limits: "1Gi" - limits: "1Gi" - #Provide the requests memory for node - #Eg. requests: "1Gi" - requests: "1Gi" - -storage: - #Provide the provisioner for node - #Eg. provisioner: kubernetes.io/aws-ebs - provisioner: kubernetes.io/aws-ebs - #Provide the name for node - #Eg. name: bank1nodesc - name: bank1nodesc - #Provide the memory for node - #Eg. memory: 4Gi - memory: 4Gi - parameters: - #Provide the type for node - #Eg. type: gp2 - type: gp2 - # Provide whether the EBS volume should be encrypted or not - #Eg. encrypted: "true" - encrypted: "true" - # annotations: - # key: "value" - annotations: + devMode: false + #Provide the JAVA_OPTIONS for Corda Node as string + javaOptions: "-Xmx512m" - -service: -# Note: Target ports are dependent on image being used. Please change them accordingly -# nodePort should be kept empty while using service type as ClusterIP ( Values.service.type ) - #Provide the type of service - #Eg. type: NodePort or LoadBalancer etc - type: NodePort - p2p: - #Provide the p2p port for node - #Eg. port: 10007 - port: 10007 - #Provide the p2p node port for node - #Eg. port: 30007 - nodePort: - #Provide the p2p targetPort for node - #Eg. targetPort: 30007 - targetPort: 30007 - rpc: - #Provide the rpc port for node - #Eg. port: 10008 - port: 10008 - #Provide the rpc targetPort for node - #Eg. targetPort: 10003 - targetPort: 10003 - #Provide the rpc node port for node - #Eg. nodePort: 30007 - nodePort: - rpcadmin: - #Provide the rpcadmin port for node - #Eg. port: 10108 - port: 10108 - #Provide the rpcadmin targetPort for node - #Eg. targetPort: 10005 - targetPort: 10005 - #Provide the rpcadmin node port for node - #Eg. nodePort: 30007 - nodePort: - # annotations: - # key: "value" - annotations: - -deployment: - annotations: -# annotations: -# key: "value" - -pvc: - # annotations: - # key: "value" - annotations: - -vault: - #Provide the vault server address - #Eg. address: http://54.226.163.39:8200 - address: - #Provide the vaultrole - #Eg. role: vault-role - role: vault-role - #Provide the authpath - #Eg. authpath: cordabank1 - authpath: cordabank1 - #Provide the serviceaccountname - #Eg. serviceaccountname: vault-auth-issuer - serviceaccountname: vault-auth-issuer - #Provide the secretprefix - #Eg. dbsecretprefix: bank1/credentials/database - dbsecretprefix: bank1/credentials/database - #Provide the secretprefix - #Eg. 
rpcusersecretprefix: bank1/credentials/rpcusers - rpcusersecretprefix: bank1/credentials/rpcusers - #Provide the secretprefix - #Eg. keystoresecretprefix: bank1/credentials/keystore - keystoresecretprefix: bank1/credentials/keystore - #Provide the secretprefix - #Eg. certsecretprefix: bank1/certs - certsecretprefix: bank1/certs - #Provide the secretprefix - #Eg. cordappsreposecretprefix: bank1/credentials/cordapps - cordappsreposecretprefix: bank1/credentials/cordapps - -cordapps: - #Provide if you want to provide jars in cordapps - #Eg. getcordapps: true or false - getcordapps: - repository: +cordApps: + #Provide if you want to provide jars in cordApps + #Eg. getCordApps: true or false + getCordApps: false + mavenSecret: jars: #Provide url to download the jar using wget cmd #Eg. url: https://ci-artifactory.corda.r3cev.com/artifactory/corda-releases/net/corda/corda-finance/3.3-corda/corda-finance-3.3-corda.jar - url: - url: - -healthcheck: - #Provide the interval in seconds you want to iterate till db to be ready - #Eg. readinesscheckinterval: 5 - readinesscheckinterval: 5 - #Provide the threshold till you want to check if specified db up and running - #Eg. readinessthreshold: 2 - readinessthreshold: 2 - -ambassador: - #Provides component name - #Eg. component_name: node - component_name: node - #Provides the suffix to be used in external URL - #Eg. external_url_suffix: org1.blockchaincloudpoc.com - external_url_suffix: org1.blockchaincloudpoc.com - #Provide the p2p port for ambassador - #Eg. p2p_ambassador: 10007 - p2p_ambassador: +resources: + db: + #Provide the limit memory for node + #Eg. memLimit: "1Gi" + memLimit: "1G" + #Provide the requests memory for node + #Eg. memRequest: "1Gi" + memRequest: "512M" + node: + #Provide the limit memory for node + #Eg. memLimit: "1Gi" + memLimit: "2G" + #Provide the requests memory for node + #Eg. memRequest: "1Gi" + memRequest: "1G" diff --git a/platforms/r3-corda/charts/corda-notary-initial-registration/Chart.yaml b/platforms/r3-corda/charts/corda-notary-initial-registration/Chart.yaml deleted file mode 100644 index bd03c0e2602..00000000000 --- a/platforms/r3-corda/charts/corda-notary-initial-registration/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-os: Job for initial notary node registration." -name: corda-notary-initial-registration -version: 1.0.0 diff --git a/platforms/r3-corda/charts/corda-notary-initial-registration/README.md b/platforms/r3-corda/charts/corda-notary-initial-registration/README.md deleted file mode 100644 index e4cad681e1d..00000000000 --- a/platforms/r3-corda/charts/corda-notary-initial-registration/README.md +++ /dev/null @@ -1,220 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) 
-[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# Node Deployment - -- [Notary-initial-registration Deployment Helm Chart](#Notary-initial-registration-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Contributing](#contributing) -- [License](#license) - - -## notary-initial-registration Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/develop/platforms/r3-corda/charts/corda-notary-initial-registration) helps to deploy the job for initial notory node registration. - - -## Prerequisites ---- -Before deploying the chart please ensure you have the following prerequisites: - -- networkmap and Node's database up and running. -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm is installed. - - -This chart has following structue: -``` - . - ├── notary-initial-registration - │ ├── Chart.yaml - │ ├── templates - │ │ ├── _helpers.tpl - │ │ └── job.yaml - │ └── values.yaml -``` - -Type of files used: - -- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed. -- `job.yaml` : This file is a configuration file for deployement in Kubernetes.It creates a deployment file with a specified number of replicas and defines various settings for the deployment, Init container is responsible for intial node registration process is completed successfully before the main containers start.It also specifies volume mounts for storing certificates and data. -- `chart.yaml` : Provides metadata about the chart, such as its name, version, and description. -- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the image, nodeconfig, credenatials, storage, service , vault, etc. -- `_helpers.tpl` : A template file used for defining custom labels and ports for the metrics in the Helm chart. - - - -## Configuration ---- -The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/r3-corda/charts/corda-notary-initial-registration/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. 
Here are some important configuration options:
-
-## Parameters
----
-
-### Name
-
-| Name | Description | Default Value |
-| ---- | ----------- | ------------- |
-| name | Provide the name of the node | bank1 |
-
-### Metadata
-
-| Name | Description | Default Value |
-| ---- | ----------- | ------------- |
-| namespace | Provide the namespace for the Notary-initial-registration Generator | default |
-| labels | Provide any additional labels for the Notary-initial-registration Generator | "" |
-
-### Image
-
-| Name | Description | Default Value |
-| ---- | ----------- | ------------- |
-| initContainerName | Provide the alpine utils image, which is used for all init-containers of deployments/jobs | "" |
-| containerName | Provide the containerName of the image | "" |
-| imagePullSecret | Provide the image pull secret of the image | regcred |
-| privateCertificate | Provide true or false if a private certificate is to be added | "true" |
-| doormanCertAlias | Provide the doorman certificate alias | "" |
-| networkmapCertAlias | Provide the networkmap certificate alias | "" |
-
-### NodeConf
-
-| Name | Description | Default Value |
-| ---- | ----------- | ------------- |
-| p2p | The host and port on which the node is available for protocol operations over ArtemisMQ | "" |
-| ambassadorAddress | Specify the ambassador host:port which will be advertised in addition to the p2pAddress | "" |
-| legalName | Provide the legalName for the node | "" |
-| dbUrl | Provide the h2Url for the node | "bank1h2" |
-| dbPort | Provide the h2Port for the node | "9101" |
-| networkMapURL | Provide the nms for the node | "" |
-| doormanURL | Provide the doorman for the node | "" |
-| jarVersion | Provide the jar version for the corda jar and finance jar | "3.3-corda" |
-| devMode | Provide the devMode for the corda node | "true" |
-| env | Provide the environment variables to be set | "" |
-
-### credentials
-
-| Name | Description | Default Value |
-| ---- | ----------- | ------------- |
-| dataSourceUser | Provide the dataSourceUser for the corda node | "" |
-| rpcUser | Provide the rpcUser for the corda node | bank1operations |
-
-### Volume
-
-| Name | Description | Default Value |
-| ---- | ----------- | ------------- |
-| baseDir | Base directory | /home/bevel |
-
-### Resources
-
-| Name | Description | Default Value |
-| ---- | ----------- | ------------- |
-| limits | Provide the memory limit for the node | "1Gi" |
-| requests | Provide the memory request for the node | "1Gi" |
-
-### Service
-
-| Name | Description | Default Value |
-| ---- | ----------- | ------------- |
-| type | Provide the type of service | NodePort |
-| p2p port | Provide the p2p port for the node | 10007 |
-| p2p nodePort | Provide the p2p nodePort for the node | 30007 |
-| p2p targetPort | Provide the p2p targetPort for the node | 30007 |
-| rpc port | Provide the rpc port for the node | 10008 |
-| rpc targetPort | Provide the rpc targetPort for the node | 10003 |
-| rpc nodePort | Provide the rpc nodePort for the node | 30007 |
-| rpcadmin port | Provide the rpcadmin port for the node | 10108 |
-| rpcadmin targetPort | Provide the rpcadmin targetPort for the node | 10005 |
-| rpcadmin nodePort | Provide the rpcadmin nodePort for the node | 30007 |
-
-### Vault
-
-| Name | Description | Default Value |
-| ---- | ----------- | ------------- |
-| address | Address/URL of the Vault server | "" |
-| role | Role used for authentication with Vault | vault-role |
-| authpath | Authentication path for Vault | cordabank1 |
-| serviceAccountName | Provide the already created service account name authenticated to Vault | vault-auth-issuer |
-| certSecretPrefix | Provide the Vault path where the certificates are stored | bank1/certs |
-| dbsecretprefix | Provide the secret prefix for database credentials | bank1/credentials/database |
-| rpcusersecretprefix | Provide the secret prefix for RPC user credentials | bank1/credentials/rpcusers |
-| keystoresecretprefix | Provide the secret prefix for keystore credentials | bank1/credentials/keystore |
-| retries | Provide the number of retries | "" |
-
-### Healthcheck
-
-| Name | Description | Default Value |
-| ---- | ----------- | ------------- |
-| readinesscheckinterval | Provide the interval in seconds between checks that the DB is ready | 5 |
-| readinessthreshold | Provide the number of times to check whether the specified DB is up and running | 2 |
-
-
-
-## Deployment
----
-
-To deploy the notary-initial-registration Helm chart, follow these steps:
-
-1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/r3-corda/charts/corda-notary-initial-registration/values.yaml) file to set the desired configuration values.
-2. Run the following Helm commands to install, upgrade, verify, or delete the chart:
-
-To install the chart:
-```bash
-helm repo add bevel https://hyperledger.github.io/bevel/
-helm install <release-name> ./corda-notary-initial-registration
-```
-
-To upgrade the chart:
-```bash
-helm upgrade <release-name> ./corda-notary-initial-registration
-```
-
-To verify the deployment:
-```bash
-kubectl get jobs -n <namespace>
-```
-Note: Replace `<namespace>` with the actual namespace where the Job was created. This command will display information about the Job, including the number of completions and the current status of the Job's pods.
-
-To delete the chart:
-```bash
-helm uninstall <release-name>
-```
-Note: Replace `<release-name>` with the name of the release.
-
-
-
-## Contributing
----
-If you encounter any bugs, have suggestions, or would like to contribute to the [notary-initial-registration Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/develop/platforms/r3-corda/charts/corda-notary-initial-registration), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel).
-
-
-
-## License
-
-This chart is licensed under the Apache v2.0 license.
-
-Copyright © 2023 Accenture
-
-### Attribution
-
-This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which are licensed under the Apache v2.0 License, reproduced here:
-
-```
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and -limitations under the License. -``` diff --git a/platforms/r3-corda/charts/corda-notary-initial-registration/templates/_helpers.tpl b/platforms/r3-corda/charts/corda-notary-initial-registration/templates/_helpers.tpl deleted file mode 100644 index 7bf5f530a8e..00000000000 --- a/platforms/r3-corda/charts/corda-notary-initial-registration/templates/_helpers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} \ No newline at end of file diff --git a/platforms/r3-corda/charts/corda-notary-initial-registration/templates/job.yaml b/platforms/r3-corda/charts/corda-notary-initial-registration/templates/job.yaml deleted file mode 100644 index f8a26e74629..00000000000 --- a/platforms/r3-corda/charts/corda-notary-initial-registration/templates/job.yaml +++ /dev/null @@ -1,544 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: batch/v1 -kind: Job -metadata: - name: {{ .Values.nodeName }}-registration - namespace: {{ .Values.metadata.namespace }} - labels: - app: {{ .Values.nodeName }}-registration - app.kubernetes.io/name: {{ .Values.nodeName }}-registration - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} -spec: - backoffLimit: 6 - template: - metadata: - labels: - app: {{ .Values.nodeName }}-initial-registration - app.kubernetes.io/name: {{ .Values.nodeName }}-registration - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - restartPolicy: "OnFailure" - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - hostname: {{ .Values.nodeName }} - securityContext: - fsGroup: 1000 - containers: - - name: notary-initial-registration - image: {{ .Values.image.containerName }} - imagePullPolicy: Always - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - # Setting up enviroment variables required for corda jar - {{- range $.Values.nodeConf.env }} - export {{ .name }}="{{ .value }}" - {{- end }} - rm -rf ${BASE_DIR}/certificates/done.txt - - # Setting up enviroment variables - export DEFAULT_TRUSTSTORE_PASSWORD=`cat /opt/node/creds/default_truststore_cred` - export KEYSTORE_PASSWORD=`cat /opt/node/creds/keystore_cred` - export TRUSTSTORE_PASSWORD=`cat /opt/node/creds/truststore_cred` - export DEFAULT_KEYSTORE_PASSWORD=`cat /opt/node/creds/default_keystore_cred` - - # import self signed tls certificate of doorman and networkmap, since java only trusts certificate signed by well known CA - {{- if .Values.image.privateCertificate }} - yes | keytool -importcert -file {{ $.Values.volume.baseDir }}/certificates/networkmap/networkmap.crt -storepass changeit -alias {{ $.Values.image.networkmapCertAlias }} -keystore /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts - yes | keytool -importcert -file {{ $.Values.volume.baseDir }}/certificates/doorman/doorman.crt -storepass changeit -alias {{ $.Values.image.doormanCertAlias }} -keystore /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts - {{- end }} - - # command to run corda jar and perform initial-registration - java $JAVA_OPTIONS -jar 
${CORDA_HOME}/corda.jar initial-registration --network-root-truststore-password ${DEFAULT_TRUSTSTORE_PASSWORD} --network-root-truststore ${BASE_DIR}/certificates/network-map-truststore.jks --base-directory=${BASE_DIR} - - #changing password of keystore. - keytool -storepasswd -new ${KEYSTORE_PASSWORD} -keystore ${BASE_DIR}/certificates/nodekeystore.jks -storepass ${DEFAULT_KEYSTORE_PASSWORD} - keytool -storepasswd -new ${KEYSTORE_PASSWORD} -keystore ${BASE_DIR}/certificates/sslkeystore.jks -storepass ${DEFAULT_KEYSTORE_PASSWORD} - keytool -storepasswd -new ${TRUSTSTORE_PASSWORD} -keystore ${BASE_DIR}/certificates/truststore.jks -storepass ${DEFAULT_TRUSTSTORE_PASSWORD} - - #changing password of nodekeystore.jks certificate. - keytool -keypasswd -alias cordaclientca -keypass ${DEFAULT_KEYSTORE_PASSWORD} -new ${KEYSTORE_PASSWORD} -keystore ${BASE_DIR}/certificates/nodekeystore.jks -storepass ${KEYSTORE_PASSWORD} - keytool -keypasswd -alias identity-private-key -keypass ${DEFAULT_KEYSTORE_PASSWORD} -new ${KEYSTORE_PASSWORD} -keystore ${BASE_DIR}/certificates/nodekeystore.jks -storepass ${KEYSTORE_PASSWORD} - keytool -keypasswd -alias distributed-notary-private-key -keypass ${DEFAULT_KEYSTORE_PASSWORD} -new ${KEYSTORE_PASSWORD} -keystore ${BASE_DIR}/certificates/nodekeystore.jks -storepass ${KEYSTORE_PASSWORD} - - #changing password of sslkeystore.jks certificate. - keytool -keypasswd -alias cordaclienttls -keypass ${DEFAULT_KEYSTORE_PASSWORD} -new ${KEYSTORE_PASSWORD} -keystore ${BASE_DIR}/certificates/sslkeystore.jks -storepass ${KEYSTORE_PASSWORD} - - # create dummy file to perform check if last line of the container is executed or not - touch ${BASE_DIR}/certificates/done.txt - volumeMounts: - - name: node-volume - mountPath: "{{ $.Values.volume.baseDir }}" - readOnly: false - - name: certificates - mountPath: "{{ $.Values.volume.baseDir }}/certificates" - readOnly: false - - name: nodeconf - mountPath: "{{ $.Values.volume.baseDir }}/node.conf" - subPath: "node.conf" - readOnly: false - - name: creds - mountPath: "/opt/node/creds" - readOnly: false - - name: store-certs - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - - name: JAVA_OPTIONS - value: -Xmx512m - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - - OUTPUT_PATH=${BASE_DIR} - - # setting up env to get secrets from vault - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . 
else .auth.client_token end') - - # perform check if certificates are ready or not, and upload certificate into vault when ready - COUNTER=1 - cd ${BASE_DIR}/certificates - while [ "$COUNTER" -lt {{ $.Values.healthcheck.readinessthreshold }} ] - do - if [ -e nodekeystore.jks ] && [ -e sslkeystore.jks ] && [ -e truststore.jks ] && [ -e done.txt ] - then - echo "found certificates, performing vault put" - (echo '{"data": {"nodekeystore.jks": "'; base64 ${BASE_DIR}/certificates/nodekeystore.jks; echo '"}}') | curl -H "X-Vault-Token: ${VAULT_TOKEN}" -d @- ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/nodekeystore - (echo '{"data": {"sslkeystore.jks": "'; base64 ${BASE_DIR}/certificates/sslkeystore.jks; echo '"}}') | curl -H "X-Vault-Token: ${VAULT_TOKEN}" -d @- ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/sslkeystore - (echo '{"data": {"truststore.jks": "'; base64 ${BASE_DIR}/certificates/truststore.jks; echo '"}}') | curl -H "X-Vault-Token: ${VAULT_TOKEN}" -d @- ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/truststore - # get nodekeystore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/nodekeystore | jq -r 'if .errors then . else . end') - TLS_NODEKEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "nodekeystore.jks" ]' 2>&1) - # get sslkeystore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/sslkeystore | jq -r 'if .errors then . else . end') - TLS_SSLKEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "sslkeystore.jks" ]' 2>&1) - # get truststore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/truststore | jq -r 'if .errors then . else . 
end') - TLS_TRUSTSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data[ "truststore.jks" ]' 2>&1) - if [ "$TLS_NODEKEYSTORE" == "null" ] || [ "$TLS_SSLKEYSTORE" == "null" ] || [ "$TLS_TRUSTSTORE" == "null" ] || [[ "$TLS_NODEKEYSTORE" == "parse error"* ]] || [[ "$TLS_SSLKEYSTORE" == "parse error"* ]] || [[ "$TLS_TRUSTSTORE" == "parse error"* ]] - then - echo "certificates write or read fail" - sleep {{ $.Values.healthcheck.readinessthreshold }} - if [ "$COUNTER" -ge {{ $.Values.vault.retries }} ] - then - echo "Retry attempted $COUNTER times, certificates have not been saved" - exit 1 - fi - fi - COUNTER=`expr "$COUNTER" + 1` - fi - done - volumeMounts: - - name: node-volume - mountPath: "{{ $.Values.volume.baseDir }}" - readOnly: false - - name: certificates - mountPath: "{{ $.Values.volume.baseDir }}/certificates" - readOnly: false - - name: nodeconf - mountPath: "{{ $.Values.volume.baseDir }}/node.conf" - subPath: "node.conf" - readOnly: false - initContainers: - - name: init-nodeconf - image : {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.volume.baseDir }} - - name: KS_SECRET_PREFIX - value: {{ .Values.vault.keystoresecretprefix }} - - name: DB_SECRET_PREFIX - value: {{ .Values.vault.dbsecretprefix }} - - name: RPCUSER_SECRET_PREFIX - value: {{ .Values.vault.rpcusersecretprefix }} - command: ["/bin/sh","-c"] - args: - - |- - #!/bin/bash - # delete previously created node.conf, and create a new node.conf - rm -f ${BASE_DIR}/node.conf; - touch ${BASE_DIR}/node.conf; - - # setting up env to get secrets from vault - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - echo "Getting secrets from Vault Server" - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - - # save keyStorePassword & trustStorePassword from vault - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${KS_SECRET_PREFIX} | jq -r 'if .errors then . else . end') - CONF_KEYSTOREPASSWORD=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["defaultKeyStorePassword"]') - CONF_TRUSTSTOREPASSWORD=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["defaultTrustStorePassword"]') - - # save dataSourceUserPassword from vault - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${DB_SECRET_PREFIX} | jq -r 'if .errors then . else . end') - CONF_DATASOURCEPASSWORD=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["{{ .Values.credentials.dataSourceUser }}"]') - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${RPCUSER_SECRET_PREFIX} | jq -r 'if .errors then . else . 
end') - - #For more information for node.Conf fields please refer to: https://docs.corda.r3.com/releases/4.0/corda-configuration-file.html - cat << EOF > ${BASE_DIR}/node.conf - p2pAddress : "{{ .Values.nodeConf.p2p.url }}:{{ .Values.nodeConf.p2p.port }}" - myLegalName : "{{ .Values.nodeConf.legalName }}" - keyStorePassword : "${CONF_KEYSTOREPASSWORD}" - trustStorePassword : "${CONF_TRUSTSTOREPASSWORD}" - transactionCacheSizeMegaBytes : {{ .Values.nodeConf.transactionCacheSizeMegaBytes }} - attachmentContentCacheSizeMegaBytes : {{ .Values.nodeConf.attachmentContentCacheSizeMegaBytes }} - notary : { - serviceLegalName : "{{ .Values.nodeConf.notary.serviceLegalName }}" - validating : {{ .Values.nodeConf.notary.validating }} - } - detectPublicIp = {{ .Values.nodeConf.detectPublicIp }} - additionalP2PAddresses = ["{{ .Values.nodeConf.ambassadorAddress }}"] - devMode : {{ .Values.nodeConf.devMode }} - dataSourceProperties = { - dataSourceClassName = "{{ .Values.nodeConf.dataSourceClassName }}" - dataSource.url = "{{ .Values.nodeConf.dataSourceUrl }}" - dataSource.user = {{ .Values.credentials.dataSourceUser }} - dataSource.password = "${CONF_DATASOURCEPASSWORD}" - } - database = { - exportHibernateJMXStatistics = {{ .Values.nodeConf.database.exportHibernateJMXStatistics }} - } - jarDirs = ["{{ .Values.nodeConf.jarPath }}"] - EOF - - if [ -z "{{ .Values.nodeConf.compatibilityZoneURL }}" ] - then - echo 'networkServices = { - doormanURL = "{{ .Values.nodeConf.doormanURL }}" - networkMapURL = "{{ .Values.nodeConf.networkMapURL }}" - }' >> ${BASE_DIR}/node.conf - else - echo 'compatibilityZoneURL : "{{ .Values.nodeConf.compatibilityZoneURL }}"' >> ${BASE_DIR}/node.conf - fi - - if [ -z "{{ .Values.nodeConf.jvmArgs }}" ] - then - echo 'jvmArgs is not configured' - else - echo 'jvmArgs = "{{ .Values.nodeConf.jvmArgs }}" ' >> ${BASE_DIR}/node.conf - fi - - if [ -z "{{ .Values.nodeConf.sshd.port }}" ] - then - echo 'sshd port is not configured' - else - echo 'sshd { port = {{ .Values.nodeConf.sshd.port }} } ' >> ${BASE_DIR}/node.conf - fi - - if [ -z "{{ .Values.nodeConf.systemProperties }}" ] - then - echo 'systemProperties is not configured' - else - echo 'systemProperties = {{ .Values.nodeConf.systemProperties }} ' >> ${BASE_DIR}/node.conf - fi - - if [ -z "{{ .Values.nodeConf.exportJMXTo }}" ] - then - echo 'exportJMXTo is not configured' - else - echo 'exportJMXTo = {{ .Values.nodeConf.exportJMXTo }} ' >> ${BASE_DIR}/node.conf - fi - - if [ -z "{{ .Values.nodeConf.messagingServerAddress }}" ] - then - echo 'The address of the ArtemisMQ broker instance is not configured' - else - echo 'messagingServerAddress : "{{ .Values.nodeConf.messagingServerAddress }}" ' >> ${BASE_DIR}/node.conf - fi - - if [ -z "{{ .Values.credentials.rpcUser }}" ] - then - echo 'rpc useer is not configured' - else - echo 'rpcUsers : [' >> ${BASE_DIR}/node.conf - {{- range $.Values.credentials.rpcUser }} - echo '{ username={{ .name }} ,permissions={{ .permissions }}, ' >> ${BASE_DIR}/node.conf - echo " password=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["{{ .name }}"]') }" >> ${BASE_DIR}/node.conf - {{- end }} - echo ']' >> ${BASE_DIR}/node.conf - fi - - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${KS_SECRET_PREFIX} | jq -r 'if .errors then . else . 
end') - if [ "{{ .Values.nodeConf.rpcSettings.useSsl }}" == true ] - then - echo "rpcSettings { - standAloneBroker = {{ .Values.nodeConf.rpcSettings.standAloneBroker }} - address = "{{ .Values.nodeConf.rpcSettings.address }}" - adminAddress = "{{ .Values.nodeConf.rpcSettings.adminAddress }}" - useSsl = {{ .Values.nodeConf.rpcSettings.useSsl }} - ssl = { - keyStorePassword = $(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["sslkeyStorePassword"]') - trustStorePassword = $(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["ssltrustStorePassword"]') - certificatesDirectory = ${BASE_DIR}/{{ .Values.nodeConf.rpcSettings.ssl.certificatesDirectory }} - sslKeystore = ${BASE_DIR}/{{ .Values.nodeConf.rpcSettings.ssl.certificatesDirectory }}/{{ .Values.nodeConf.rpcSettings.ssl.sslKeystoreFileName }} - trustStoreFile = ${BASE_DIR}/{{ .Values.nodeConf.rpcSettings.ssl.certificatesDirectory }}/{{ .Values.nodeConf.rpcSettings.ssl.trustStoreFileName }} - } - }" >> ${BASE_DIR}/node.conf - else - echo 'rpcSettings { - standAloneBroker = {{ .Values.nodeConf.rpcSettings.standAloneBroker }} - address = "{{ .Values.nodeConf.rpcSettings.address }}" - adminAddress = "{{ .Values.nodeConf.rpcSettings.adminAddress }}" - }' >> ${BASE_DIR}/node.conf - fi - echo "node.conf created in ${BASE_DIR}" - volumeMounts: - - name: nodeconf - mountPath: "{{ $.Values.volume.baseDir }}" - - name: init-certificates - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{.Values.vault.certsecretprefix}} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - # To check if custom nodekeystore is retrived from vault, if yes then store it in nodekeystore.jks - validateVaultResponseCustomnodeKeystore () { - if echo ${2} | grep "errors"; - then - echo "custom nodekeystore.jks is not provided and new one will be created." - else - echo "Found custom nodekeystore.jks" - echo "${NODE_KEY}" | base64 -d > ${OUTPUT_PATH}/nodekeystore.jks - fi - } - - # To check if certificates are already present in vault or not - validateVaultResponseKeystore () { - if echo ${2} | grep "errors"; - then - echo "Initial registration will create keystore ${1}" - else - echo "Initial registration was performed before." - exit 1 - fi - } - - # setting up env to get secrets from vault - echo "Getting secrets from Vault Server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . 
else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - - OUTPUT_PATH=${BASE_DIR} - - # get customnodekeystore from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/customnodekeystore ) - NODE_KEY=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["nodekeystore.jks"]') - validateVaultResponseCustomnodeKeystore "secret (${CERTS_SECRET_PREFIX}/customnodekeystore)" "${LOOKUP_SECRET_RESPONSE}" - - # get network-map-truststore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/networkmaptruststore | jq -r 'if .errors then . else . end') - validateVaultResponse "secret (${CERTS_SECRET_PREFIX}/networkmaptruststore)" "${LOOKUP_SECRET_RESPONSE}" - TLS_NMS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["network-map-truststore"]') - echo "${TLS_NMS}" | base64 -d > ${OUTPUT_PATH}/network-map-truststore.jks - - # To check if sslkeystore,nodekeystore,truststore are present in vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/nodekeystore | jq -r 'if .errors then . else . end') - validateVaultResponseKeystore "secret on (${CERTS_SECRET_PREFIX}/nodekeystore)" "${LOOKUP_SECRET_RESPONSE}" - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/sslkeystore | jq -r 'if .errors then . else . end') - validateVaultResponseKeystore "secret on (${CERTS_SECRET_PREFIX}/sslkeystore)" "${LOOKUP_SECRET_RESPONSE}" - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/truststore | jq -r 'if .errors then . else . end') - validateVaultResponseKeystore "secret on (${CERTS_SECRET_PREFIX}/truststore)" "${LOOKUP_SECRET_RESPONSE}" - - # when using doorman and networkmap in TLS: true, and using private certificate then download certificate - if [ "{{ .Values.image.privateCertificate }}" == true ] - then - mkdir -p ${OUTPUT_PATH}/networkmap - mkdir -p ${OUTPUT_PATH}/doorman - - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/networkmap | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/networkmap" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - NETWORKMAP_CRT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["networkmap.crt"]') - echo "${NETWORKMAP_CRT}" | base64 -d > ${OUTPUT_PATH}/networkmap/networkmap.crt - - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/doorman | jq -r 'if .errors then . else . 
end') - validateVaultResponse "secret (${CERTS_SECRET_PREFIX}/doorman)" "${LOOKUP_SECRET_RESPONSE}" - DOORMAN_CRT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["doorman.crt"]') - echo "${DOORMAN_CRT}" | base64 -d > ${OUTPUT_PATH}/doorman/doorman.crt - fi - chmod 777 -R ${BASE_DIR}/; - echo "Done" - volumeMounts: - - name: certificates - mountPath: {{ $.Values.volume.baseDir }} - - name: init-credential - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{.Values.vault.address}} - - name: KUBERNETES_AUTH_PATH - value: {{.Values.vault.authpath}} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: BASE_DIR - value: /opt/node/creds - - name: KS_SECRET_PREFIX - value: {{ .Values.vault.keystoresecretprefix }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - # setting up env to get secrets from vault - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - echo "Getting secrets from Vault Server" - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - - OUTPUT_PATH=${BASE_DIR} - - # get keystore passwords from vault - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${KS_SECRET_PREFIX} | jq -r 'if .errors then . else . 
end') - validateVaultResponse "${KS_SECRET_PREFIX}" "${LOOKUP_PWD_RESPONSE}" "LOOKUPSECRETRESPONSE" - DEFAULT_TRUSTSTOREPASSWORD=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["defaultTrustStorePassword"]') - DEFAULT_KEYSTOREPASSWORD=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["defaultKeyStorePassword"]') - KEYSTOREPASSWORD=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["keyStorePassword"]') - TRUSTSTOREPASSWORD=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["trustStorePassword"]') - echo "${DEFAULT_TRUSTSTOREPASSWORD}" >> ${BASE_DIR}/default_truststore_cred - echo "${KEYSTOREPASSWORD}" >> ${BASE_DIR}/keystore_cred - echo "${TRUSTSTOREPASSWORD}" >> ${BASE_DIR}/truststore_cred - echo "${DEFAULT_KEYSTOREPASSWORD}" >> ${BASE_DIR}/default_keystore_cred - - echo "Done" - volumeMounts: - - name: creds - mountPath: "/opt/node/creds" - readOnly: false - - name: db-healthcheck - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - COUNTER=1 - FLAG=true - # perform health check if db is up and running before starting corda node - while [ "$COUNTER" -le {{ $.Values.healthcheck.readinessthreshold }} ] - do - DB_NODE={{ .Values.nodeConf.dbUrl }}:{{ .Values.nodeConf.dbPort }} - STATUS=$(nc -vz $DB_NODE 2>&1 | grep -c open ) - - if [ "$STATUS" == 0 ] - then - FLAG=false - else - FLAG=true - echo "DB up and running" - fi - - if [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, retrying after {{ $.Values.healthcheck.readinesscheckinterval }} seconds" - COUNTER=`expr "$COUNTER" + 1` - sleep {{ $.Values.healthcheck.readinesscheckinterval }} - else - echo "SUCCESS!" - echo "DB up and running!" - exit 0 - break - fi - done - - if [ "$COUNTER" -gt {{ $.Values.healthcheck.readinessthreshold }} ] || [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, no DB up and running. Giving up!" - exit 1 - break - fi - imagePullSecrets: - - name: {{ .Values.image.imagePullSecret }} - volumes: - - name: node-volume - emptyDir: - medium: Memory - - name: certificates - emptyDir: - medium: Memory - - name: nodeconf - emptyDir: - medium: Memory - - name: creds - emptyDir: - medium: Memory \ No newline at end of file diff --git a/platforms/r3-corda/charts/corda-notary-initial-registration/values.yaml b/platforms/r3-corda/charts/corda-notary-initial-registration/values.yaml deleted file mode 100644 index eef0caeeba6..00000000000 --- a/platforms/r3-corda/charts/corda-notary-initial-registration/values.yaml +++ /dev/null @@ -1,204 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -#Provide the nodeName for node -#Eg. nodeName: bank1 -nodeName: bank1 - -#Provide the replica set for node deployed -#Eg. replicas: 1 -replicas: - -metadata: - #Provide the namespace - #Eg. namespace: default - namespace: default - #Provide the custom labels - #NOTE: Provide labels other than name, release name , release service, chart version , chart name , app. - #Eg. labels: - # role: create_channel - labels: - -image: - #Provide the containerName of image - #Eg. containerName: ghcr.io/hyperledger/bevel-corda:4.9 - containerName: ghcr.io/hyperledger/bevel-corda:4.9 - #Provide the name of image for init container - #Eg. 
name: ghcr.io/hyperledger/bevel-alpine:latest - initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - - #Provide the image pull secret of image - #Eg. pullSecret: regcred - imagePullSecret: "" - #Provide true or false if private certificate to be added - #Eg. privateCertificate: true - privateCertificate: true - #Provide doorman domain alias - #Eg. doormanCertAlias: doorman.fracordakubetest7.com - doormanCertAlias: doorman.fracordakubetest7.com - #Provide networkmap domain alias - #Eg. networkmapCertAlias: networkmap.fracordakubetest7.com - networkmapCertAlias: networkmap.fracordakubetest7.com - - -#For more information for node.Conf fields please refer to: https://docs.corda.net/releases/release-V3.3/corda-configuration-file.html -nodeConf: - #The host and port on which the node is available for protocol operations over ArtemisMQ. - p2p: - url: - port: - #Specify the ambassador host:port which will be advertised in addition to p2paddress - ambassadorAddress: - rpcSettings: - useSsl: - standAloneBroker: - address: - adminAddress: - ssl: - certificatesDirectory: - sslKeystorePath: - trustStoreFilePath: - #Provide the legalName for node - #Eg. legalName: "O=Bank1,L=London,C=GB,CN=Bank1" - legalName: - messagingServerAddress: - jvmArgs: - systemProperties: - sshd: - port: - exportJMXTo: - transactionCacheSizeMegaBytes: - attachmentContentCacheSizeMegaBytes: - notary: - validating: - serviceLegalName: - detectPublicIp: - database: - exportHibernateJMXStatistics: - #Provide the h2Url for node - #Eg. h2Url: bank1h2 - dbUrl: bank1h2 - #Provide the h2Port for node - #Eg. h2Port: 9101 - dbPort: 9101 - dataSourceClassName: - dataSourceUrl: - jarPath: - #Provide the nms for node - #Eg. nms: "http://rp-elb-fra-corda-kube-cluster7-2016021309.us-west-1.elb.amazonaws.com:30050" - networkMapURL: - doormanURL: - # compatibilityZoneURL is for NMS only implementation - compatibilityZoneURL: - #Provide the jar Version for corda jar and finanace jar - #Eg. jarVersion: 3.3-corda - jarVersion: 3.3-corda - #Provide the devMode for corda node - #Eg. devMode: true - devMode: true - #Provide the enviroment variables to be set - env: - - name: JAVA_OPTIONS - value: - - name: CORDA_HOME - value: - - name: BASE_DIR - value: - -credentials: - #Provide the dataSourceUser for corda node - #Eg. dataSourceUser: - dataSourceUser: - #Provide the rpcUser for corda node - rpcUser: - - name: bank1operations - permissions: [ALL] - -volume: - #Provide the base path - #Eg. mountPath: "/opt/h2-data" - baseDir: - -resources: - #Provide the limit memory for node - #Eg. limits: "1Gi" - limits: "1Gi" - #Provide the requests memory for node - #Eg. requests: "1Gi" - requests: "1Gi" - -service: -# Note: Target ports are dependent on image being used. Please change them accordingly -# nodePort should be kept empty while using service type as ClusterIP ( Values.service.type ) - #Provide the type of service - #Eg. type: NodePort or LoadBalancer etc - type: NodePort - p2p: - #Provide the p2p port for node - #Eg. port: 10007 - port: 10007 - #Provide the p2p node port for node - #Eg. port: 30007 - nodePort: - #Provide the p2p targetPort for node - #Eg. targetPort: 30007 - targetPort: 30007 - rpc: - #Provide the rpc port for node - #Eg. port: 10008 - port: 10008 - #Provide the rpc targetPort for node - #Eg. targetPort: 10003 - targetPort: 10003 - #Provide the rpc node port for node - #Eg. nodePort: 10003 - nodePort: - rpcadmin: - #Provide the rpcadmin port for node - #Eg. 
port: 10108 - port: 10108 - #Provide the rpcadmin targetPort for node - #Eg. targetPort: 10005 - targetPort: 10005 - #Provide the rpcadmin node port for node - #Eg. nodePort: 30007 - nodePort: - -vault: - #Provide the vault server address - #Eg. address: http://54.226.163.39:8200 - address: - #Provide the vaultrole - #Eg. role: vault-role - role: vault-role - #Provide the authpath - #Eg. authpath: cordabank1 - authpath: cordabank1 - #Provide the serviceaccountname - #Eg. serviceaccountname: vault-auth-issuer - serviceaccountname: vault-auth-issuer - #Provide the secretprefix - #Eg. dbsecretprefix: bank1/credentials/database - dbsecretprefix: bank1/credentials/database - #Provide the secretprefix - #Eg. rpcusersecretprefix: bank1/credentials/rpcusers - rpcusersecretprefix: bank1/credentials/rpcusers - #Provide the secretprefix - #Eg. keystoresecretprefix: bank1/credentials/keystore - keystoresecretprefix: bank1/credentials/keystore - #Provide the secretprefix - #Eg. certsecretprefix: bank1/certs - certsecretprefix: bank1/certs - # Number of retries to check contents from vault -  retries: - -healthcheck: - #Provide the interval in seconds you want to iterate till db to be ready - #Eg. readinesscheckinterval: 5 - readinesscheckinterval: 5 - #Provide the threshold till you want to check if specified db up and running - #Eg. readinessthreshold: 2 - readinessthreshold: 2 diff --git a/platforms/r3-corda/charts/corda-notary/Chart.yaml b/platforms/r3-corda/charts/corda-notary/Chart.yaml deleted file mode 100644 index 2324c43e17e..00000000000 --- a/platforms/r3-corda/charts/corda-notary/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -appVersion: "2.0" -description: "R3-corda-os: Deploys the notary node." -name: corda-notary -version: 1.0.0 diff --git a/platforms/r3-corda/charts/corda-notary/README.md b/platforms/r3-corda/charts/corda-notary/README.md deleted file mode 100644 index eaf7b56af35..00000000000 --- a/platforms/r3-corda/charts/corda-notary/README.md +++ /dev/null @@ -1,249 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) -[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - - -# Notary Deployment - -- [Notary Deployment Helm Chart](#Notary-deployment-helm-chart) -- [Prerequisites](#prerequisites) -- [Chart Structure](#chart-structure) -- [Configuration](#configuration) -- [Deployment](#deployment) -- [Contributing](#contributing) -- [License](#license) - - - -## Notary Deployment Helm Chart ---- -This [Helm chart](https://github.com/hyperledger/bevel/tree/develop/platforms/r3-corda/charts/corda-notary) helps to deploy the r3corda notory node. - - - -## Prerequisites ---- -Before deploying the chart please ensure you have the following prerequisites: - -- NetworkMap and Node's database up and running. -- Kubernetes cluster up and running. -- A HashiCorp Vault instance is set up and configured to use Kubernetes service account token-based authentication. -- The Vault is unsealed and initialized. -- Helm is installed. 
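-
-The Vault prerequisite above assumes the Kubernetes auth method is already enabled at the path this chart expects. The following is an illustrative sketch only and is not part of the chart: the auth path `cordabank1`, role `vault-role`, and service account `vault-auth-issuer` simply mirror this chart's default values, while `KUBE_HOST`, `KUBE_CA_CERT`, and the policy name are placeholders to adjust for your environment.
-
-```bash
-# Enable Kubernetes auth at the path referenced by the chart's vault.authpath value
-vault auth enable -path=cordabank1 kubernetes
-
-# Point Vault at the Kubernetes API (KUBE_HOST and KUBE_CA_CERT are placeholders for your cluster details)
-vault write auth/cordabank1/config \
-  kubernetes_host="$KUBE_HOST" \
-  kubernetes_ca_cert="$KUBE_CA_CERT"
-
-# Bind the chart's service account to a Vault role; the policy name here is hypothetical
-vault write auth/cordabank1/role/vault-role \
-  bound_service_account_names=vault-auth-issuer \
-  bound_service_account_namespaces=default \
-  policies=bevel-corda-policy \
-  ttl=60m
-```
-
-With a role like this in place, the init containers in this chart can exchange their service account JWT for a Vault token by calling `auth/cordabank1/login`, which is how the templates below authenticate to Vault.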
-
-## Chart Structure
----
-This chart has the following structure:
-```
-  ├── notary
-  │   ├── Chart.yaml
-  │   ├── templates
-  │   │   ├── deployment.yaml
-  │   │   ├── _helpers.tpl
-  │   │   ├── pvc.yaml
-  │   │   └── service.yaml
-  │   └── values.yaml
-```
-
-Type of files used:
-
-- `templates` : This directory contains the Kubernetes manifest templates that define the resources to be deployed.
-- `deployment.yaml`: This file is a configuration file for a deployment in Kubernetes. It creates a Deployment with a specified number of replicas and defines various settings for it; an init container ensures the node registration process has completed successfully before the main containers start. It also specifies volume mounts for storing certificates and data.
-- `pvc.yaml` : A PersistentVolumeClaim (PVC) is a request for storage by a user.
-- `service.yaml` : This file defines a Kubernetes Service with multiple ports for protocols and targets, and supports Ambassador proxy annotations for specific configurations when using the "ambassador" proxy provider.
-- `chart.yaml` : Provides metadata about the chart, such as its name, version, and description.
-- `values.yaml` : Contains the default configuration values for the chart. It includes configuration for the image, nodeConf, credentials, storage, service, vault, etc.
-- `_helpers.tpl` : A template file used for defining custom labels in the Helm chart.
-
-
-## Configuration
----
-The [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/r3-corda/charts/corda-notary/values.yaml) file contains configurable values for the Helm chart. We can modify these values according to the deployment requirements. Here are some important configuration options:
-
-## Parameters
----
-
-### Name
-
-| Name | Description | Default Value |
-| ---- | ----------- | ------------- |
-| name | Provide the name of the node | bank1 |
-
-### Metadata
-
-| Name | Description | Default Value |
-| ---- | ----------- | ------------- |
-| namespace | Provide the namespace for the Notary Generator | default |
-| labels | Provide any additional labels for the Notary Generator | "" |
-
-### Image
-
-| Name | Description | Default Value |
-| ---- | ----------- | ------------- |
-| initContainerName | Provide the alpine utils image, which is used for all init-containers of deployments/jobs | "" |
-| containerName | Provide the containerName of the image | "" |
-| imagePullSecret | Provide the image pull secret of the image | regcred |
-| privateCertificate | Provide true or false if a private certificate is to be added | "true" |
-| doormanCertAlias | Provide the doorman certificate alias | "" |
-| networkmapCertAlias | Provide the networkmap certificate alias | "" |
-
-### NodeConf
-
-| Name | Description | Default Value |
-| ---- | ----------- | ------------- |
-| p2p | The host and port on which the node is available for protocol operations over ArtemisMQ | "" |
-| ambassadorAddress | Specify the ambassador host:port which will be advertised in addition to the p2pAddress | "" |
-| legalName | Provide the legalName for the node | "" |
-| dbUrl | Provide the h2Url for the node | "bank1h2" |
-| dbPort | Provide the h2Port for the node | "9101" |
-| networkMapURL | Provide the nms for the node | "" |
-| doormanURL | Provide the doorman for the node | "" |
-| jarVersion | Provide the jar version for the corda jar and finance jar | "3.3-corda" |
-| devMode | Provide the devMode for the corda node | "true" |
-| env | Provide the environment variables to be set | "" |
-
-### credentials
-
-| Name | Description | Default Value |
-| ---- | ----------- | ------------- |
-| dataSourceUser | Provide the dataSourceUser for the corda node | "" |
-| rpcUser | Provide the rpcUser for the corda node | bank1operations |
-
-### cordapps
-
-| Name | Description | Default Value |
-| ---- | ----------- | ------------- |
-| getcordapps | Provide if you want to provide jars in cordapps | "" |
-| repository | Provide the repository of cordapps | "" |
-| jars url | Provide the url to download the jar using the wget cmd | "" |
-
-### Volume
-
-| Name | Description | Default Value |
-| ---- | ----------- | ------------- |
-| baseDir | Base directory | /home/bevel |
-
-### Resources
-
-| Name | Description | Default Value |
-| ---- | ----------- | ------------- |
-| limits | Provide the memory limit for the node | "1Gi" |
-| requests | Provide the memory request for the node | "1Gi" |
-
-### PVC
-
-| Name | Description | Default Value |
-| ---- | ----------- | ------------- |
-| name | Provide the name for the pvc | bank1-pvc |
-| memory | Provide the memory for the node | "4Gi" |
-| storageClassName | Provide the name of the storageclass | bank1nodesc |
-
-### Service
-
-| Name | Description | Default Value |
-| ---- | ----------- | ------------- |
-| service Name | Provide the name of the service | bank1 |
-| type | Provide the type of service | NodePort |
-| p2p port | Provide the p2p port for the node | 10007 |
-| p2p nodePort | Provide the p2p nodePort for the node | 30007 |
-| p2p targetPort | Provide the p2p targetPort for the node | 30007 |
-| rpc port | Provide the rpc port for the node | 10008 |
-| rpc targetPort | Provide the rpc targetPort for the node | 10003 |
-| rpc nodePort | Provide the rpc nodePort for the node | 30007 |
-| rpcadmin port | Provide the rpcadmin port for the node | 10108 |
-| rpcadmin targetPort | Provide the rpcadmin targetPort for the node | 10005 |
-| rpcadmin nodePort | Provide the rpcadmin nodePort for the node | 30007 |
-
-### Vault
-
-| Name | Description | Default Value |
-| ---- | ----------- | ------------- |
-| address | Address/URL of the Vault server | "" |
-| role | Role used for authentication with Vault | vault-role |
-| authpath | Authentication path for Vault | cordabank1 |
-| serviceAccountName | Provide the already created service account name authenticated to Vault | vault-auth-issuer |
-| certSecretPrefix | Provide the Vault path where the certificates are stored | bank1/certs |
-| dbsecretprefix | Provide the secret prefix for database credentials | bank1/credentials/database |
-| rpcusersecretprefix | Provide the secret prefix for RPC user credentials | bank1/credentials/rpcusers |
-| keystoresecretprefix | Provide the secret prefix for keystore credentials | bank1/credentials/keystore |
-| cordappsreposecretprefix | Provide the secret prefix for the cordapps repository credentials | bank1/credentials/cordapps |
-
-### Healthcheck
-
-| Name | Description | Default Value |
-| ---- | ----------- | ------------- |
-| readinesscheckinterval | Provide the interval in seconds between checks that the DB is ready | 5 |
-| readinessthreshold | Provide the number of times to check whether the specified DB is up and running | 2 |
-
-### ambassador
-
-| Name | Description | Default Value |
-| ---- | ----------- | ------------- |
-| component_name | Provides the component name | node |
-| external_url_suffix | Provides the suffix to be used in the external URL | org1.blockchaincloudpoc.com |
-| p2p_ambassador | Provide the p2p port for ambassador | 10007 |
-
-
-
-## Deployment
----
-
-To deploy the notary Helm chart, follow these steps:
-
-1. Modify the [values.yaml](https://github.com/hyperledger/bevel/blob/develop/platforms/r3-corda/charts/corda-notary/values.yaml) file to set the desired configuration values.
-2. Run the following Helm commands to install, upgrade, verify, or delete the chart:
-
-To install the chart:
-```bash
-helm repo add bevel https://hyperledger.github.io/bevel/
-helm install <release-name> ./corda-notary
-```
-
-To upgrade the chart:
-```bash
-helm upgrade <release-name> ./corda-notary
-```
-
-To verify the deployment:
-```bash
-kubectl get deployments -n <namespace>
-```
-Note: Replace `<namespace>` with the actual namespace where the Deployment was created. This command will display information about the Deployment, including the number of ready replicas and the current status of its pods.
-
-To delete the chart:
-```bash
-helm uninstall <release-name>
-```
-Note: Replace `<release-name>` with the name of the release.
-
-
-
-## Contributing
----
-If you encounter any bugs, have suggestions, or would like to contribute to the [notary Deployment Helm Chart](https://github.com/hyperledger/bevel/tree/develop/platforms/r3-corda/charts/corda-notary), please feel free to open an issue or submit a pull request on the [project's GitHub repository](https://github.com/hyperledger/bevel).
-
-
-## License
-
-This chart is licensed under the Apache v2.0 license.
-
-Copyright © 2023 Accenture
-
-### Attribution
-
-This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which are licensed under the Apache v2.0 License, reproduced here:
-
-```
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-``` diff --git a/platforms/r3-corda/charts/corda-notary/templates/_helpers.tpl b/platforms/r3-corda/charts/corda-notary/templates/_helpers.tpl deleted file mode 100644 index 7bf5f530a8e..00000000000 --- a/platforms/r3-corda/charts/corda-notary/templates/_helpers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{- define "labels.custom" }} - {{ range $key, $val := $.Values.metadata.labels }} - {{ $key }}: {{ $val }} - {{ end }} -{{- end }} \ No newline at end of file diff --git a/platforms/r3-corda/charts/corda-notary/templates/deployment.yaml b/platforms/r3-corda/charts/corda-notary/templates/deployment.yaml deleted file mode 100644 index c21279777ae..00000000000 --- a/platforms/r3-corda/charts/corda-notary/templates/deployment.yaml +++ /dev/null @@ -1,617 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ .Values.nodeName }} - {{- if .Values.deployment.annotations }} - annotations: -{{ toYaml .Values.deployment.annotations | indent 8 }} - {{- end }} - namespace: {{ .Values.metadata.namespace }} - labels: - app.kubernetes.io/name: {{ .Values.nodeName }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} -spec: - replicas: {{ .Values.replicas }} - selector: - matchLabels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - strategy: - type: Recreate - rollingUpdate: null - template: - metadata: - labels: - app: {{ .Values.nodeName }} - app.kubernetes.io/name: {{ .Values.nodeName }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - hostname: {{ .Values.nodeName }} - securityContext: - fsGroup: 1000 - containers: - - name: notary - image: {{ .Values.image.containerName }} - imagePullPolicy: Always - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - - # Setting up enviroment variables required for corda jar - {{- range $.Values.nodeConf.env }} - export {{ .name }}="{{ .value }}" - {{- end }} - - # import self signed tls certificate of doorman and networkmap, since java only trusts certificate signed by well known CA - {{- if .Values.image.privateCertificate }} - yes | keytool -importcert -file {{ $.Values.volume.baseDir }}/certificates/networkmap/networkmap.crt -storepass changeit -alias {{ $.Values.image.networkmapCertAlias }} -keystore /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts - yes | keytool -importcert -file {{ $.Values.volume.baseDir }}/certificates/doorman/doorman.crt -storepass changeit -alias {{ $.Values.image.doormanCertAlias }} -keystore /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts - {{- end }} - - # to clean network-parameters on every restart - rm -rf ${BASE_DIR}/network-parameters - - # Run schema migration scripts for corDApps - java -Djavax.net.ssl.keyStore=${BASE_DIR}/certificates/sslkeystore.jks -Djavax.net.ssl.keyStorePassword=newpass $JAVA_OPTIONS -jar ${CORDA_HOME}/corda.jar run-migration-scripts --core-schemas --app-schemas --base-directory=${BASE_DIR} - # command to run corda jar, we are setting javax.net.ssl.keyStore as 
${BASE_DIR}/certificates/sslkeystore.jks since keystore gets reset when using h2 ssl - java -Djavax.net.ssl.keyStore=${BASE_DIR}/certificates/sslkeystore.jks -Djavax.net.ssl.keyStorePassword=newpass $JAVA_OPTIONS -jar ${CORDA_HOME}/corda.jar --base-directory=${BASE_DIR} - resources: - limits: - memory: {{ .Values.resources.limits }} - requests: - memory: {{ .Values.resources.requests }} - ports: - - containerPort: {{ .Values.service.p2p.targetPort }} - name: p2p - - containerPort: {{ .Values.service.rpc.targetPort }} - name: rpc - - containerPort: {{ .Values.service.rpcadmin.targetPort }} - name: rpcadmin - volumeMounts: - - name: notary-volume - mountPath: "{{ $.Values.volume.baseDir }}" - readOnly: false - - name: certificates - mountPath: "{{ $.Values.volume.baseDir }}/certificates" - readOnly: false - - name: nodeconf - mountPath: "{{ $.Values.volume.baseDir }}/node.conf" - subPath: "node.conf" - readOnly: false - livenessProbe: - tcpSocket: - port: {{ .Values.service.p2p.targetPort }} - initialDelaySeconds: 65 - periodSeconds: 30 - - name: corda-logs - image: {{ .Values.image.initContainerName }} - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: VAULT_NODE_NAME - value: {{ .Values.nodeName }} - - name: SECRET_PREFIX - value: {{.Values.vault.networkmapsecretprefix}} - - name: NMS_USER_ID - value: {{.Values.credentials.dataSourceUser}} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - {{- range $.Values.nodeConf.env }} - export {{ .name }}="{{ .value }}" - {{- end }} - COUNTER=0 - # setting up env to get secrets from vault - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - - # save networkmap login passwoed from vault - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${SECRET_PREFIX} | jq -r 'if .errors then . else . 
end') - NMS_USER_PASSWORD=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["sa"]') - - STATUS=0 - - while [ "$STATUS" -ne 1 ] - do - # get node-info file name - cd ${BASE_DIR} - NOTARYNODEINFOFILENAME=$(ls ${BASE_DIR}/ | grep nodeInfo | awk '{print $1}'); - echo "NOTARYNODEINFOFILENAME=$NOTARYNODEINFOFILENAME" - if [ -z $NOTARYNODEINFOFILENAME ] - then - echo "node-info file not ready, sleeping for 10s" - sleep 10 - STATUS=0 - - else - # get url for registration - if [ -z "{{ .Values.nodeConf.compatibilityZoneURL }}" ] - then - url={{ .Values.nodeConf.networkMapURL }} - else - url={{ .Values.nodeConf.compatibilityZoneURL }} - fi - - # check if notary type is validating or non validating, and form url accordingly - if [ {{ .Values.nodeConf.notary.validating }} == "true" ] - then - section=/admin/api/notaries/validating - else - section=/admin/api/notaries/nonValidating - fi - - # get one time login token from networkmap - token=$(curl -k --silent --show-error -X POST "$url/admin/api/login" -H "accept: text/plain" -H "Content-Type: application/json" -d "{ \"user\": \"${NMS_USER_ID}\", \"password\": \"${NMS_USER_PASSWORD}\"}" | awk '{print $1}'); - - # curl command to register notary, if resonse is okay then registration is sucessfull - cd ${BASE_DIR} - - response=$(curl -k --silent --show-error -X POST -H "Authorization: Bearer ${token}" -H "accept: text/plain" -H "Content-Type: application/octet-stream" --data-binary @${NOTARYNODEINFOFILENAME} ${url}${section} | awk '{print $1}') - echo "responsevar=$response" - if [ $response = "OK" ] - then - echo "Response is OK"; - echo "Registered notary with Networkmap successfully" - else - echo "Response from NMS is not ok"; - echo "Something went wrong" - fi - - STATUS=1 - break - fi - done - if [ -e ${BASE_DIR}/logs/node-{{ .Values.nodeName }}.log ] - then - clear - tail -f ${BASE_DIR}/logs/node-{{ .Values.nodeName }}.log - else - echo "waiting for corda to generate log, sleeping for 10s" - sleep {{ $.Values.healthcheck.readinesscheckinterval }} - fi - volumeMounts: - - name: notary-volume - mountPath: "{{ $.Values.volume.baseDir }}" - readOnly: false - initContainers: - - name: init-checkregistration - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - # setting up env to get secrets from vault - echo "Getting secrets from Vault Server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . 
else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - echo "logged into vault" - - COUNTER=1 - while [ "$COUNTER" -lt {{ $.Values.healthcheck.readinessthreshold }} ] - do - # get truststore from vault to see if registration is done or not - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/truststore | jq -r 'if .errors then . else . end') - if echo ${LOOKUP_SECRET_RESPONSE} | grep "errors" - then - sleep {{ $.Values.healthcheck.readinesscheckinterval }} - else - break - fi - COUNTER=`expr "$COUNTER" + 1` - done - - if [ "$COUNTER" -ge {{ $.Values.healthcheck.readinessthreshold }} ] - then - # printing number of trial done before giving up - echo "$COUNTER" - echo "Node registration might not have been done." - exit 1 - fi - echo "Done" - - name: init-nodeconf - image : {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.volume.baseDir }} - - name: KS_SECRET_PREFIX - value: {{ .Values.vault.keystoresecretprefix }} - - name: DB_SECRET_PREFIX - value: {{ .Values.vault.dbsecretprefix }} - - name: RPCUSER_SECRET_PREFIX - value: {{ .Values.vault.rpcusersecretprefix }} - command: ["/bin/sh","-c"] - args: - - |- - #!/bin/bash - # delete previously created node.conf, and create a new node.conf - rm -f ${BASE_DIR}/node.conf; - touch ${BASE_DIR}/node.conf; - - # setting up env to get secrets from vault - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - echo "Getting secrets from Vault Server" - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - - # save keyStorePassword & trustStorePassword from vault - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${KS_SECRET_PREFIX} | jq -r 'if .errors then . else . end') - CONF_KEYSTOREPASSWORD=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["keyStorePassword"]') - CONF_TRUSTSTOREPASSWORD=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["trustStorePassword"]') - - # save dataSourceUserPassword from vault - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${DB_SECRET_PREFIX} | jq -r 'if .errors then . else . end') - CONF_DATASOURCEPASSWORD=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["{{ .Values.credentials.dataSourceUser }}"]') - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${RPCUSER_SECRET_PREFIX} | jq -r 'if .errors then . else . 
end') - - #For more information for node.Conf fields please refer to: https://docs.corda.r3.com/releases/4.0/corda-configuration-file.html - cat << EOF > ${BASE_DIR}/node.conf - p2pAddress : "{{ .Values.nodeConf.p2p.url }}:{{ .Values.nodeConf.p2p.port }}" - myLegalName : "{{ .Values.nodeConf.legalName }}" - keyStorePassword : "${CONF_KEYSTOREPASSWORD}" - trustStorePassword : "${CONF_TRUSTSTOREPASSWORD}" - transactionCacheSizeMegaBytes : {{ .Values.nodeConf.transactionCacheSizeMegaBytes }} - attachmentContentCacheSizeMegaBytes : {{ .Values.nodeConf.attachmentContentCacheSizeMegaBytes }} - notary : { - serviceLegalName : "{{ .Values.nodeConf.notary.serviceLegalName }}" - validating : {{ .Values.nodeConf.notary.validating }} - } - detectPublicIp = {{ .Values.nodeConf.detectPublicIp }} - additionalP2PAddresses = ["{{ .Values.nodeConf.ambassadorAddress }}"] - devMode : {{ .Values.nodeConf.devMode }} - dataSourceProperties = { - dataSourceClassName = "{{ .Values.nodeConf.dataSourceClassName }}" - dataSource.url = "{{ .Values.nodeConf.dataSourceUrl }}" - dataSource.user = {{ .Values.credentials.dataSourceUser }} - dataSource.password = "${CONF_DATASOURCEPASSWORD}" - } - database = { - exportHibernateJMXStatistics = {{ .Values.nodeConf.database.exportHibernateJMXStatistics }} - } - jarDirs = [{{ .Values.nodeConf.jarPath }}] - EOF - if [ -z "{{ .Values.nodeConf.compatibilityZoneURL }}" ] - then - echo 'networkServices = { - doormanURL = "{{ .Values.nodeConf.doormanURL }}" - networkMapURL = "{{ .Values.nodeConf.networkMapURL }}" - }' >> ${BASE_DIR}/node.conf - else - echo 'compatibilityZoneURL : "{{ .Values.nodeConf.compatibilityZoneURL }}"' >> ${BASE_DIR}/node.conf - fi - - if [ -z "{{ .Values.nodeConf.jvmArgs }}" ] - then - echo 'jvmArgs is not configured' - else - echo 'jvmArgs = "{{ .Values.nodeConf.jvmArgs }}" ' >> ${BASE_DIR}/node.conf - fi - - if [ -z "{{ .Values.nodeConf.sshd.port }}" ] - then - echo 'sshd port is not configured' - else - echo 'sshd { port = {{ .Values.nodeConf.sshd.port }} } ' >> ${BASE_DIR}/node.conf - fi - - if [ -z "{{ .Values.nodeConf.systemProperties }}" ] - then - echo 'systemProperties is not configured' - else - echo 'systemProperties = {{ .Values.nodeConf.systemProperties }} ' >> ${BASE_DIR}/node.conf - fi - - if [ -z "{{ .Values.nodeConf.exportJMXTo }}" ] - then - echo 'exportJMXTo is not configured' - else - echo 'exportJMXTo = {{ .Values.nodeConf.exportJMXTo }} ' >> ${BASE_DIR}/node.conf - fi - - if [ -z "{{ .Values.nodeConf.messagingServerAddress }}" ] - then - echo 'The address of the ArtemisMQ broker instance is not configured' - else - echo 'messagingServerAddress : "{{ .Values.nodeConf.messagingServerAddress }}" ' >> ${BASE_DIR}/node.conf - fi - - if [ -z "{{ .Values.credentials.rpcUser }}" ] - then - echo 'rpc useer is not configured' - else - echo 'rpcUsers : [' >> ${BASE_DIR}/node.conf - {{- range $.Values.credentials.rpcUser }} - echo '{ username={{ .name }} ,permissions={{ .permissions }}, ' >> ${BASE_DIR}/node.conf - echo " password=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["{{ .name }}"]') }" >> ${BASE_DIR}/node.conf - {{- end }} - echo ']' >> ${BASE_DIR}/node.conf - fi - - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${KS_SECRET_PREFIX} | jq -r 'if .errors then . else . 
end') - if [ "{{ .Values.nodeConf.rpcSettings.useSsl }}" == true ] - then - echo "rpcSettings { - standAloneBroker = {{ .Values.nodeConf.rpcSettings.standAloneBroker }} - address = "{{ .Values.nodeConf.rpcSettings.address }}" - adminAddress = "{{ .Values.nodeConf.rpcSettings.adminAddress }}" - useSsl = {{ .Values.nodeConf.rpcSettings.useSsl }} - ssl = { - keyStorePassword = $(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["sslkeyStorePassword"]') - trustStorePassword = $(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["ssltrustStorePassword"]') - certificatesDirectory = ${BASE_DIR}/{{ .Values.nodeConf.rpcSettings.ssl.certificatesDirectory }} - sslKeystore = ${BASE_DIR}/{{ .Values.nodeConf.rpcSettings.ssl.certificatesDirectory }}/{{ .Values.nodeConf.rpcSettings.ssl.sslKeystoreFileName }} - trustStoreFile = ${BASE_DIR}/{{ .Values.nodeConf.rpcSettings.ssl.certificatesDirectory }}/{{ .Values.nodeConf.rpcSettings.ssl.trustStoreFileName }} - } - }" >> ${BASE_DIR}/node.conf - else - echo 'rpcSettings { - standAloneBroker = {{ .Values.nodeConf.rpcSettings.standAloneBroker }} - address = "{{ .Values.nodeConf.rpcSettings.address }}" - adminAddress = "{{ .Values.nodeConf.rpcSettings.adminAddress }}" - }' >> ${BASE_DIR}/node.conf - fi - echo "node.conf created in ${BASE_DIR}" - volumeMounts: - - name: nodeconf - mountPath: "{{ $.Values.volume.baseDir }}" - - name: init-certificates - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: BASE_DIR - value: {{ $.Values.volume.baseDir }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - - name: GIT_SECRET_PREFIX - value: {{ .Values.vault.gitsecretprefix }} - - name: AWS_SECRET_PREFIX - value: {{ .Values.vault.awssecretprefix }} - - name: H2SSL_SECRET_PREFIX - value: {{ .Values.vault.h2sslsecretprefix }} - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_TOKEN}" \ - ${VAULT_ADDR}/v1/${1}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - # setting up env to get secrets from vault - echo "Getting secrets from Vault Server" - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_TOKEN}" - - OUTPUT_PATH=${BASE_DIR} - - # get nodekeystore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/nodekeystore | jq -r 'if .errors then . else . 
end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/nodekeystore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TLS_NODEKEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["nodekeystore.jks"]') - echo "${TLS_NODEKEYSTORE}" | base64 -d > ${OUTPUT_PATH}/nodekeystore.jks - - # get sslkeystore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/sslkeystore | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/sslkeystore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TLS_SSLKEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["sslkeystore.jks"]') - echo "${TLS_SSLKEYSTORE}" | base64 -d > ${OUTPUT_PATH}/sslkeystore.jks - - # get truststore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/truststore | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/truststore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TLS_TRUSTSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["truststore.jks"]') - echo "${TLS_TRUSTSTORE}" | base64 -d > ${OUTPUT_PATH}/truststore.jks - - # get network-map-truststore.jks from vault - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/networkmaptruststore | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/networkmaptruststore" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TLS_NMS=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["network-map-truststore"]') - echo "${TLS_NMS}" | base64 -d > ${OUTPUT_PATH}/network-map-truststore.jks - - # when using doorman and networkmap in TLS: true, and using private certificate then download certificate - if [ "{{ .Values.image.privateCertificate }}" == true ] - then - mkdir -p ${OUTPUT_PATH}/networkmap - mkdir -p ${OUTPUT_PATH}/doorman - - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/networkmap | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/networkmap" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - NETWORKMAP_CRT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["networkmap.crt"]') - echo "${NETWORKMAP_CRT}" | base64 -d > ${OUTPUT_PATH}/networkmap/networkmap.crt - - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/doorman | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/doorman" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - DOORMAN_CRT=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["doorman.crt"]') - echo "${DOORMAN_CRT}" | base64 -d > ${OUTPUT_PATH}/doorman/doorman.crt - fi - - # when using custom sslKeystore while setting in node.conf - if [ "{{ .Values.nodeConf.rpcSettings.useSsl }}" == true ] - then - mkdir -p ${OUTPUT_PATH}/${SSL_CERT_PATH} - chmod -R ${OUTPUT_PATH}/${SSL_CERT_PATH} - SSL_CERT_PATH={{ .Values.nodeConf.rpcSettings.ssl.certificatesDirectory }} - SSL_KEYSTORE_FILE_NAME_KEY={{ .Values.nodeConf.rpcSettings.ssl.sslKeystoreFileName }} - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/${SSL_KEYSTORE_FILE_NAME_KEY} | jq -r 'if .errors then . else . 
end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/${SSL_KEYSTORE_FILE_NAME_KEY}" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - SSLKEYSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["SSL_KEYSTORE_FILE_NAME_KEY"]') - echo "${SSLKEYSTORE}" | base64 -d > ${OUTPUT_PATH}/${SSL_CERT_PATH}/${SSL_KEYSTORE_FILE_NAME_KEY} - TRUSTKEYSTORE_FILE_NAME_KEY={{ .Values.nodeConf.rpcSettings.ssl.sslKeystoreFileName }} - LOOKUP_SECRET_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${CERTS_SECRET_PREFIX}/${TRUSTKEYSTORE_FILE_NAME_KEY} | jq -r 'if .errors then . else . end') - validateVaultResponse "${CERTS_SECRET_PREFIX}/${TRUSTKEYSTORE_FILE_NAME_KEY}" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - TRUSTSTORE=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["TRUSTKEYSTORE_FILE_NAME_KEY"]') - echo "${TRUSTSTORE}" | base64 -d > ${OUTPUT_PATH}/${SSL_CERT_PATH}/${TRUSTKEYSTORE_FILE_NAME_KEY} - else - echo "" - fi - echo "Done" - volumeMounts: - - name: certificates - mountPath: {{ $.Values.volume.baseDir }} - - name: db-healthcheck - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - command: ["sh", "-c"] - args: - - |- - #!/usr/bin/env sh - COUNTER=1 - FLAG=true - # perform health check if db is up and running before starting corda node - while [ "$COUNTER" -le {{ $.Values.healthcheck.readinessthreshold }} ] - do - DB_NODE={{ .Values.nodeConf.dbUrl }}:{{ .Values.nodeConf.dbPort }} - STATUS=$(nc -vz $DB_NODE 2>&1 | grep -c open ) - if [ "$STATUS" == 0 ] - then - FLAG=false - else - FLAG=true - echo "DB up and running" - fi - if [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, retrying after {{ $.Values.healthcheck.readinesscheckinterval }} seconds" - COUNTER=`expr "$COUNTER" + 1` - sleep {{ $.Values.healthcheck.readinesscheckinterval }} - else - echo "SUCCESS!" - echo "DB up and running!" - exit 0 - break - fi - done - if [ "$COUNTER" -gt {{ $.Values.healthcheck.readinessthreshold }} ] || [ "$FLAG" == false ] - then - echo "Retry attempted $COUNTER times, no DB up and running. Giving up!" - exit 1 - break - fi - - name: init-cordapps - image: {{ .Values.image.initContainerName }} - imagePullPolicy: Always - env: - - name: BASE_DIR - value: {{ $.Values.volume.baseDir }} - - name: VAULT_APP_ROLE - value: {{.Values.vault.role}} - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: SECRET_PREFIX - value: {{ $.Values.vault.cordappsreposecretprefix }} - command: ["sh", "-c"] - args: - - |- - # crearting cordapps dir in volume to keep jars - mkdir -p {{ .Values.volume.baseDir }}/cordapps - {{- if .Values.cordapps.getcordapps }} - mkdir -p /tmp/downloaded-jars - # setting up env to get secrets from vault - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - echo "Getting secrets from Vault Server" - VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login -H "Content-Type: application/json" -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | jq -r 'if .errors then . else .auth.client_token end') - - # save cordapps repository login password from vault - LOOKUP_PWD_RESPONSE=$(curl -sS --header "X-Vault-Token: ${VAULT_TOKEN}" ${VAULT_ADDR}/v1/${SECRET_PREFIX} | jq -r 'if .errors then . else . 
end') - REPO_USER_PASS=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["repo_password"]') - REPO_USER=$(echo ${LOOKUP_PWD_RESPONSE} | jq -r '.data.data["repo_username"]') - - # Downloading official corda provided jars using curl - {{- range .Values.cordapps.jars }} - cd /tmp/downloaded-jars && curl -u $REPO_USER:$REPO_USER_PASS -O -L {{ .url }} - {{- end }} - cp -ar /tmp/downloaded-jars/* {{ $.Values.volume.baseDir }}/cordapps - {{- end }} - volumeMounts: - - name: notary-volume - mountPath: "{{ $.Values.volume.baseDir }}" - imagePullSecrets: - - name: {{ .Values.image.imagePullSecret }} - volumes: - - name: notary-volume - persistentVolumeClaim: - claimName: {{ .Values.pvc.name }} - - name: certificates - emptyDir: - medium: Memory - - name: nodeconf - emptyDir: - medium: Memory diff --git a/platforms/r3-corda/charts/corda-notary/templates/pvc.yaml b/platforms/r3-corda/charts/corda-notary/templates/pvc.yaml deleted file mode 100644 index 5e240c5faee..00000000000 --- a/platforms/r3-corda/charts/corda-notary/templates/pvc.yaml +++ /dev/null @@ -1,29 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: {{ .Values.pvc.name }} - {{- if .Values.pvc.annotations }} - annotations: -{{ toYaml .Values.pvc.annotations | indent 8 }} - {{- end }} - namespace: {{ .Values.metadata.namespace }} - labels: - app.kubernetes.io/name: {{ .Values.pvc.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} -spec: - storageClassName: {{ .Values.pvc.storageClassName }} - accessModes: - - ReadWriteOnce - resources: - requests: - storage: {{ .Values.pvc.memory }} \ No newline at end of file diff --git a/platforms/r3-corda/charts/corda-notary/templates/service.yaml b/platforms/r3-corda/charts/corda-notary/templates/service.yaml deleted file mode 100644 index 27e8ee8d056..00000000000 --- a/platforms/r3-corda/charts/corda-notary/templates/service.yaml +++ /dev/null @@ -1,93 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.service.name }} - annotations: - namespace: {{ .Values.metadata.namespace }} - labels: - run: {{ .Values.service.name }} - app.kubernetes.io/name: {{ .Values.service.name }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . 
| nindent 2 }} -spec: - type: {{ .Values.service.type }} - {{- if .Values.service.clusterIP }} - clusterIP: "{{ .Values.service.clusterIP }}" - {{- end }} - selector: - app: {{ .Values.nodeName }} - ports: - # for p2p communication among corda node - - name: p2p - protocol: TCP - port: {{ .Values.service.p2p.port }} - targetPort: {{ .Values.service.p2p.targetPort }} - {{- if .Values.service.p2p.nodePort }} - nodePort: {{ .Values.service.p2p.nodePort}} - {{- end }} - # for rpc communication between corda node and webserver - - name: rpc - protocol: TCP - port: {{ .Values.service.rpc.port }} - targetPort: {{ .Values.service.rpc.targetPort }} - {{- if .Values.service.rpc.nodePort }} - nodePort: {{ .Values.service.rpc.nodePort}} - {{- end }} - # for rpc admin communication - - name: rpcadmin - protocol: TCP - port: {{ .Values.service.rpcadmin.port }} - targetPort: {{ .Values.service.rpcadmin.targetPort }} - {{- if .Values.service.rpcadmin.nodePort }} - nodePort: {{ .Values.service.rpcadmin.nodePort}} - {{- end }} -{{- if $.Values.ambassador }} ---- -apiVersion: getambassador.io/v3alpha1 -kind: Host -metadata: - name: {{ .Values.ambassador.component_name }}-host - namespace: {{ .Values.metadata.namespace }} -spec: - hostname: {{ .Values.ambassador.component_name }}.{{ .Values.ambassador.external_url_suffix }} - acmeProvider: - authority: none - requestPolicy: - insecure: - action: Route - tlsSecret: - name: {{ .Values.ambassador.component_name }}-ambassador-certs - namespace: {{ .Values.metadata.namespace }} ---- -apiVersion: getambassador.io/v3alpha1 -kind: TLSContext -metadata: - name: {{ .Values.ambassador.component_name }}-context - namespace: {{ .Values.metadata.namespace }} -spec: - hosts: - - {{ .Values.ambassador.component_name }}.{{ .Values.ambassador.external_url_suffix }} - secret: {{ .Values.ambassador.component_name }}-ambassador-certs.{{ .Values.metadata.namespace }} - secret_namespacing: true - min_tls_version: v1.2 ---- -apiVersion: getambassador.io/v3alpha1 -kind: Mapping -metadata: - name: {{ .Values.ambassador.component_name }}-p2p-mapping - namespace: {{ .Values.metadata.namespace }} -spec: - host: {{ .Values.ambassador.component_name }}.{{ .Values.ambassador.external_url_suffix }} - prefix: / - service: https://{{ .Values.ambassador.component_name }}.{{ .Values.metadata.namespace }}:{{ .Values.nodeConf.p2p.port }} - tls: {{ .Values.ambassador.component_name }}-context -{{- end }} - diff --git a/platforms/r3-corda/charts/corda-notary/values.yaml b/platforms/r3-corda/charts/corda-notary/values.yaml deleted file mode 100644 index 6b7bd230c19..00000000000 --- a/platforms/r3-corda/charts/corda-notary/values.yaml +++ /dev/null @@ -1,247 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -#Provide the nodeName for node -#Eg. nodeName: bank1 -nodeName: bank1 - -#Provide the replica set for node deployed -#Eg. replicas: 1 -replicas: 1 - -metadata: - #Provide the namespace - #Eg. namespace: default - namespace: default - #Provide the custom labels - #NOTE: Provide labels other than name, release name , release service, chart version , chart name , app. - #Eg. labels: - # role: create_channel - labels: - -image: - #Provide the containerName of image - #Eg. 
containerName: ghcr.io/hyperledger/bevel-corda:4.9 - containerName: ghcr.io/hyperledger/bevel-corda:4.9 - #Provide the name of image for init container - #Eg. name: ghcr.io/hyperledger/bevel-alpine:latest - initContainerName: ghcr.io/hyperledger/bevel-alpine:latest - #Provide the image pull secret of image - #Eg. pullSecret: regcred - imagePullSecret: "" - #Provide true or false if private certificate to be added - #Eg. privateCertificate: true - privateCertificate: true - #Provide doorman domain alias - #Eg. doormanCertAlias: doorman.fracordakubetest7.com - doormanCertAlias: doorman.fracordakubetest7.com - #Provide netwrokmap domain alias - #Eg. networkmapCertAlias: networkmap.fracordakubetest7.com - networkmapCertAlias: networkmap.fracordakubetest7.com - - -#For more information for node.Conf fields please refer to: https://docs.corda.net/releases/release-V3.3/corda-configuration-file.html -nodeConf: - #The host and port on which the node is available for protocol operations over ArtemisMQ. - p2p: - url: - port: - #Specify the ambassador host:port which will be advertised in addition to p2paddress - ambassadorAddress: - rpcSettings: - useSsl: - standAloneBroker: - address: - adminAddress: - ssl: - certificatesDirectory: - sslKeystorePath: - trustStoreFilePath: - #Provide the legalName for node - #Eg. legalName: "O=Bank1,L=London,C=GB,CN=Bank1" - legalName: - messagingServerAddress: - jvmArgs: - systemProperties: - sshd: - port: - exportJMXTo: - transactionCacheSizeMegaBytes: - attachmentContentCacheSizeMegaBytes: - notary: - validating: - serviceLegalName: - detectPublicIp: - database: - exportHibernateJMXStatistics: - #Provide the h2Url for node - #Eg. h2Url: bank1h2 - dbUrl: bank1h2 - #Provide the h2Port for node - #Eg. h2Port: 9101 - dbPort: 9101 - dataSourceClassName: - dataSourceUrl: - jarPath: - #Provide the nms for node - #Eg. nms: "http://rp-elb-fra-corda-kube-cluster7-2016021309.us-west-1.elb.amazonaws.com:30050" - networkMapURL: - doormanURL: - compatibilityZoneURL: - #Provide the jar Version for corda jar and finanace jar - #Eg. jarVersion: 3.3-corda - jarVersion: 3.3-corda - #Provide the devMode for corda node - #Eg. devMode: true - devMode: true - #Provide the enviroment variables to be set - env: - - name: JAVA_OPTIONS - value: - - name: CORDA_HOME - value: - - name: BASE_DIR - value: - -credentials: - #Provide the dataSourceUser for corda node - #Eg. dataSourceUser: - dataSourceUser: - #Provide the rpcUser for corda node - rpcUser: - - name: bank1operations - permissions: [ALL] -cordapps: - #Provide if you want to provide jars in cordapps - #Eg. getcordapps: true or false - getcordapps: true - repository: - jars: - #Provide url to download the jar using wget cmd - #Eg. url: https://ci-artifactory.corda.r3cev.com/artifactory/corda-releases/net/corda/corda-finance/3.3-corda/corda-finance-3.3-corda.jar - - url: - - url: - -volume: - #Provide the base path - #Eg. mountPath: "/opt/h2-data" - baseDir: - -resources: - #Provide the limit memory for node - #Eg. limits: "1Gi" - limits: "1Gi" - #Provide the requests memory for node - #Eg. requests: "1Gi" - requests: "1Gi" - -pvc: - # annotations: - # key: "value" - annotations: - #Provide the name for pvc - #Eg. name: bank1-pvc - name: bank1-pvc - #Provide the memory for node - #Eg. memory: 4Gi - memory: 4Gi - #Provide the name for the storageclass - #Eg. name: bank1nodesc - storageClassName: bank1nodesc - - -service: - #Provide the service - #Eg. name: bank1 - name: bank1 -# Note: Target ports are dependent on image being used. 
Please change them accordingly -# nodePort should be kept empty while using service type as ClusterIP ( Values.service.type ) - #Provide the type of service - #Eg. type: NodePort or LoadBalancer etc - type: NodePort - p2p: - #Provide the p2p port for node - #Eg. port: 10007 - port: 10007 - #Provide the p2p node port for node - #Eg. port: 30007 - nodePort: - #Provide the p2p targetPort for node - #Eg. targetPort: 30007 - targetPort: 30007 - rpc: - #Provide the rpc port for node - #Eg. port: 10008 - port: 10008 - #Provide the rpc targetPort for node - #Eg. targetPort: 10003 - targetPort: 10003 - #Provide the rpc node port for node - #Eg. nodePort: 30007 - nodePort: - rpcadmin: - #Provide the rpcadmin port for node - #Eg. port: 10108 - port: 10108 - #Provide the rpcadmin targetPort for node - #Eg. targetPort: 10005 - targetPort: 10005 - #Provide the rpcadmin node port for node - #Eg. nodePort: 30007 - nodePort: - -deployment: - annotations: -# annotations: -# key: "value" - -vault: - #Provide the vault server address - #Eg. address: http://54.226.163.39:8200 - address: - #Provide the vaultrole - #Eg. role: vault-role - role: vault-role - #Provide the authpath - #Eg. authpath: cordabank1 - authpath: cordabank1 - #Provide the serviceaccountname - #Eg. serviceaccountname: vault-auth-issuer - serviceaccountname: vault-auth-issuer - #Provide the secretprefix - #Eg. dbsecretprefix: bank1/credentials/database - dbsecretprefix: bank1/credentials/database - #Provide the secretprefix - #Eg. rpcusersecretprefix: bank1/credentials/rpcusers - rpcusersecretprefix: bank1/credentials/rpcusers - #Provide the secretprefix - #Eg. keystoresecretprefix: bank1/credentials/keystore - keystoresecretprefix: bank1/credentials/keystore - #Provide the secretprefix - #Eg. certsecretprefix: bank1/certs - certsecretprefix: bank1/certs - #Provide the secretprefix - #Eg. cordappsreposecretprefix: bank1/credentials/cordapps - cordappsreposecretprefix: bank1/credentials/cordapps - -healthcheck: - #Provide the interval in seconds you want to iterate till db to be ready - #Eg. readinesscheckinterval: 5 - readinesscheckinterval: 5 - #Provide the threshold till you want to check if specified db up and running - #Eg. readinessthreshold: 2 - readinessthreshold: 2 - -ambassador: - #Provides component name - #Eg. component_name: node - component_name: node - #Provides the suffix to be used in external URL - #Eg. external_url_suffix: org1.blockchaincloudpoc.com - external_url_suffix: org1.blockchaincloudpoc.com - #Provide the p2p port for ambassador - #Eg. 
p2p_ambassador: 10007 - p2p_ambassador: diff --git a/platforms/r3-corda/charts/values/noproxy-and-novault/init.yaml b/platforms/r3-corda/charts/values/noproxy-and-novault/init.yaml new file mode 100644 index 00000000000..ab17c9b6ad0 --- /dev/null +++ b/platforms/r3-corda/charts/values/noproxy-and-novault/init.yaml @@ -0,0 +1,9 @@ +#helm install init -f values/noproxy-and-novault/init.yaml -n supplychain-ns corda-init +global: + serviceAccountName: bevel-auth + vault: + type: kubernetes + network: corda + cluster: + provider: aws + cloudNativeServices: false diff --git a/platforms/r3-corda/charts/values/noproxy-and-novault/network-service.yaml b/platforms/r3-corda/charts/values/noproxy-and-novault/network-service.yaml new file mode 100644 index 00000000000..4ab3eaad295 --- /dev/null +++ b/platforms/r3-corda/charts/values/noproxy-and-novault/network-service.yaml @@ -0,0 +1,37 @@ +--- +#helm install supplychain -f values/noproxy-and-novault/network-service.yaml -n supplychain-ns corda-network-service +#helm upgrade supplychain -f values/noproxy-and-novault/network-service.yaml -n supplychain-ns corda-network-service +global: + serviceAccountName: bevel-auth + cluster: + provider: aws + cloudNativeServices: false + vault: + type: kubernetes + proxy: + provider: none + externalUrlSuffix: svc.cluster.local + +storage: + size: "1Gi" + dbSize: 1Gi + +settings: + removeKeysOnDelete: true # this will erase keys + rootSubject: "CN=DLT Root CA,OU=DLT,O=DLT,L=New York,C=US" + mongoSubject: "C=US,ST=New York,L=New York,O=Lite,OU=DBA,CN=mongoDB" + +doorman: + subject: "CN=Corda Doorman CA,OU=DOORMAN,O=DOORMAN,L=New York,C=US" + username: doorman + authPassword: admin + dbPassword: newdbnm + +nms: + subject: "CN=Network Map,OU=FRA,O=FRA,L=Berlin,C=DE" + username: networkmap + authPassword: admin + dbPassword: newdbnm + +tls: + enabled: false diff --git a/platforms/r3-corda/charts/values/noproxy-and-novault/node.yaml b/platforms/r3-corda/charts/values/noproxy-and-novault/node.yaml new file mode 100644 index 00000000000..0c163c84168 --- /dev/null +++ b/platforms/r3-corda/charts/values/noproxy-and-novault/node.yaml @@ -0,0 +1,33 @@ +--- +#helm install manufacturer -f values/noproxy-and-novault/node.yaml -n manufacturer-ns corda-node +#helm upgrade manufacturer -f values/noproxy-and-novault/node.yaml -n manufacturer-ns corda-node +global: + serviceAccountName: bevel-auth + cluster: + provider: aws + cloudNativeServices: false + vault: + type: kubernetes + proxy: + provider: none + externalUrlSuffix: svc.cluster.local + +storage: + size: "1Gi" + dbSize: 1Gi + +tls: + enabled: false + +image: + corda: + repository: ghcr.io/hyperledger/bevel-corda + tag: 4.9 + +nodeConf: + removeKeysOnDelete: true + legalName: "O=Manufacturer,OU=Manufacturer,L=47.38/8.54/Zurich,C=CH" + notary: + enabled: false + networkMapURL: http://supplychain-nms.supplychain-ns:8080 + doormanURL: http://supplychain-doorman.supplychain-ns:8080 diff --git a/platforms/r3-corda/charts/values/noproxy-and-novault/notary.yaml b/platforms/r3-corda/charts/values/noproxy-and-novault/notary.yaml new file mode 100644 index 00000000000..31564efb56e --- /dev/null +++ b/platforms/r3-corda/charts/values/noproxy-and-novault/notary.yaml @@ -0,0 +1,36 @@ +--- +#helm install notary -f values/noproxy-and-novault/notary.yaml -n supplychain-ns corda-node +#helm upgrade notary -f values/noproxy-and-novault/notary.yaml -n supplychain-ns corda-node +global: + serviceAccountName: bevel-auth + cluster: + provider: aws + cloudNativeServices: false + vault: + type: 
kubernetes + proxy: + provider: none + externalUrlSuffix: svc.cluster.local + +storage: + size: "1Gi" + dbSize: 1Gi + +tls: + enabled: false + nameOverride: notary + +image: + corda: + repository: ghcr.io/hyperledger/bevel-corda + tag: 4.9 + +nodeConf: + removeKeysOnDelete: true + legalName: "O=Notary,OU=Notary,L=London,C=GB" + notary: + enabled: true + validating: true + serviceLegalName: "O=Notary Service,OU=Notary,L=London,C=GB" + networkMapURL: http://supplychain-nms.supplychain-ns:8080 + doormanURL: http://supplychain-doorman.supplychain-ns:8080 diff --git a/platforms/r3-corda/charts/values/proxy-and-vault/init-sec.yaml b/platforms/r3-corda/charts/values/proxy-and-vault/init-sec.yaml new file mode 100644 index 00000000000..2ddab47b55a --- /dev/null +++ b/platforms/r3-corda/charts/values/proxy-and-vault/init-sec.yaml @@ -0,0 +1,18 @@ +#helm install init -f values/proxy-and-vault/init-sec.yaml -n manufacturer-ns corda-init +global: + serviceAccountName: vault-auth + vault: + type: hashicorp + role: vault-role + address: http://vault.url:8200 + authPath: manufacturer + secretEngine: secretsv2 + secretPrefix: "data/manufacturer" + cluster: + provider: aws + cloudNativeServices: false + kubernetesUrl: "https://yourkubernetes.com" + +settings: + # Flag to copy doorman and nms certs only when tls: true + secondaryInit: true diff --git a/platforms/r3-corda/charts/values/proxy-and-vault/init.yaml b/platforms/r3-corda/charts/values/proxy-and-vault/init.yaml new file mode 100644 index 00000000000..729ba1cf7a5 --- /dev/null +++ b/platforms/r3-corda/charts/values/proxy-and-vault/init.yaml @@ -0,0 +1,14 @@ +#helm install init -f values/proxy-and-vault/init.yaml -n supplychain-ns corda-init +global: + serviceAccountName: vault-auth + vault: + type: hashicorp + role: vault-role + address: http://vault.url:8200 + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + cluster: + provider: aws + cloudNativeServices: false + kubernetesUrl: "https://yourkubernetes.com" diff --git a/platforms/r3-corda/charts/values/proxy-and-vault/network-service.yaml b/platforms/r3-corda/charts/values/proxy-and-vault/network-service.yaml new file mode 100644 index 00000000000..3a70704d5e4 --- /dev/null +++ b/platforms/r3-corda/charts/values/proxy-and-vault/network-service.yaml @@ -0,0 +1,44 @@ +--- +#helm install supplychain -f values/proxy-and-vault/network-service.yaml -n supplychain-ns corda-network-service +#helm upgrade supplychain -f values/proxy-and-vault/network-service.yaml -n supplychain-ns corda-network-service +global: + serviceAccountName: vault-auth + cluster: + provider: aws + cloudNativeServices: false + vault: + type: hashicorp + role: vault-role + address: http://vault.url:8200 + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + proxy: + provider: "ambassador" + externalUrlSuffix: test.blockchaincloud.com + +storage: + size: "1Gi" + dbSize: 1Gi + +settings: + removeKeysOnDelete: true # this will erase keys + rootSubject: "CN=DLT Root CA,OU=DLT,O=DLT,L=New York,C=US" + mongoSubject: "C=US,ST=New York,L=New York,O=Lite,OU=DBA,CN=mongoDB" + +doorman: + subject: "CN=Corda Doorman CA,OU=DOORMAN,O=DOORMAN,L=New York,C=US" + username: doorman + authPassword: admin + dbPassword: newdbnm + +nms: + subject: "CN=Network Map,OU=FRA,O=FRA,L=Berlin,C=DE" + username: networkmap + authPassword: admin + dbPassword: newdbnm + +tls: + enabled: true + settings: + networkServices: true
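The proxy-and-vault values above are environment samples: global.vault.address, global.vault.authPath and global.proxy.externalUrlSuffix have to match the Vault instance and DNS zone of the target cluster. As a minimal sketch (the file name and every value below are placeholders, not part of the charts), the same keys can be overridden from a small extra values file passed with an additional -f flag on the helm install/upgrade commands shown in the comments above:

  # my-overrides.yaml (hypothetical)
  global:
    vault:
      address: https://vault.example.com:8200
      authPath: supplychain
    proxy:
      externalUrlSuffix: corda.example.com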
diff --git a/platforms/r3-corda/charts/values/proxy-and-vault/node.yaml b/platforms/r3-corda/charts/values/proxy-and-vault/node.yaml new file mode 100644 index 00000000000..12b43c012f0 --- /dev/null +++ b/platforms/r3-corda/charts/values/proxy-and-vault/node.yaml @@ -0,0 +1,39 @@ +--- +#helm install manufacturer -f values/proxy-and-vault/node.yaml -n manufacturer-ns corda-node +#helm upgrade manufacturer -f values/proxy-and-vault/node.yaml -n manufacturer-ns corda-node +global: + serviceAccountName: vault-auth + cluster: + provider: aws + cloudNativeServices: false + vault: + type: hashicorp + role: vault-role + address: http://vault.url:8200 + authPath: manufacturer + secretEngine: secretsv2 + secretPrefix: "data/manufacturer" + proxy: + provider: "ambassador" + externalUrlSuffix: test.blockchaincloud.com + p2p: 15010 + +storage: + size: "1Gi" + dbSize: 1Gi + +tls: + enabled: true + +image: + corda: + repository: ghcr.io/hyperledger/bevel-corda + tag: 4.9 + +nodeConf: + removeKeysOnDelete: true + legalName: "O=Manufacturer,OU=Manufacturer,L=47.38/8.54/Zurich,C=CH" + notary: + enabled: false + networkMapURL: https://supplychain-nms.test.blockchaincloud.com + doormanURL: https://supplychain-doorman.test.blockchaincloud.com diff --git a/platforms/r3-corda/charts/values/proxy-and-vault/notary.yaml b/platforms/r3-corda/charts/values/proxy-and-vault/notary.yaml new file mode 100644 index 00000000000..df49e13a59c --- /dev/null +++ b/platforms/r3-corda/charts/values/proxy-and-vault/notary.yaml @@ -0,0 +1,42 @@ +--- +#helm install notary -f values/proxy-and-vault/notary.yaml -n supplychain-ns corda-node +#helm upgrade notary -f values/proxy-and-vault/notary.yaml -n supplychain-ns corda-node +global: + serviceAccountName: vault-auth + cluster: + provider: aws + cloudNativeServices: false + vault: + type: hashicorp + role: vault-role + address: http://vault.url:8200 + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + proxy: + provider: "ambassador" + externalUrlSuffix: test.blockchaincloud.com + p2p: 15010 + +storage: + size: "1Gi" + dbSize: 1Gi + +tls: + enabled: true + nameOverride: notary + +image: + corda: + repository: ghcr.io/hyperledger/bevel-corda + tag: 4.9 + +nodeConf: + removeKeysOnDelete: true + legalName: "O=Notary,OU=Notary,L=London,C=GB" + notary: + enabled: true + validating: true + serviceLegalName: "O=Notary Service,OU=Notary,L=London,C=GB" + networkMapURL: https://supplychain-nms.test.blockchaincloud.com + doormanURL: https://supplychain-doorman.test.blockchaincloud.com diff --git a/platforms/r3-corda/configuration/cleanup.yaml b/platforms/r3-corda/configuration/cleanup.yaml index 8f1f05e3f9e..25a4a49adbf 100644 --- a/platforms/r3-corda/configuration/cleanup.yaml +++ b/platforms/r3-corda/configuration/cleanup.yaml @@ -20,7 +20,8 @@ - include_role: name: "delete/vault_secrets" vars: - component_name: "{{ item.name | lower }}-ns" + org_name: "{{ item.name | lower }}" + component_ns: "{{ item.name | lower }}-ns" component_type: "{{ item.type | lower }}" services: "{{ item.services }}" kubernetes: "{{ item.k8s }}" diff --git a/platforms/r3-corda/configuration/deploy-network.yaml b/platforms/r3-corda/configuration/deploy-network.yaml index caa332071b2..277f631c2dc 100644 --- a/platforms/r3-corda/configuration/deploy-network.yaml +++ b/platforms/r3-corda/configuration/deploy-network.yaml @@ -4,11 +4,21 @@ # SPDX-License-Identifier: Apache-2.0 ##############################################################################################
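The deploy-network.yaml changes that follow loop over network['organizations'] and gate each task on fields of the organization entry, chiefly org.type and org.org_status. A rough sketch of the fields those conditions and vars read, assuming the usual Bevel network.yaml layout (all values below are placeholders):

  organizations:
    - organization:
      name: supplychain
      type: network-service          # 'node' for member organizations
      org_status: new                # tasks run only when unset or 'new'
      gitops:
        chart_source: platforms/r3-corda/charts
        release_dir: platforms/r3-corda/releases/dev
      k8s:
        context: cluster_context
        config_file: path/to/kubeconfig
      vault:
        url: http://vault.url:8200
        root_token: vault_root_token
      services:
        notary:                      # present only for the notary organization
          name: notary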
-############################################################################################## -# Playbook to create deployment files for namespaces, service account and clusterrolebinding -# Playbook arguments: complete network.yaml -############################################################################################## +# This playbook deploys a DLT network on existing Kubernetes clusters +# The Kubernetes clusters should already be created and the information to connect to the +# clusters should be updated in the network.yaml file that is used as an input to this playbook + +################################################################################################################## + +# To run this playbook from this directory, use the following command (network.yaml also in this directory) +# ansible-playbook deploy-network.yaml -e "@./network.yaml" + +################################################################################################################## +# Note: Please ensure that the ../../shared/configuration playbooks have been run using the same network.yaml + +################################################################################################################## +--- - hosts: ansible_provisioners gather_facts: no no_log: "{{ no_ansible_log | default(false) }}" @@ -23,67 +33,64 @@ - name: "Create namespace" include_role: name: create/namespace + vars: + component_name: "{{ org.name }}-ns" loop: "{{ network['organizations'] }}" loop_control: loop_var: org - # Create Storageclass - - name: Create StorageClass + # Create necessary secrets + - name: "Create k8s secrets" include_role: - name: "{{ playbook_dir }}/../../../platforms/shared/configuration/roles/setup/storageclass" + name: create/secrets vars: - org_name: "{{ org.name | lower }}" - sc_name: "{{ org_name }}-bevel-storageclass" - region: "{{ org.k8s.region | default('eu-west-1') }}" + component_ns: "{{ org.name | lower }}-ns" + kubernetes: "{{ org.k8s }}" + vault: "{{ org.vault }}" loop: "{{ network['organizations'] }}" loop_control: loop_var: org + when: + - org.org_status is not defined or org.org_status == 'new' - # Setup Vault-Kubernetes accesses - - name: "Setup vault Kubernetes accesses" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/setup/vault_kubernetes" - vars: - name: "{{ item.name | lower }}" - org_name: "{{ item.name | lower }}" - component_ns: "{{ item.name | lower }}-ns" - component_name: "{{ item.name | lower }}-vaultk8s-job" - component_auth: "{{ network.env.type }}{{ name }}" - component_type: "organization" - kubernetes: "{{ item.k8s }}" - vault: "{{ item.vault }}" - gitops: "{{ item.gitops }}" - loop: "{{ network['organizations'] }}" - - # Deploy Doorman node - - name: Deploy Doorman service node + # Execute primary init for the network-service organization + - name: "Setup primary init with network-service org" include_role: - name: setup/doorman + name: init vars: - services: "{{ item.services }}" - name: "{{ item.name | lower }}" - sc_name: "{{ name }}-bevel-storageclass" - component_ns: "{{ item.name | lower }}-ns" - kubernetes: "{{ item.k8s }}" - vault: "{{ item.vault }}" - gitops: "{{ item.gitops }}" + build_path: "./build" + name: "{{ org.name | lower }}" + component_ns: "{{ org.name | lower }}-ns" + init_type: "primary_init" + kubernetes: "{{ org.k8s }}" + vault: "{{ org.vault }}" + charts_dir: "{{ org.gitops.chart_source }}" + values_dir: "./build/{{ component_ns }}" loop: "{{ network['organizations'] }}" - when: item.type.find('doorman')
!= -1 + loop_control: + loop_var: org + when: + - org.type == 'network-service' + - org.org_status is not defined or org.org_status == 'new' - # Deploy NMS node - - name: Deploy Networkmap service node + # Deploy Corda network services + - name: Deploy Network service include_role: - name: setup/nms + name: setup/network_service vars: - services: "{{ item.services }}" - name: "{{ item.name | lower }}" - sc_name: "{{ name }}-bevel-storageclass" - component_ns: "{{ item.name | lower }}-ns" - kubernetes: "{{ item.k8s }}" - vault: "{{ item.vault }}" - gitops: "{{ item.gitops }}" + name: "{{ org.name | lower }}" + component_ns: "{{ org.name | lower }}-ns" + kubernetes: "{{ org.k8s }}" + vault: "{{ org.vault }}" + gitops: "{{ org.gitops }}" + charts_dir: "{{ org.gitops.chart_source }}" + values_dir: "{{playbook_dir}}/../../../{{org.gitops.release_dir}}/{{ component_ns }}" loop: "{{ network['organizations'] }}" - when: item.type.find('nms') != -1 + loop_control: + loop_var: org + when: + - org.type == 'network-service' + - org.org_status is not defined or org.org_status == 'new' # Wait for network services to respond - name: Check that network services uri are reachable @@ -95,38 +102,44 @@ loop: "{{ network['network_services'] }}" retries: "{{ network.env.retry_count}}" delay: 50 - ignore_errors: yes + ignore_errors: true when: network.env.proxy != 'none' # Deploy notaries - - name: 'Deploy notary' + - name: Deploy notary service include_role: name: setup/notary vars: - services: "{{ item.services }}" - node: "{{ item.services.notary }}" - name: "{{ item.name | lower }}" - sc_name: "{{ name }}-bevel-storageclass" - component_ns: "{{ item.name | lower }}-ns" - kubernetes: "{{ item.k8s }}" - vault: "{{ item.vault }}" - gitops: "{{ item.gitops }}" - cordapps: "{{ item.cordapps | default() }}" + name: "{{ org.name | lower }}" + node: "{{ org.services.notary }}" + component_ns: "{{ org.name | lower }}-ns" + kubernetes: "{{ org.k8s }}" + vault: "{{ org.vault }}" + gitops: "{{ org.gitops }}" + charts_dir: "{{ org.gitops.chart_source }}" + values_dir: "{{playbook_dir}}/../../../{{org.gitops.release_dir}}/{{ component_ns }}" loop: "{{ network['organizations'] }}" - when: item.type.find('notary') != -1 + loop_control: + loop_var: org + when: + - org.services.notary is defined + - org.org_status is not defined or org.org_status == 'new' # Deploy all other nodes - - name: 'Deploy nodes' + - name: Deploy Corda nodes include_role: name: setup/node vars: - name: "{{ item.name | lower }}" - sc_name: "{{ name }}-bevel-storageclass" - component_ns: "{{ item.name | lower }}-ns" - services: "{{ item.services }}" - kubernetes: "{{ item.k8s }}" - vault: "{{ item.vault }}" - cordapps: "{{ item.cordapps | default() }}" - gitops: "{{ item.gitops }}" - loop: "{{ network['organizations'] }}" - when: item.type == 'node' + name: "{{ org.name | lower }}" + component_ns: "{{ org.name | lower }}-ns" + kubernetes: "{{ org.k8s }}" + vault: "{{ org.vault }}" + gitops: "{{ org.gitops }}" + charts_dir: "{{ org.gitops.chart_source }}" + values_dir: "{{playbook_dir}}/../../../{{org.gitops.release_dir}}/{{ component_ns }}" + loop: "{{ network['organizations'] }}" + loop_control: + loop_var: org + when: + - org.type == 'node' + - org.org_status is not defined or org.org_status == 'new' diff --git a/platforms/r3-corda/configuration/deploy-nodes.yaml b/platforms/r3-corda/configuration/deploy-nodes.yaml index 9b4c0f87b54..e56268e2ca9 100644 --- a/platforms/r3-corda/configuration/deploy-nodes.yaml +++ 
b/platforms/r3-corda/configuration/deploy-nodes.yaml @@ -23,39 +23,46 @@ retries: "{{ network.env.retry_count}}" delay: 70 ignore_errors: yes + when: network.env.proxy != 'none' # Deploy notaries - - name: 'Deploy notary' + - name: Deploy notary service include_role: name: setup/notary vars: - services: "{{ item.services }}" - node: "{{ item.services.notary }}" - name: "{{ item.name | lower }}" - sc_name: "{{ name }}-bevel-storageclass" - component_ns: "{{ item.name | lower }}-ns" - kubernetes: "{{ item.k8s }}" - vault: "{{ item.vault }}" - gitops: "{{ item.gitops }}" - cordapps: "{{ item.cordapps | default() }}" + name: "{{ org.name | lower }}" + node: "{{ org.services.notary }}" + component_ns: "{{ org.name | lower }}-ns" + kubernetes: "{{ org.k8s }}" + vault: "{{ org.vault }}" + gitops: "{{ org.gitops }}" + charts_dir: "{{ org.gitops.chart_source }}" + values_dir: "{{playbook_dir}}/../../../{{org.gitops.release_dir}}/{{ component_ns }}" loop: "{{ network['organizations'] }}" - when: network['type'] == 'corda' and item.type.find('notary') != -1 + loop_control: + loop_var: org + when: + - org.services.notary is defined + - org.org_status is not defined or org.org_status == 'new' # Deploy all other nodes - - name: 'Deploy nodes' + - name: Deploy Corda nodes include_role: name: setup/node vars: - name: "{{ item.name | lower }}" - sc_name: "{{ name }}-bevel-storageclass" - component_ns: "{{ item.name | lower }}-ns" - services: "{{ item.services }}" - kubernetes: "{{ item.k8s }}" - vault: "{{ item.vault }}" - cordapps: "{{ item.cordapps | default() }}" - gitops: "{{ item.gitops }}" - loop: "{{ network['organizations'] }}" - when: network['type'] == 'corda' and item.type == 'node' + name: "{{ org.name | lower }}" + component_ns: "{{ org.name | lower }}-ns" + kubernetes: "{{ org.k8s }}" + vault: "{{ org.vault }}" + gitops: "{{ org.gitops }}" + charts_dir: "{{ org.gitops.chart_source }}" + values_dir: "{{playbook_dir}}/../../../{{org.gitops.release_dir}}/{{ component_ns }}" + loop: "{{ network['organizations'] }}" + loop_control: + loop_var: org + when: + - org.type == 'node' + - org.org_status is not defined or org.org_status == 'new' # delete build directory - name: Remove build directory diff --git a/platforms/r3-corda/configuration/roles/create/certificates/ambassador/tasks/main.yaml b/platforms/r3-corda/configuration/roles/create/certificates/ambassador/tasks/main.yaml deleted file mode 100644 index 5da9860e313..00000000000 --- a/platforms/r3-corda/configuration/roles/create/certificates/ambassador/tasks/main.yaml +++ /dev/null @@ -1,125 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# This role generates certificates for ambassador -# and places them in vault. 
Certificates are created using openssl -# This also creates the Kubernetes secrets ---- -# Check if ambassadortls dir is there -- name: "check if dir exists or not" - stat: - path: "{{ ambassadortls }}" - register: ambassadortlsdir_check - -# Ensure ambassador tls dir exists -- name: Ensure ambassador tls dir exists - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/directory" - vars: - path: "{{ ambassadortls }}" - when: not ambassadortlsdir_check.stat.exists - -# Check ambassador tls certs already created -- name: Check if ambassador tls already created - shell: | - vault kv get -field=tlscacerts {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/tlscerts - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: ambassador_tls_certs - ignore_errors: yes - -# Gets the existing ambassador tls certs -- name: Get ambassador and tls certs from Vault - shell: | - vault kv get -format=yaml {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/tlscerts - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: ambassador_tls_certs_yaml - when: not ambassador_tls_certs.failed - -# Get ambassador tls certs -- name: Get ambassador tls certs - include_role: - name: "setup/get_crypto" - vars: - vault_output: "{{ ambassador_tls_certs_yaml.stdout | from_yaml }}" - type: "ambassador" - cert_path: "{{ ambassadortls }}" - when: ambassador_tls_certs.failed == False - -# check if ambassadortls dir is there -- name: "check if openssl conf file exists or not" - stat: - path: "./build/openssl{{ component_name }}.conf" - register: openssl_conf_check - -# Generates the openssl file for domain -- name: Generate openssl conf file - shell: | - cd ./build - cat <openssl{{ component_name }}.conf - [dn] - CN={{ domain_name }} - [req] - distinguished_name = dn - [EXT] - keyUsage=digitalSignature - extendedKeyUsage=serverAuth - subjectAltName = @alt_names - - [alt_names] - DNS.1 = {{ domain_name }} - DNS.2 = {{ domain_name_api }} - DNS.3 = {{ domain_name_web }} - EOF - vars: - domain_name: "{{ component_name }}.{{ item.external_url_suffix }}" - domain_name_api: "{{ component_name }}api.{{ item.external_url_suffix }}" - domain_name_web: "{{ component_name }}web.{{ item.external_url_suffix }}" - when: ambassador_tls_certs.failed == True and (not openssl_conf_check.stat.exists) - -# Generates the ambassador tls certificates if already not generated -- name: Generate ambassador tls certs - shell: | - openssl req -x509 -out {{ ambassadortls }}/ambassador.crt -keyout {{ ambassadortls }}/ambassador.key -newkey rsa:2048 -nodes -sha256 -subj "/CN={{ domain_name }}" -extensions EXT -config "{{playbook_dir}}/build/openssl{{ component_name }}.conf" - vars: - domain_name: "{{ component_name }}.{{ item.external_url_suffix }}" - when: ambassador_tls_certs.failed == True and (not openssl_conf_check.stat.exists) - -# Stores the genreated ambassador tls certificates to vault -- name: Putting tls certs to vault - shell: | - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/tlscerts tlscacerts="$(cat {{ ambassadortls }}/ambassador.crt | base64)" tlskey="$(cat {{ ambassadortls }}/ambassador.key | base64)" - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: ambassador_tls_certs.failed == True - -# Check if Ambassador credentials exist already -- name: Check Ambassador cred exists - k8s_info: - kind: Secret - 
namespace: "{{ component_ns }}" - name: "{{ component_name }}-ambassador-certs" - kubeconfig: "{{ kubernetes.config_file }}" - context: "{{ kubernetes.context }}" - register: get_ambassador_secret - -# Create the Ambassador TLS credentials for ambassador -- name: Create the Ambassador credentials - shell: | - KUBECONFIG={{ kubernetes.config_file }} kubectl create secret tls {{ component_name }}-ambassador-certs --cert={{ ambassadortls }}/ambassador.crt --key={{ ambassadortls }}/ambassador.key -n {{ component_ns }} - when: get_ambassador_secret.resources|length == 0 - -# Copy generated crt to build location for doorman and networkmap -- name: Copy generated ambassador tls certs to given build location - copy: - src: "{{ ambassadortls }}/ambassador.crt" - dest: "{{ cert_file }}" - follow: yes - when: cert_file is defined diff --git a/platforms/r3-corda/configuration/roles/create/certificates/ambassador/vars/main.yaml b/platforms/r3-corda/configuration/roles/create/certificates/ambassador/vars/main.yaml deleted file mode 100644 index a6705fc45cb..00000000000 --- a/platforms/r3-corda/configuration/roles/create/certificates/ambassador/vars/main.yaml +++ /dev/null @@ -1,7 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -ambassadortls: "{{playbook_dir}}/build/corda/{{component_name}}/tls" diff --git a/platforms/r3-corda/configuration/roles/create/certificates/doorman/tasks/main.yaml b/platforms/r3-corda/configuration/roles/create/certificates/doorman/tasks/main.yaml deleted file mode 100644 index 0b9ca9ec61d..00000000000 --- a/platforms/r3-corda/configuration/roles/create/certificates/doorman/tasks/main.yaml +++ /dev/null @@ -1,232 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# This role generates certificates for doorman and rootca -# and places them in vault. 
Certificates are created using openssl ---- - -# Check if rootca dir is there -- name: "check if file exists or not" - stat: - path: "{{ rootca }}" - register: rootcadir_check - -# Create the root directory where CA root certificates and key will be placed -- name: Ensure rootca dir exists - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/directory" - vars: - path: "{{ rootca }}" - when: not rootcadir_check.stat.exists - -# Check if doormanca dir is there -- name: "check if file exists or not" - stat: - path: "{{ doormanca }}" - register: doormancadir_check - -# Create the doormanca directory where doorman root certificates and key will be placed -- name: Ensure doormanca dir exists - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/directory" - vars: - path: "{{ doormanca }}" - when: not doormancadir_check.stat.exists - -# Check if mongorootca dir is there -- name: "check if file exists or not" - stat: - path: "{{ mongorootca }}" - register: mongorootcadir_check - -# Ensure mongorootca dir exists -- name: Ensure mongorootca dir exists - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/directory" - vars: - path: "{{ mongorootca }}" - when: services.doorman.tls == 'on' and (not mongorootcadir_check.stat.exists) - -# Check if mongodbtca dir is there -- name: "check if file exists or not" - stat: - path: "{{ mongodbca }}" - register: mongodbcadir_check - -# Ensure mongodbca dir exists -- name: Ensure mongodbca dir exists - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/directory" - vars: - path: "{{ mongodbca }}" - when: services.doorman.tls == 'on' and (not mongodbcadir_check.stat.exists) - -# Check if certificates for doorman are already created and stored in vault or not -- name: Check if root certs already created - shell: | - vault kv get -field=cacerts {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: root_certs - ignore_errors: yes - -# Get the existing root certificates if any. 
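# (The lookup below returns the whole secret as YAML and hands it to setup/get_crypto,
# which reads the individual entries out of data.data. For a KV v2 secret the relevant
# part of the `vault kv get -format=yaml` output is roughly of this shape; illustrative only:
#   data:
#     data:
#       cacerts: <base64 cordarootca.pem>
#       keystore: <base64 cordarootca.key>
#       rootcakey: <base64 keys.jks>
#     metadata:
#       version: 1)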
-- name: Get root certs from Vault - shell: | - vault kv get -format=yaml {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: root_certs_yaml - when: not root_certs.failed - -# Get root certs -- name: Get root certs - include_role: - name: "setup/get_crypto" - vars: - vault_output: "{{ root_certs_yaml.stdout | from_yaml }}" - type: "rootca" - cert_path: "{{ rootca }}" - when: root_certs.failed == False - tags: - - notest - -# Store the exisiting key.jks file -- name: check root certs - stat: - path: "{{ rootca }}/keys.jks" - register: rootca_stat_result - -# Generation of CA Root certificates -- name: Generate CAroot certificate - shell: | - cd {{ rootca }} - eval "keytool -genkey -keyalg RSA -alias key -dname {{ root_subject | quote }} -keystore keys.jks -storepass changeme -keypass changeme" - eval "openssl ecparam -name prime256v1 -genkey -noout -out cordarootca.key" - eval "openssl req -x509 -config {{playbook_dir}}/openssl.conf -new -nodes -key cordarootca.key -days 1024 -out cordarootca.pem -extensions v3_ca -subj '/{{ cert_subject }}'" - eval "openssl pkcs12 -export -name cert -inkey cordarootca.key -in cordarootca.pem -out cordarootcacert.pkcs12 -cacerts -passin pass:'changeme' -passout pass:'changeme'" - eval "openssl pkcs12 -export -name key -inkey cordarootca.key -in cordarootca.pem -out cordarootcakey.pkcs12 -passin pass:'changeme' -passout pass:'changeme'" - eval "yes | keytool -importkeystore -srckeystore cordarootcacert.pkcs12 -srcstoretype PKCS12 -srcstorepass changeme -destkeystore keys.jks -deststorepass changeme" - eval "yes | keytool -importkeystore -srckeystore cordarootcakey.pkcs12 -srcstoretype PKCS12 -srcstorepass changeme -destkeystore keys.jks -deststorepass changeme" - when: root_certs.failed == True and rootca_stat_result.stat.exists == False - -# Check if doorman certs already created -- name: Check if doorman certs already created - shell: | - vault kv get -field=doorman.jks {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs > {{ doormanca }}/tempkeys.jks - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: doorman_certs - ignore_errors: yes - -# Decode base64 -- name: Decode base64 - shell: | - cat {{ doormanca }}/tempkeys.jks | base64 -d > {{ doormanca }}/keys.jks - when: not doorman_certs.failed - tags: - - notest - -# Generate DoormanCA from generated root CA certificate -- name: Generate DoormanCA from generated root CA certificate - shell: | - cd {{ doormanca }} - rm keys.jks - eval "keytool -genkey -keyalg RSA -alias key -dname {{ doorman_subject | quote }} -keystore keys.jks -storepass changeme -keypass changeme" - eval "openssl ecparam -name prime256v1 -genkey -noout -out cordadoormanca.key" - eval "openssl req -new -nodes -key cordadoormanca.key -days 1000 -out cordadoormanca.csr -subj '/{{ doorman_cert_subject }}'" - eval "openssl x509 -req -days 1000 -in cordadoormanca.csr -CA {{ rootca }}/cordarootca.pem -CAkey {{rootca}}/cordarootca.key -out cordadoormanca.pem -CAcreateserial -CAserial serial -extfile {{playbook_dir}}/openssl.conf -extensions doorman" - eval "openssl pkcs12 -export -name cert -inkey cordadoormanca.key -in cordadoormanca.pem -out cordadoormancacert.pkcs12 -cacerts -passin pass:'changeme' -passout pass:'changeme'" - eval "openssl pkcs12 -export -name key -inkey cordadoormanca.key -in cordadoormanca.pem -out cordadoormancakey.pkcs12 
-passin pass:'changeme' -passout pass:'changeme'" - eval "yes | keytool -importkeystore -srckeystore cordadoormancacert.pkcs12 -srcstoretype PKCS12 -srcstorepass changeme -destkeystore keys.jks -deststorepass changeme" - eval "yes | keytool -importkeystore -srckeystore cordadoormancakey.pkcs12 -srcstoretype PKCS12 -srcstorepass changeme -destkeystore keys.jks -deststorepass changeme" - when: doorman_certs.failed == True and rootca_stat_result.stat.exists == False - -# Checking root certificates for mongodb -- name: Check if mongoroot certs already created - shell: | - vault kv get -field=mongoCA.crt {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs > {{ mongorootca }}/tempmongoCA.crt - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: mongoCA_certs - ignore_errors: yes - when: services.doorman.tls == 'on' - -# Decode base64 -- name: Decode base64 - shell: | - cat {{ mongorootca }}/tempmongoCA.crt | base64 -d > {{ mongorootca }}/mongoCA.crt - when: not mongoCA_certs.failed - tags: - - notest - -# Generating mongoroot certificates -- name: Generating Mongoroot certificates - shell: | - cd {{ mongorootca }} - openssl genrsa -out mongoCA.key 3072 - openssl req -x509 -config {{playbook_dir}}/openssl.conf -new -extensions v3_ca -key mongoCA.key -days 365 -out mongoCA.crt -subj '{{ mongoroot_cert_subj }}' - when: services.doorman.tls == 'on' and mongoCA_certs.failed == True and rootca_stat_result.stat.exists == False - -# checking if mongodb certificate already created -- name: Check if mongodb certs already created - shell: | - vault kv get -field=mongodb-{{component_name}}.pem {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs > {{ mongodbca }}/tempmongodb-{{component_name}}.pem - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: mongodb_certs - ignore_errors: yes - when: services.doorman.tls == 'on' - -# Decode base64 -- name: Decode base64 - shell: | - cat {{ mongodbca }}/tempmongodb-{{component_name}}.pem | base64 -d > {{ mongodbca }}/mongodb-{{component_name}}.pem - when: not mongodb_certs.failed - tags: - - notest - -# Generating mongodb certificates. 
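When TLS is enabled for the doorman's MongoDB (services.doorman.tls == 'on'), the task that follows issues a server certificate signed by the mongoCA generated earlier and concatenates the key and certificate into one PEM, the single-file format MongoDB's TLS options expect. An illustrative, simplified sketch of that flow is shown below; it is not the task itself, "doorman" stands in for {{ component_name }}, and the subject string is a placeholder.

- name: Generate a MongoDB server certificate signed by mongoCA   # illustrative sketch only
  shell: |
    cd {{ mongodbca }}
    openssl req -new -nodes -newkey rsa:4096 -keyout mongodb-doorman.key -out mongodb-doorman.csr -subj '/O=DLT/CN=mongodb-doorman'
    openssl x509 -req -days 365 -in mongodb-doorman.csr -CA {{ mongorootca }}/mongoCA.crt -CAkey {{ mongorootca }}/mongoCA.key -CAcreateserial -out mongodb-doorman.crt
    cat mongodb-doorman.key mongodb-doorman.crt > mongodb-doorman.pem
  when: services.doorman.tls == 'on'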
-- name: Generating mongodb certificates - shell: | - cd {{ mongodbca }} - openssl req -new -nodes -newkey rsa:4096 -keyout mongodb-{{ component_name}}.key -out mongodb-{{ component_name }}.csr -subj '{{ mongoroot_cert_subj }}-{{component_name}}' - openssl x509 -CA {{ mongorootca }}/mongoCA.crt -CAkey {{ mongorootca }}/mongoCA.key -CAcreateserial -CAserial serial -req -days 365 -in mongodb-{{component_name}}.csr -out mongodb-{{component_name}}.crt - cat mongodb-{{ component_name }}.key mongodb-{{ component_name}}.crt > mongodb-{{ component_name }}.pem - when: services.doorman.tls == 'on' and mongodb_certs.failed == True and rootca_stat_result.stat.exists == False - -# Putting certs to vault for root -- name: Putting certs to vault for root - shell: | - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs rootcakey="$(cat {{ rootca }}/keys.jks | base64)" cacerts="$(cat {{ rootca }}/cordarootca.pem | base64)" keystore="$(cat {{ rootca }}/cordarootca.key | base64)" - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: root_certs.failed == True - -# Putting certs and credential to vault for doorman -- name: Putting certs and credential to vault for doorman - shell: | - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/credentials/userpassword sa="{{ userpassword_sa }}" - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/credentials/mongodb mongodbPassword="{{ mongodbPassword }}" - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs doorman.jks="$(cat {{ doormanca }}/keys.jks | base64)" rootcakey="$(cat {{ rootca }}/keys.jks | base64)" cacerts="$(cat {{ rootca }}/cordarootca.pem | base64)" keystore="$(cat {{ rootca }}/cordarootca.key | base64)" mongodb-{{ component_name }}.pem="$(cat {{ mongodbca }}/mongodb-{{ component_name }}.pem | base64)" mongoCA.crt="$(cat {{ mongorootca }}/mongoCA.crt | base64)" - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: doorman_certs.failed == True - -# Create Ambassador certificates -- name: Create Ambassador certificates - include_role: - name: "create/certificates/ambassador" - vars: - cert_file: "{{ network | json_query('network_services[?type==`doorman`].certificate') | first }}" - when: network.env.proxy == 'ambassador' diff --git a/platforms/r3-corda/configuration/roles/create/certificates/doorman/vars/main.yaml b/platforms/r3-corda/configuration/roles/create/certificates/doorman/vars/main.yaml deleted file mode 100644 index ba60a581065..00000000000 --- a/platforms/r3-corda/configuration/roles/create/certificates/doorman/vars/main.yaml +++ /dev/null @@ -1,12 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -rootca: "{{playbook_dir}}/build/corda/cordarootca" -doormanca: "{{playbook_dir}}/build/corda/cordadoormanca" -userpassword_sa: admin -mongodbPassword: newdbnm -mongorootca: "{{playbook_dir}}/build/corda/mongorootca" -mongodbca: "{{playbook_dir}}/build/corda/mongodbca" diff --git a/platforms/r3-corda/configuration/roles/create/certificates/nms/tasks/main.yaml b/platforms/r3-corda/configuration/roles/create/certificates/nms/tasks/main.yaml deleted file mode 100644 index b9e304a5f6f..00000000000 --- a/platforms/r3-corda/configuration/roles/create/certificates/nms/tasks/main.yaml +++ /dev/null @@ -1,212 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -# This role generates certificates for networkmap -# and places them in vault. Certificates are created using openssl ---- -# Create the root directory where CA root certificates and key will be placed -- name: Ensure rootca dir exists - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/directory" - vars: - path: "{{ rootca }}" - -# Ensure nmsca dir exists -- name: Ensure nmsca dir exists - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/directory" - vars: - path: "{{ nmsca }}" - -# Ensure mongorootca dir exists -- name: Ensure mongorootca dir exists - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/directory" - vars: - path: "{{ mongorootca }}" - when: services.nms.tls == 'on' - -# Ensure mongodbca dir exists -- name: Ensure mongodbca dir exists - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/directory" - vars: - path: "{{ mongodbca }}" - when: services.nms.tls == 'on' - -# Check if root certs already created -- name: Check if root certs already created - shell: | - vault kv get -field=cacerts {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: nms_root_certs - ignore_errors: yes - -# Get all root certs data from Vault -- name: Get all root certs data from Vault - shell: | - vault kv get -format=yaml {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: nms_root_certs_yaml - ignore_errors: yes - -# Get root certs -- name: Get root certs - include_role: - name: "setup/get_crypto" - vars: - vault_output: "{{ nms_root_certs_yaml.stdout | from_yaml }}" - type: "rootca" - cert_path: "{{ rootca }}" - when: nms_root_certs.failed == False - tags: - - notest - -# Check root certs -- name: Check root certs - stat: - path: "{{ rootca }}/keys.jks" - register: rootca_stat_result - -# Generation of CA Root certificates -- name: Generate CAroot certificate - shell: | - cd {{ rootca }} - eval "keytool -genkey -keyalg RSA -alias key -dname {{ root_subject | quote }} -keystore keys.jks -storepass changeme -keypass changeme" - eval "openssl ecparam -name prime256v1 -genkey -noout -out cordarootca.key" - eval "openssl req -x509 -config {{playbook_dir}}/openssl.conf -new -nodes -key cordarootca.key -days 1024 -out 
cordarootca.pem -extensions v3_ca -subj '/{{ cert_subject }}'" - eval "openssl pkcs12 -export -name cert -inkey cordarootca.key -in cordarootca.pem -out cordarootcacert.pkcs12 -cacerts -passin pass:'changeme' -passout pass:'changeme'" - eval "openssl pkcs12 -export -name key -inkey cordarootca.key -in cordarootca.pem -out cordarootcakey.pkcs12 -passin pass:'changeme' -passout pass:'changeme'" - eval "yes | keytool -importkeystore -srckeystore cordarootcacert.pkcs12 -srcstoretype PKCS12 -srcstorepass changeme -destkeystore keys.jks -deststorepass changeme" - eval "yes | keytool -importkeystore -srckeystore cordarootcakey.pkcs12 -srcstoretype PKCS12 -srcstorepass changeme -destkeystore keys.jks -deststorepass changeme" - when: nms_root_certs.failed == True and rootca_stat_result.stat.exists == False - -# Check if networkmap certs already created -- name: Check if networkmap certs already created - shell: | - vault kv get -field=networkmap.jks {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs > {{ nmsca }}/tempkeys.jks - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: networkmap_certs - ignore_errors: yes - -# Decode base64 -- name: Decode base64 - shell: | - cat {{ nmsca }}/tempkeys.jks | base64 -d > {{ nmsca }}/keys.jks - when: not networkmap_certs.failed - tags: - - notest - -# Creating nms certificates -- name: Generate NMS certificate - shell: | - cd {{ nmsca }} - rm keys.jks - eval "keytool -genkey -keyalg RSA -alias key -dname {{ nms_subject | quote }} -keystore keys.jks -storepass changeme -keypass changeme" - eval "openssl ecparam -name prime256v1 -genkey -noout -out cordanetworkmap.key" - eval "openssl req -new -nodes -key cordanetworkmap.key -days 1000 -out cordanetworkmap.csr -subj '/{{ nms_cert_subject }}'" - eval "openssl x509 -req -days 1000 -in cordanetworkmap.csr -CA {{ rootca }}/cordarootca.pem -CAkey {{ rootca }}/cordarootca.key -out cordanetworkmap.pem -CAcreateserial -CAserial serial -extfile {{playbook_dir}}/openssl.conf -extensions networkMap" - eval "openssl pkcs12 -export -name cert -inkey cordanetworkmap.key -in cordanetworkmap.pem -out cordanetworkmapcacert.pkcs12 -cacerts -passin pass:'changeme' -passout pass:'changeme'" - eval "openssl pkcs12 -export -name key -inkey cordanetworkmap.key -in cordanetworkmap.pem -out cordanetworkmapcakey.pkcs12 -passin pass:'changeme' -passout pass:'changeme'" - eval "yes | keytool -importkeystore -srckeystore cordanetworkmapcacert.pkcs12 -srcstoretype PKCS12 -srcstorepass changeme -destkeystore keys.jks -deststorepass changeme" - eval "yes | keytool -importkeystore -srckeystore cordanetworkmapcakey.pkcs12 -srcstoretype PKCS12 -srcstorepass changeme -destkeystore keys.jks -deststorepass changeme" - when: networkmap_certs.failed == True - -# Checking root certificates for mongodb -- name: Check if mongoroot certs already created - shell: | - vault kv get -field=mongoCA.crt {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs > {{ mongorootca }}/tempmongoCA.crt - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: mongoCA_certs - ignore_errors: yes - when: services.nms.tls == 'on' - -# Decode base64 -- name: Decode base64 - shell: | - cat {{ mongorootca }}/tempmongoCA.crt | base64 -d > {{ mongorootca }}/mongoCA.crt - when: not mongoCA_certs.failed - tags: - - notest - -# Check mongoroot certs -- name: check mongoroot certs - stat: - path: "{{ mongorootca }}/mongoCA.crt" - register: 
mongoCA_stat_result - when: services.nms.tls == 'on' - -# Generating mongoroot certificates -- name: Generating Mongoroot certificates - shell: | - cd {{ mongorootca }} - openssl genrsa -out mongoCA.key 3072 - openssl req -x509 -config {{playbook_dir}}/openssl.conf -new -extensions v3_ca -key mongoCA.key -days 365 -out mongoCA.crt -subj '{{ mongoroot_cert_subj }}' - when: services.nms.tls == 'on' and mongoCA_stat_result.stat.exists == False and mongoCA_certs.failed == True - -# Checking if mongodb certificate already created -- name: Check if mongodb certs already created - shell: | - vault kv get -field=mongodb-{{component_name}}.pem {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs > {{ mongodbca }}/tempmongodb-{{component_name}}.pem - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: mongodb_certs - ignore_errors: yes - when: services.nms.tls == 'on' - -# Decode base64 -- name: Decode base64 - shell: | - cat {{ mongodbca }}/tempmongodb-{{component_name}}.pem | base64 -d > {{ mongodbca }}/mongodb-{{component_name}}.pem - when: not mongodb_certs.failed - tags: - - notest - -# Generating mongodb certificates. -- name: Generating mongodb certificates - shell: | - cd {{ mongodbca }} - openssl req -new -nodes -newkey rsa:4096 -keyout mongodb-{{ component_name}}.key -out mongodb-{{ component_name }}.csr -subj '{{ mongoroot_cert_subj }}-{{component_name}}' - openssl x509 -CA {{ mongorootca }}/mongoCA.crt -CAkey {{ mongorootca }}/mongoCA.key -CAcreateserial -CAserial serial -req -days 365 -in mongodb-{{component_name}}.csr -out mongodb-{{component_name}}.crt - cat mongodb-{{ component_name }}.key mongodb-{{ component_name}}.crt > mongodb-{{ component_name }}.pem - when: services.nms.tls == 'on' and mongodb_certs.failed == True - -# Putting certs to vault for root -- name: Putting certs to vault for root - shell: | - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs rootcakey="$(cat {{ rootca }}/keys.jks | base64)" cacerts="$(cat {{ rootca }}/cordarootca.pem | base64)" keystore="$(cat {{ rootca }}/cordarootca.key | base64)" - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: nms_root_certs.failed == True - -# Putting certs and credential to vault for networkmap -- name: Putting certs and credential to vault for networkmap - shell: | - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/credentials/mongodb mongodbPassword="{{ mongodbPassword_networkmap }}" - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/credentials/userpassword sa="{{ userpassword_networkmap }}" - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs networkmap.jks="$(cat {{ nmsca }}/keys.jks | base64)" rootcakey="$(cat {{ rootca }}/keys.jks | base64)" cacerts="$(cat {{ rootca }}/cordarootca.pem | base64)" keystore="$(cat {{ rootca }}/cordarootca.key | base64)" mongodb-{{ component_name }}.pem="$(cat {{ mongodbca }}/mongodb-{{ component_name }}.pem | base64)" mongoCA.crt="$(cat {{ mongorootca }}/mongoCA.crt | base64)" - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: networkmap_certs.failed == True - -# Create Ambassador certificates -- name: Create Ambassador certificates - include_role: - name: "create/certificates/ambassador" - vars: - cert_file: "{{ network | 
json_query('network_services[?type==`networkmap`].certificate') | first }}" - when: network.env.proxy == 'ambassador' diff --git a/platforms/r3-corda/configuration/roles/create/certificates/nms/vars/main.yaml b/platforms/r3-corda/configuration/roles/create/certificates/nms/vars/main.yaml deleted file mode 100644 index 937ada4bbbd..00000000000 --- a/platforms/r3-corda/configuration/roles/create/certificates/nms/vars/main.yaml +++ /dev/null @@ -1,12 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -userpassword_networkmap: admin -mongodbPassword_networkmap: newdbnm -rootca: "{{playbook_dir}}/build/corda/cordarootca" -nmsca: "{{playbook_dir}}/build/corda/cordanetworkmap" -mongorootca: "{{playbook_dir}}/build/corda/mongorootca" -mongodbca: "{{playbook_dir}}/build/corda/mongodbca" diff --git a/platforms/r3-corda/configuration/roles/create/certificates/node/tasks/main.yaml b/platforms/r3-corda/configuration/roles/create/certificates/node/tasks/main.yaml deleted file mode 100644 index 94a51c82b49..00000000000 --- a/platforms/r3-corda/configuration/roles/create/certificates/node/tasks/main.yaml +++ /dev/null @@ -1,165 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -######################################################################################## -# This role download certificates from nms and loads into vault -######################################################################################## - -######################################################################################## - ---- -# Ensure directory exist, if not creates a new one -- name: "Ensure build dir exists" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/directory" - vars: - path: "{{ node_certs }}" - -# Check if truststore already created -- name: Check if truststore already created - shell: | - vault kv get -field=network-map-truststore {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs/networkmaptruststore - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: truststore_result - ignore_errors: yes - changed_when: false - -# Download the certificates from NMS -- name: "Download certs from nms" - get_url: - url: "{{ nms_url }}/network-map/truststore" - dest: "{{ node_certs }}/network-map-truststore.jks" - validate_certs: no - when: truststore_result.failed == True and network.env.proxy != 'none' - tags: - - notest - -# Download the certificates from NMS pod -- name: "Download certs from nms pod" - vars: - nms_namespace: "{{ network | json_query('organizations[?type==`doorman-nms-notary`].name') | first }}-ns" - nms_name: "{{ network | json_query('organizations[?type==`doorman-nms-notary`].services.nms.name') | first }}" - shell: | - export NMS_POD=$(KUBECONFIG={{ kubernetes.config_file }} kubectl get po -n {{ nms_namespace }} -l app={{ nms_name | lower }} | grep {{ nms_name | lower }} | awk '{print $1}') - KUBECONFIG={{ kubernetes.config_file }} kubectl cp {{ nms_namespace }}/${NMS_POD}:../../opt/networkmap/network-map-truststore.jks -c 
logs {{ node_certs }}/network-map-truststore.jks - when: truststore_result.failed == True and network.env.proxy == 'none' - changed_when: false - -# Store the certificates in the vault -- name: "Write networkmaptruststore to vault" - shell: | - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs/networkmaptruststore network-map-truststore="$(cat {{ node_certs }}/network-map-truststore.jks | base64)" - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: truststore_result.failed == True - changed_when: false - -# Check if certificates already created -- name: Check if certificates already created - shell: | - vault kv get -field=nodekeystore.jks {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs/customnodekeystore - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: certs_result - ignore_errors: yes - changed_when: false - -# Generate node certs -- name: Generate node certs - shell: | - cd {{ node_certs }} - openssl genrsa -out {{ component_name }}.key 3072 - openssl req -new -x509 -key {{ component_name }}.key -out {{ component_name }}.cer -days 365 -subj '/{{ cert_subject }}' - openssl dgst -sha256 -sign {{ component_name }}.key {{ component_name }}.cer | base64 | cat {{ component_name }}.cer - openssl pkcs12 -export -in {{ component_name }}.cer -inkey {{ component_name }}.key -out testkeystore.p12 -passin pass:'cordacadevpass' -passout pass:'cordacadevpass' - keytool -importkeystore -srckeystore testkeystore.p12 -srcstoretype pkcs12 -srcstorepass cordacadevpass -destkeystore nodekeystore.jks -deststorepass cordacadevpass -deststoretype JKS - when: certs_result.failed == True - changed_when: false - -# Write certificates to vault -- name: "Write certificates to vault" - shell: | - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs/customnodekeystore nodekeystore.jks="$(cat {{ node_certs }}/nodekeystore.jks | base64)" - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs {{ component_name }}.cer="$(cat {{ node_certs }}/{{ component_name }}.cer | base64)" {{ component_name }}.key="$(cat {{ node_certs }}/{{ component_name }}.key | base64)" - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: certs_result.failed == True - changed_when: false - -# Check if doorman certs already created -- name: Check if doorman certs already created - shell: | - vault kv get -field=doorman.crt {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs/doorman - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: doorman_result - ignore_errors: yes - changed_when: false - -# Write certificates to vault -- name: "Write certificates to vault" - shell: | - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs/doorman doorman.crt="$(cat {{ doorman_cert_file }} | base64)" - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: doorman_result.failed == True and doorman_cert_file != '' - changed_when: false - -# Check if networkmap certs already created -- name: Check if networkmap certs already created - shell: | - vault kv get -field=networkmap.crt {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs/networkmap - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ 
vault.root_token }}" - register: networkmap_result - ignore_errors: yes - changed_when: false - -# Write certificates to vault -- name: "Write certificates to vault" - shell: | - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs/networkmap networkmap.crt="$(cat {{ nms_cert_file }} | base64)" - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: networkmap_result.failed == True - changed_when: false - -# Write credentials to vault -- name: "Write credentials to vault" - shell: | - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/credentials/database sa="newh2pass" {{ component_name }}User1="xyz1234" {{ component_name }}User2="xyz1236" - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/credentials/rpcusers {{ component_name }}operations="usera" {{ component_name }}operations1="usera" {{ component_name }}operations2="usera" {{ component_name }}admin="usera" - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/credentials/keystore keyStorePassword="newpass" trustStorePassword="newpass" defaultTrustStorePassword="trustpass" defaultKeyStorePassword="cordacadevpass" sslkeyStorePassword="sslpass" ssltrustStorePassword="sslpass" - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/credentials/networkmappassword sa="admin" - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - changed_when: false - -# Write cordapps credentials to vault -- name: "Write cordapps credentials to vault" - shell: | - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/credentials/cordapps repo_username="{{ cordapps_details.username }}" repo_password="{{ cordapps_details.password }}" - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: cordapps_details != "" - changed_when: false - -# Create Ambassador certificates -- name: Create Ambassador certificates - include_role: - name: "create/certificates/ambassador" - when: network.env.proxy != 'none' diff --git a/platforms/r3-corda/configuration/roles/create/certificates/node/vars/main.yaml b/platforms/r3-corda/configuration/roles/create/certificates/node/vars/main.yaml deleted file mode 100644 index 3aa660e1bc3..00000000000 --- a/platforms/r3-corda/configuration/roles/create/certificates/node/vars/main.yaml +++ /dev/null @@ -1,7 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -node_certs: "{{playbook_dir}}/build/corda/{{ component_name }}" \ No newline at end of file diff --git a/platforms/r3-corda/configuration/roles/create/certificates/notary/tasks/main.yaml b/platforms/r3-corda/configuration/roles/create/certificates/notary/tasks/main.yaml deleted file mode 100644 index 9b104cff780..00000000000 --- a/platforms/r3-corda/configuration/roles/create/certificates/notary/tasks/main.yaml +++ /dev/null @@ -1,164 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -######################################################################################## -# This role download certificates from nms and loads into vault -######################################################################################## - -######################################################################################## - ---- -# Ensure directory existance, if not creates a new one -- name: "Ensure build dir exists" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/directory" - vars: - path: "{{ notary_certs }}" - -# Check if truststore already created -- name: Check if truststore already created - shell: | - vault kv get -field=network-map-truststore {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs/networkmaptruststore - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: truststore_result - ignore_errors: yes - -# Download the certificates from NMS -- name: "Download certs from nms" - get_url: - url: "{{ nms_url }}/network-map/truststore" - dest: "{{ notary_certs }}/network-map-truststore.jks" - validate_certs: no - when: truststore_result.failed == True and network.env.proxy != 'none' - tags: - - notest - -# Wait till the nms is running -- name: Waiting for nms pod to come up - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" - vars: - component_type: "Pod" - namespace: "{{ component_ns }}" - component_name: "{{ item.services.nms.name | lower }}" - kubernetes: "{{ item.k8s }}" - label_selectors: - - app = {{ component_name }} - tags: - - notest - -# Download the certificates from NMS -- name: "Download certs from nms pod" - shell: | - export NMS_POD=$(KUBECONFIG={{ kubernetes.config_file }} kubectl get po -n {{ component_ns }} -l app={{ item.services.nms.name | lower }} | grep {{ item.services.nms.name | lower }} | awk '{print $1}') - KUBECONFIG={{ kubernetes.config_file }} kubectl cp {{ component_ns }}/${NMS_POD}:../../opt/networkmap/network-map-truststore.jks -c logs {{ notary_certs }}/network-map-truststore.jks - when: truststore_result.failed == True and network.env.proxy == 'none' - -# Write networkmaptruststore to vault -- name: "Write networkmaptruststore to vault" - shell: | - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs/networkmaptruststore network-map-truststore="$(cat {{ notary_certs }}/network-map-truststore.jks | base64)" - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: truststore_result.failed == True - -# Check if certificates already created -- name: Check if certificates already created - shell: | - vault kv get -field=nodekeystore.jks {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs/customnodekeystore - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: certs_result - ignore_errors: yes - -# Generate node certs -- name: Generate node certs - shell: | - cd {{ notary_certs }} - openssl genrsa -out Notary.key 3072 - openssl req -new -x509 -key Notary.key -out Notary.cer -days 365 -subj '/{{ cert_subject }}' - openssl dgst -sha256 -sign Notary.key Notary.cer | base64 | cat Notary.cer - openssl pkcs12 -export -in Notary.cer -inkey Notary.key -out testkeystore.p12 -passin pass:'cordacadevpass' -passout 
pass:'cordacadevpass' - keytool -importkeystore -srckeystore testkeystore.p12 -srcstoretype pkcs12 -srcstorepass cordacadevpass -destkeystore nodekeystore.jks -deststorepass cordacadevpass -deststoretype JKS - when: certs_result.failed == True - -# Write certificates to vault -- name: "Write certificates to vault" - shell: | - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs/customnodekeystore nodekeystore.jks="$(cat {{ notary_certs }}/nodekeystore.jks | base64)" - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs Notary.cer="$(cat {{ notary_certs }}/Notary.cer | base64)" Notary.key="$(cat {{ notary_certs }}/Notary.key | base64)" - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: certs_result.failed == True - -# Check if doorman certs already created -- name: Check if doorman certs already created - shell: | - vault kv get -field=doorman.crt {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs/doorman - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: doorman_result - ignore_errors: yes - -# Write certificates to vault -- name: "Write certificates to vault" - shell: | - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs/doorman doorman.crt="$(cat {{ doorman_cert_file }} | base64)" - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: doorman_result.failed == True - -# Check if networkmap certs already created -- name: Check if networkmap certs already created - shell: | - vault kv get -field=networkmap.crt {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs/networkmap - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: networkmap_result - ignore_errors: yes - -# Write certificates to vault -- name: "Write certificates to vault" - shell: | - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/certs/networkmap networkmap.crt="$(cat {{ nms_cert_file }} | base64)" - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: networkmap_result.failed == True - -# Write credentials to vault -- name: "Write credentials to vault" - shell: | - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/credentials/database sa="newh2pass" notaryUser1="xyz1234" notaryUser2="xyz1236" - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/credentials/rpcusers {{ component_name }}operations="usera" {{ component_name }}operations1="usera" {{ component_name }}operations2="usera" {{ component_name }}admin="usera" - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/credentials/keystore keyStorePassword="newpass" trustStorePassword="newpass" defaultTrustStorePassword="trustpass" defaultKeyStorePassword="cordacadevpass" sslkeyStorePassword="sslpass" ssltrustStorePassword="sslpass" - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/credentials/networkmappassword sa="admin" - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - -# Write cordapps credentials to vault -- name: "Write cordapps credentials to vault" - shell: | - vault kv put {{ vault.secret_path | default(name) }}/{{ name }}/{{ component_name }}/credentials/cordapps 
repo_username="{{ cordapps_details.username }}" repo_password="{{ cordapps_details.password }}" - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - when: cordapps_details != "" - -# Create Ambassador certificates -- name: Create Ambassador certificates - include_role: - name: "create/certificates/ambassador" - when: network.env.proxy != 'none' diff --git a/platforms/r3-corda/configuration/roles/create/certificates/notary/vars/main.yaml b/platforms/r3-corda/configuration/roles/create/certificates/notary/vars/main.yaml deleted file mode 100644 index e4213892319..00000000000 --- a/platforms/r3-corda/configuration/roles/create/certificates/notary/vars/main.yaml +++ /dev/null @@ -1,7 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -notary_certs: "{{playbook_dir}}/build/corda/notary" \ No newline at end of file diff --git a/platforms/r3-corda/configuration/roles/create/helm_component/tasks/main.yaml b/platforms/r3-corda/configuration/roles/create/helm_component/tasks/main.yaml new file mode 100644 index 00000000000..20784534032 --- /dev/null +++ b/platforms/r3-corda/configuration/roles/create/helm_component/tasks/main.yaml @@ -0,0 +1,34 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## +############################################################################################# +# This role generates the value file for the helm releases +############################################################################################# + +# Ensure that the directory exists, and creates it, if it does not exist +- name: Ensures {{ values_dir }} dir exists + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/directory" + vars: + path: "{{ values_dir }}" + +############################################################################################## +# Create deployment file for notaries. 
It is executed only if is_notary is true +- name: "create value file for {{ component_name }}" + template: + src: "{{ dlt_templates[type] | default('helm_component.tpl') }}" + dest: "{{ values_dir }}/{{ component_name }}.yaml" + +################################################################################################ +# Test the value file for syntax errors/ missing values +# This is done by calling the helm_lint role and passing the value file parameter +# When a new k8_component is added, changes should be made in helm_lint role as well +- name: Helm lint + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/helm_lint" + vars: + helmtemplate_type: "{{ type }}" + chart_path: "{{ charts_dir }}" + value_file: "{{ values_dir }}/{{ component_name }}.yaml" diff --git a/platforms/r3-corda/configuration/roles/create/helm_component/templates/network_service.tpl b/platforms/r3-corda/configuration/roles/create/helm_component/templates/network_service.tpl new file mode 100644 index 00000000000..ee97570612b --- /dev/null +++ b/platforms/r3-corda/configuration/roles/create/helm_component/templates/network_service.tpl @@ -0,0 +1,73 @@ +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: {{ component_name }} + annotations: + fluxcd.io/automated: "false" + namespace: {{ component_ns }} +spec: + releaseName: {{ component_name }} + interval: 1m + chart: + spec: + chart: {{ gitops.chart_source }}/corda-network-service + sourceRef: + kind: GitRepository + name: flux-{{ network.env.type }} + namespace: flux-{{ network.env.type }} + values: + global: + serviceAccountName: vault-auth + cluster: + provider: {{ org.cloud_provider }} + cloudNativeServices: false + vault: + type: hashicorp + network: corda + address: {{ vault.url }} + authPath: {{ network.env.type }}{{ name }} + secretEngine: {{ vault.secret_path | default("secretsv2") }} + secretPrefix: "data/{{ network.env.type }}{{ name }}" + role: vault-role + proxy: + provider: {{ network.env.proxy }} + externalUrlSuffix: {{ org.external_url_suffix }} + storage: + size: 1Gi + dbSize: 1Gi + allowedTopologies: + enabled: false + tls: + enabled: true + settings: + networkServices: true + image: +{% if network.docker.username is defined %} + pullSecret: regcred +{% endif %} + pullPolicy: IfNotPresent + mongo: + repository: mongo + tag: 3.6.6 + hooks: + repository: {{ network.docker.url }}/bevel-build + tag: jdk8-stable + doorman: {{ network.docker.url }}/bevel-doorman-linuxkit:latest + nms: {{ network.docker.url }}/bevel-networkmap-linuxkit:latest + + settings: + removeKeysOnDelete: true + rootSubject: "CN=DLT Root CA,OU=DLT,O=DLT,L=New York,C=US" + mongoSubject: "{{ doorman.db_subject }}" + + doorman: + subject: "{{ doorman.subject }}" + authPassword: admin + dbPassword: newdbnm + port: {{ doorman.ports.servicePort }} + + nms: + subject: {{ nms.subject }} + authPassword: admin + dbPassword: newdbnm + port: {{ nms.ports.servicePort }} diff --git a/platforms/r3-corda/configuration/roles/create/helm_component/templates/node.tpl b/platforms/r3-corda/configuration/roles/create/helm_component/templates/node.tpl new file mode 100644 index 00000000000..e55d0d19197 --- /dev/null +++ b/platforms/r3-corda/configuration/roles/create/helm_component/templates/node.tpl @@ -0,0 +1,117 @@ +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: {{ component_name }} + annotations: + fluxcd.io/automated: "false" + namespace: {{ component_ns }} +spec: + releaseName: {{ component_name }} + interval: 1m + 
chart: + spec: + chart: {{ gitops.chart_source }}/corda-node + sourceRef: + kind: GitRepository + name: flux-{{ network.env.type }} + namespace: flux-{{ network.env.type }} + values: + global: + serviceAccountName: vault-auth + cluster: + provider: {{ org.cloud_provider }} + cloudNativeServices: false + vault: + type: hashicorp + network: corda + role: vault-role + address: {{ vault.url }} + authPath: {{ network.env.type }}{{ name }} + secretEngine: {{ vault.secret_path | default("secretsv2") }} + secretPrefix: "data/{{ network.env.type }}{{ name }}" + proxy: + provider: {{ network.env.proxy }} + externalUrlSuffix: {{ org.external_url_suffix }} + p2p: {{ node.p2p.ambassador | default('15010') }} + storage: + size: 1Gi + dbSize: 2Gi + allowedTopologies: + enabled: false + tls: + enabled: true + image: +{% if network.docker.username is defined %} + pullSecret: regcred +{% endif %} + pullPolicy: IfNotPresent + h2: ghcr.io/hyperledger/h2:2018 + corda: + repository: {{ network.docker.url }}/bevel-corda + tag: {{ network.version }} + initContainer: {{ network.docker.url }}/bevel-alpine:latest + hooks: + repository: ghcr.io/hyperledger/bevel-build + tag: jdk8-stable + nodeConf: + defaultKeystorePassword: cordacadevpass + defaultTruststorePassword: trustpass + keystorePassword: newpass + truststorePassword: newtrustpass + sslkeyStorePassword: sslpass + ssltrustStorePassword: ssltrustpass + removeKeysOnDelete: true + rpcUser: + - name: nodeoperations + password: nodeoperationsAdmin + permissions: [ALL] + p2pPort: {{ node.p2p.port|e }} + rpcPort: {{ node.rpc.port|e }} + rpcadminPort: {{ node.rpcadmin.port|e }} + rpcSettings: + useSsl: false + standAloneBroker: false + address: "0.0.0.0:{{ node.rpc.port|e }}" + adminAddress: "0.0.0.0:{{ node.rpcadmin.port|e }}" + ssl: + certificatesDirectory: na-ssl-false + sslKeystorePath: na-ssl-false + trustStoreFilePath: na-ssl-false + legalName: {{ node.subject|e }} #use peer-node level subject for legalName + messagingServerAddress: + jvmArgs: + systemProperties: + sshd: + port: + exportJMXTo: + transactionCacheSizeMegaBytes: 8 + attachmentContentCacheSizeMegaBytes: 10 + notary: + enabled: false + detectPublicIp: false + database: + exportHibernateJMXStatistics: false + dbPort: {{ node.dbtcp.port|e }} + dataSourceUser: sa + dataSourcePassword: admin + dataSourceClassName: "org.h2.jdbcx.JdbcDataSource" + jarPath: "/data/corda-workspace/h2/bin" + networkMapURL: {{ nms_url | quote }} + doormanURL: {{ doorman_url | quote }} + devMode: false + javaOptions: "-Xmx512m" +{% if org.cordapps is defined %} + cordApps: + getCordApps: true + jars: {{ org.cordapps.jars }} +{% if org.cordapps.username is defined %} + mavenSecret: "maven-secrets" +{% endif %} +{% endif %} + resources: + db: + memLimit: "1G" + memRequest: "512M" + node: + memLimit: "2G" + memRequest: "512M" diff --git a/platforms/r3-corda/configuration/roles/create/helm_component/templates/notary.tpl b/platforms/r3-corda/configuration/roles/create/helm_component/templates/notary.tpl new file mode 100644 index 00000000000..ec38e8ec03e --- /dev/null +++ b/platforms/r3-corda/configuration/roles/create/helm_component/templates/notary.tpl @@ -0,0 +1,120 @@ +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: {{ component_name }} + annotations: + fluxcd.io/automated: "false" + namespace: {{ component_ns }} +spec: + releaseName: {{ component_name }} + interval: 1m + chart: + spec: + chart: {{ gitops.chart_source }}/corda-node + sourceRef: + kind: GitRepository + name: flux-{{ network.env.type 
}} + namespace: flux-{{ network.env.type }} + values: + global: + serviceAccountName: vault-auth + cluster: + provider: {{ org.cloud_provider }} + cloudNativeServices: false + vault: + type: hashicorp + network: corda + role: vault-role + address: {{ vault.url }} + authPath: {{ network.env.type }}{{ name }} + secretEngine: {{ vault.secret_path | default("secretsv2") }} + secretPrefix: "data/{{ network.env.type }}{{ name }}" + proxy: + provider: {{ network.env.proxy }} + externalUrlSuffix: {{ org.external_url_suffix }} + p2p: {{ node.p2p.ambassador | default('15010') }} + storage: + size: 1Gi + dbSize: 2Gi + allowedTopologies: + enabled: false + tls: + enabled: true + nameOverride: {{ component_name }} + image: +{% if network.docker.username is defined %} + pullSecret: regcred +{% endif %} + pullPolicy: IfNotPresent + h2: ghcr.io/hyperledger/h2:2018 + corda: + repository: {{ network.docker.url }}/bevel-corda + tag: {{ network.version }} + initContainer: {{ network.docker.url }}/bevel-alpine:latest + hooks: + repository: ghcr.io/hyperledger/bevel-build + tag: jdk8-stable + nodeConf: + defaultKeystorePassword: cordacadevpass + defaultTruststorePassword: trustpass + keystorePassword: newpass + truststorePassword: newtrustpass + sslkeyStorePassword: sslpass + ssltrustStorePassword: ssltrustpass + removeKeysOnDelete: true + rpcUser: + - name: notaryoperations + password: notaryoperationsAdmin + permissions: [ALL] + p2pPort: {{ node.p2p.port|e }} + rpcPort: {{ node.rpc.port|e }} + rpcadminPort: {{ node.rpcadmin.port|e }} + rpcSettings: + useSsl: false + standAloneBroker: false + address: "0.0.0.0:{{ node.rpc.port|e }}" + adminAddress: "0.0.0.0:{{ node.rpcadmin.port|e }}" + ssl: + certificatesDirectory: na-ssl-false + sslKeystorePath: na-ssl-false + trustStoreFilePath: na-ssl-false + legalName: {{ node.subject|e }} #use peer-node level subject for legalName + messagingServerAddress: + jvmArgs: + systemProperties: + sshd: + port: + exportJMXTo: + transactionCacheSizeMegaBytes: 8 + attachmentContentCacheSizeMegaBytes: 10 + notary: + enabled: true + validating: {{ node.validating }} + serviceLegalName: {{ node.serviceName | default() }} + detectPublicIp: false + database: + exportHibernateJMXStatistics: false + dbPort: {{ node.dbtcp.port|e }} + dataSourceUser: sa + dataSourcePassword: admin + dataSourceClassName: "org.h2.jdbcx.JdbcDataSource" + jarPath: "/data/corda-workspace/h2/bin" + networkMapURL: {{ nms_url | quote }} + doormanURL: {{ doorman_url | quote }} + devMode: false + javaOptions: "-Xmx512m" +{% if org.cordapps is defined %} + cordApps: + getCordApps: true + jars: {{ org.cordapps.jars }} +{% if org.cordapps.username is defined %} + mavenSecret: "maven-secrets" +{% endif %} +{% endif %} + resources: + db: + memLimit: "1G" + memRequest: "512M" + node: + memLimit: "2G" + memRequest: "512M" diff --git a/platforms/r3-corda/configuration/roles/create/node_component/vars/main.yaml b/platforms/r3-corda/configuration/roles/create/helm_component/vars/main.yaml similarity index 84% rename from platforms/r3-corda/configuration/roles/create/node_component/vars/main.yaml rename to platforms/r3-corda/configuration/roles/create/helm_component/vars/main.yaml index 6edf2cd2bc3..c700d85589d 100644 --- a/platforms/r3-corda/configuration/roles/create/node_component/vars/main.yaml +++ b/platforms/r3-corda/configuration/roles/create/helm_component/vars/main.yaml @@ -5,9 +5,9 @@ ############################################################################################## dlt_templates: - db: h2.tpl - job: 
job.tpl - node: node.tpl + corda_notary: notary.tpl + corda_node: node.tpl + network_service: network_service.tpl corda_image: corda-4.0: corda:4.0-linuxkit corda-4.1: corda:4.1-linuxkit diff --git a/platforms/r3-corda/configuration/roles/create/k8_component/templates/create_doorman.tpl b/platforms/r3-corda/configuration/roles/create/k8_component/templates/create_doorman.tpl index 7ccbdf2e630..6597d46a18a 100644 --- a/platforms/r3-corda/configuration/roles/create/k8_component/templates/create_doorman.tpl +++ b/platforms/r3-corda/configuration/roles/create/k8_component/templates/create_doorman.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ services.doorman.name }} diff --git a/platforms/r3-corda/configuration/roles/create/k8_component/templates/create_mongodb.tpl b/platforms/r3-corda/configuration/roles/create/k8_component/templates/create_mongodb.tpl index 50e15440f6f..f12ef877f7d 100644 --- a/platforms/r3-corda/configuration/roles/create/k8_component/templates/create_mongodb.tpl +++ b/platforms/r3-corda/configuration/roles/create/k8_component/templates/create_mongodb.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: mongodb-{{ nodename }} @@ -39,8 +39,8 @@ spec: certsecretprefix: {{ vault.secret_path | default(org_name) }}/data/{{ org_name }}/{{ nodename }}/certs service: tcp: - port: 27017 - targetPort: 27017 + port: 27017 + targetPort: 27017 type: NodePort annotations: {} deployment: diff --git a/platforms/r3-corda/configuration/roles/create/k8_component/templates/network_map.tpl b/platforms/r3-corda/configuration/roles/create/k8_component/templates/network_map.tpl index af93d1e1990..cd74145f780 100644 --- a/platforms/r3-corda/configuration/roles/create/k8_component/templates/network_map.tpl +++ b/platforms/r3-corda/configuration/roles/create/k8_component/templates/network_map.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/r3-corda/configuration/roles/create/namespace/tasks/main.yaml b/platforms/r3-corda/configuration/roles/create/namespace/tasks/main.yaml index 46a6d8c428a..1dc43f01e94 100644 --- a/platforms/r3-corda/configuration/roles/create/namespace/tasks/main.yaml +++ b/platforms/r3-corda/configuration/roles/create/namespace/tasks/main.yaml @@ -11,7 +11,6 @@ name: "{{ playbook_dir }}/../../shared/configuration/roles/check/k8_component" vars: component_type: Namespace - component_name: "{{ org.name }}-ns" kubernetes: "{{ org.k8s }}" type: "no_retry" @@ -25,7 +24,6 @@ include_role: name: create/k8_component vars: - component_name: "{{ org.name }}-ns" component_type: "namespace" helm_lint: "false" release_dir: "{{ playbook_dir }}/../../../{{ org.gitops.release_dir }}" @@ -40,4 +38,4 @@ vars: GIT_DIR: "{{ playbook_dir }}/../../../" gitops: "{{ org.gitops }}" - msg: "[ci skip] Pushing deployment files for namespace and service accounts" + msg: "[ci skip] Pushing deployment files for namespace" diff --git a/platforms/r3-corda/configuration/roles/create/node_component/tasks/main.yaml b/platforms/r3-corda/configuration/roles/create/node_component/tasks/main.yaml deleted file mode 100644 index b4f599b0df2..00000000000 --- a/platforms/r3-corda/configuration/roles/create/node_component/tasks/main.yaml +++ /dev/null @@ -1,65 +0,0 @@ 
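Among the hunks above, the existing k8_component templates (create_doorman, create_mongodb, network_map) move from the older helm.toolkit.fluxcd.io/v2beta1 API to helm.toolkit.fluxcd.io/v2, the same API version used by the new helm_component templates earlier in this diff. For orientation only, a HelmRelease rendered from these templates now begins roughly as sketched below; every name shown is a placeholder, not a value taken from the diff.

apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: example-doorman        # placeholder release name
  namespace: example-ns        # placeholder namespace
spec:
  releaseName: example-doorman
  interval: 1m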
-############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -############################################################################################## -# This role creates the job value file for notaries and nodes -############################################################################################## - -############################################################################################## - ---- -# Ensure the directory existance, if not exits it creates a new one -- set_fact: - release_dir_path: "{{ release_dir }}/{{ component_name }}" - when: node_type == "node" - -# Set a fact values_dir_path -- set_fact: - release_dir_path: "{{ release_dir }}/{{ org_name }}/{{ component_name }}" - when: node_type != "node" - -# Ensures dir exists -- name: Ensures {{ release_dir_path }} dir exists - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/directory" - vars: - path: "{{ release_dir_path }}" - -############################################################################################## -# Create deployment file for notaries. It is executed only if is_notary is true -- name: "create value file for {{ component_name }} {{ component_type }}" - template: - src: "{{ dlt_templates[component_type] }}" - dest: "{{ values_file }}" - vars: - values_file: "{{ release_dir_path }}/{{ component_type }}.yaml" - chart: "corda-notary" - docker_image: "{{ corda_image[corda_version] }}" - when: node_type == "notary" - -############################################################################################## -# Create deployment file for nodes. 
It is executed only if is_notary is false -- name: "create value file for {{ component_name }} {{ component_type }}" - template: - src: "{{ dlt_templates[component_type] }}" - dest: "{{ values_file }}" - vars: - values_file: "{{ release_dir_path }}/{{ component_type }}.yaml" - chart: "corda-node" - docker_image: "{{ corda_image[corda_version] }}" - when: node_type == "node" - -################################################################################################ -# Test the value file for syntax errors/ missing values -# This is done by calling the helm_lint role and passing the value file parameter -# When a new k8_component is added, changes should be made in helm_lint role as well -- name: Helm lint - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/helm_lint" - vars: - helmtemplate_type: "{{ node_type }}{{ component_type }}" - chart_path: "{{ gitops.chart_source }}" - value_file: "{{ release_dir_path }}/{{ component_type }}.yaml" diff --git a/platforms/r3-corda/configuration/roles/create/node_component/templates/h2.tpl b/platforms/r3-corda/configuration/roles/create/node_component/templates/h2.tpl deleted file mode 100644 index 7d3e70e3771..00000000000 --- a/platforms/r3-corda/configuration/roles/create/node_component/templates/h2.tpl +++ /dev/null @@ -1,53 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: {{ component_name }}db - annotations: - fluxcd.io/automated: "false" - namespace: {{ component_ns }} -spec: - releaseName: {{ component_name }}db - interval: 1m - chart: - spec: - chart: {{ gitops.chart_source }}/corda-h2 - sourceRef: - kind: GitRepository - name: flux-{{ network.env.type }} - namespace: flux-{{ network.env.type }} - values: - replicaCount: 1 - nodeName: {{ component_name }} - metadata: - namespace: {{ component_ns }} - image: - containerName: {{ network.docker.url }}/h2:2018 -{% if network.docker.username is defined %} - imagePullSecret: regcred -{% endif %} - resources: - limits: "512Mi" - requests: "512Mi" - storage: - memory: 512Mi - mountPath: "/opt/h2-data" - name: {{ sc_name }} - service: - type: NodePort - p2p: - port: {{ node.p2p.port|e }} - rpc: - port: {{ node.rpc.port|e }} - rpcadmin: - port: {{ node.rpcadmin.port|e }} - tcp: - port: {{ node.dbtcp.port|e }} - targetPort: {{ node.dbtcp.targetPort|e }} - web: - targetPort: {{ node.dbweb.targetPort|e }} - port: {{ node.dbweb.port|e }} - annotations: {} - deployment: - annotations: {} - pvc: - annotations: {} diff --git a/platforms/r3-corda/configuration/roles/create/node_component/templates/job.tpl b/platforms/r3-corda/configuration/roles/create/node_component/templates/job.tpl deleted file mode 100644 index 2e3ea7af3c3..00000000000 --- a/platforms/r3-corda/configuration/roles/create/node_component/templates/job.tpl +++ /dev/null @@ -1,121 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: {{ component_name }}-initial-registration - annotations: - fluxcd.io/automated: "false" - namespace: {{ component_ns }} -spec: - releaseName: {{ component_name }}-initial-registration - interval: 1m - chart: - spec: - chart: {{ gitops.chart_source }}/{{ chart }}-initial-registration - sourceRef: - kind: GitRepository - name: flux-{{ network.env.type }} - namespace: flux-{{ network.env.type }} - values: - nodeName: {{ component_name }} - replicas: 1 - metadata: - namespace: {{ component_ns }} - image: - containerName: {{ network.docker.url }}/{{ docker_image }} - initContainerName: {{ network.docker.url 
}}/alpine-utils:1.0 -{% if network.docker.username is defined %} - imagePullSecret: regcred -{% endif %} - privateCertificate: true - doormanCertAlias: {{ doorman_domain | regex_replace('/', '') }} - networkmapCertAlias: {{ nms_domain | regex_replace('/', '') }} - nodeConf: - p2p: - url: {{ component_name }}.{{ component_ns }} - port: {{ node.p2p.port|e }} - ambassadorAddress: {{ component_name|e }}.{{ item.external_url_suffix }}:{{ node.p2p.ambassador | default('10002') }} - rpcSettings: - useSsl: false - standAloneBroker: false - address: "0.0.0.0:{{ node.rpc.port|e }}" - adminAddress: "0.0.0.0:{{ node.rpcadmin.port|e }}" - ssl: - certificatesDirectory: na-ssl-false - sslKeystorePath: na-ssl-false - trustStoreFilePath: na-ssl-false - legalName: {{ node.subject|e }} #use peer-node level subject for legalName - messagingServerAddress: - jvmArgs: - systemProperties: - sshd: - port: - exportJMXTo: - transactionCacheSizeMegaBytes: 8 - attachmentContentCacheSizeMegaBytes: 10 - {% if chart == 'corda-notary' %} - notary: - validating: {{ node.validating }} - serviceLegalName: {{ node.serviceName | default() }} - {% endif %} - detectPublicIp: false - database: - exportHibernateJMXStatistics: false - dbUrl: {{ component_name|e }}db - dbPort: {{ node.dbtcp.port|e }} - dataSourceClassName: "org.h2.jdbcx.JdbcDataSource" - dataSourceUrl: "jdbc:h2:tcp://{{ component_name|e }}db:{{ node.dbtcp.port|e }}/persistence;DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=10000;WRITE_DELAY=100;AUTO_RECONNECT=TRUE;" - jarPath: "/data/corda-workspace/h2/bin" -{% if doorman_url|length %} - networkMapURL: {{ nms_url | quote }} - doormanURL: {{ doorman_url | quote }} - compatibilityZoneURL: -{% else %} - compatibilityZoneURL: {{ nms_url | quote }} - networkMapURL: - doormanURL: -{% endif %} - jarVersion: {{ network.version | quote }} - devMode: false - env: - - name: JAVA_OPTIONS - value: -Xmx512m - - name: CORDA_HOME - value: /opt/corda - - name: BASE_DIR - value: /base/corda - credentials: - dataSourceUser: sa - rpcUser: - - name: {{ component_name|e }}operations - permissions: [ALL] - - volume: - baseDir: /base/corda - resources: - limits: "512Mi" - requests: "512Mi" - - service: - type: ClusterIP - p2p: - port: {{ node.p2p.port|e }} - targetPort: {{ node.p2p.targetPort|e }} - rpc: - port: {{ node.rpc.port|e }} - rpcadmin: - port: {{ node.rpcadmin.port|e }} - - vault: - address: {{ vault.url }} - role: vault-role - authpath: {{ component_auth }} - serviceaccountname: vault-auth - dbsecretprefix: {{ vault.secret_path | default(org_name) }}/data/{{ org_name}}/{{ component_name }}/credentials/database - rpcusersecretprefix: {{ vault.secret_path | default(org_name) }}/data/{{ org_name}}/{{ component_name }}/credentials/rpcusers - keystoresecretprefix: {{ vault.secret_path | default(org_name) }}/data/{{ org_name}}/{{ component_name }}/credentials/keystore - certsecretprefix: {{ vault.secret_path | default(org_name) }}/data/{{ org_name}}/{{ component_name }}/certs - retries: 10 - - healthcheck: - readinesscheckinterval: 10 - readinessthreshold: 15 diff --git a/platforms/r3-corda/configuration/roles/create/node_component/templates/node.tpl b/platforms/r3-corda/configuration/roles/create/node_component/templates/node.tpl deleted file mode 100644 index 792429b0bf0..00000000000 --- a/platforms/r3-corda/configuration/roles/create/node_component/templates/node.tpl +++ /dev/null @@ -1,152 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: {{ component_name }} - annotations: - fluxcd.io/automated: 
"false" - namespace: {{ component_ns }} -spec: - releaseName: {{ component_name }} - interval: 1m - chart: - spec: - chart: {{ gitops.chart_source }}/{{ chart }} - sourceRef: - kind: GitRepository - name: flux-{{ network.env.type }} - namespace: flux-{{ network.env.type }} - values: - nodeName: {{ component_name }} - replicas: 1 - metadata: - namespace: {{ component_ns }} - image: - containerName: {{ network.docker.url }}/{{ docker_image }} - initContainerName: {{ network.docker.url }}/alpine-utils:1.0 -{% if network.docker.username is defined %} - imagePullSecret: regcred -{% endif %} - privateCertificate: true - doormanCertAlias: {{ doorman_domain | regex_replace('/', '') }} - networkmapCertAlias: {{ nms_domain | regex_replace('/', '') }} - nodeConf: - p2p: - url: {{ component_name }}.{{ component_ns }} - port: {{ node.p2p.port|e }} - ambassadorAddress: {{ component_name|e }}.{{ item.external_url_suffix }}:{{ node.p2p.ambassador | default('10002') }} - rpcSettings: - useSsl: false - standAloneBroker: false - address: "0.0.0.0:{{ node.rpc.port|e }}" - adminAddress: "0.0.0.0:{{ node.rpcadmin.port|e }}" - ssl: - certificatesDirectory: na-ssl-false - sslKeystorePath: na-ssl-false - trustStoreFilePath: na-ssl-false - legalName: {{ node.subject|e }} #use peer-node level subject for legalName - messagingServerAddress: - jvmArgs: - systemProperties: - sshd: - port: - exportJMXTo: - transactionCacheSizeMegaBytes: 8 - attachmentContentCacheSizeMegaBytes: 10 - {% if chart == 'corda-notary' %} - notary: - validating: {{ node.validating }} - serviceLegalName: {{ node.serviceName | default() }} - {% endif %} - detectPublicIp: false - database: - exportHibernateJMXStatistics: false - dbUrl: {{ component_name|e }}db - dbPort: {{ node.dbtcp.port|e }} - dataSourceClassName: "org.h2.jdbcx.JdbcDataSource" - dataSourceUrl: "jdbc:h2:tcp://{{ component_name|e }}db:{{ node.dbtcp.port|e }}/persistence;DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=10000;WRITE_DELAY=100;AUTO_RECONNECT=TRUE;" - jarPath: "/data/corda-workspace/h2/bin" -{% if doorman_url|length %} - networkMapURL: {{ nms_url | quote }} - doormanURL: {{ doorman_url | quote }} - compatibilityZoneURL: -{% else %} - compatibilityZoneURL: {{ nms_url | quote }} - networkMapURL: - doormanURL: -{% endif %} - jarVersion: {{ network.version | quote }} - devMode: false - env: - - name: JAVA_OPTIONS - value: -Xmx512m - - name: CORDA_HOME - value: /opt/corda - - name: BASE_DIR - value: /base/corda - credentials: - dataSourceUser: sa - rpcUser: - - name: {{ component_name|e }}operations - permissions: [ALL] - -{% if cordapps_details|length %} - cordapps: - getcordapps: true - jars: - {% for jars in cordapps.jars %} -- url: {{ jars.jar.url }} - {% endfor %} -{% else %} - cordapps: - getcordapps: false -{% endif %} - - volume: - baseDir: /base/corda - resources: - limits: "1Gi" - requests: "1Gi" - pvc: - name: {{ component_name|e }}-pvc - annotations: {} - memory: 512Mi - storageClassName: {{ sc_name }} - - service: - name: {{ component_name|e }} - type: ClusterIP - p2p: - port: {{ node.p2p.port|e }} - targetPort: {{ node.p2p.targetPort|e }} - rpc: - port: {{ node.rpc.port|e }} - targetPort: {{ node.rpc.targetPort|e }} - rpcadmin: - port: {{ node.rpcadmin.port|e }} - targetPort: {{ node.rpcadmin.targetPort|e }} - - deployment: - annotations: {} - vault: - address: {{ vault.url }} - role: vault-role - authpath: {{ component_auth }} - serviceaccountname: vault-auth - dbsecretprefix: {{ vault.secret_path | default(org_name) }}/data/{{ org_name}}/{{ component_name 
}}/credentials/database - rpcusersecretprefix: {{ vault.secret_path | default(org_name) }}/data/{{ org_name}}/{{ component_name }}/credentials/rpcusers - keystoresecretprefix: {{ vault.secret_path | default(org_name) }}/data/{{ org_name}}/{{ component_name }}/credentials/keystore - certsecretprefix: {{ vault.secret_path | default(org_name) }}/data/{{ org_name}}/{{ component_name }}/certs - networkmapsecretprefix: {{ vault.secret_path | default(org_name) }}/data/{{ org_name}}/{{ component_name }}/credentials/networkmappassword - cordappsreposecretprefix: {{ vault.secret_path | default(org_name) }}/data/{{ org_name}}/{{ component_name }}/credentials/cordapps - retries: 10 - - - healthcheck: - readinesscheckinterval: 20 - readinessthreshold: 20 -{% if network.env.proxy == 'ambassador' %} - ambassador: - component_name: {{ component_name | e }} - external_url_suffix: {{ item.external_url_suffix }} - p2p_ambassador: {{ node.p2p.ambassador | default('10002') }} -{% endif %} diff --git a/platforms/r3-corda/configuration/roles/create/secrets/tasks/main.yaml b/platforms/r3-corda/configuration/roles/create/secrets/tasks/main.yaml new file mode 100644 index 00000000000..c5a19d71fbd --- /dev/null +++ b/platforms/r3-corda/configuration/roles/create/secrets/tasks/main.yaml @@ -0,0 +1,42 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Wait for namespace to be created by flux +- name: "Wait for the namespace {{ component_ns }} to be created" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/k8_component" + vars: + component_type: "Namespace" + component_name: "{{ component_ns }}" + type: "retry" + +# Create the vault roottoken secret +- name: "Create vault token secret" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/shared_k8s_secrets" + vars: + namespace: "{{ component_ns }}" + check: "token_secret" + +# Create the docker pull credentials for image registry +- name: "Create docker credentials secret" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/shared_k8s_secrets" + vars: + namespace: "{{ component_ns }}" + check: "docker_credentials" + when: + - network.docker.username is defined + +# Create the user credentials for cordapp registry +- name: "Create maven credentials secret" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/shared_k8s_secrets" + vars: + namespace: "{{ component_ns }}" + check: "maven_credentials" + when: + - org.cordapps.username is defined diff --git a/platforms/r3-corda/configuration/roles/delete/vault_secrets/tasks/main.yaml b/platforms/r3-corda/configuration/roles/delete/vault_secrets/tasks/main.yaml index 1ef82bf4334..52f873da6fd 100644 --- a/platforms/r3-corda/configuration/roles/delete/vault_secrets/tasks/main.yaml +++ b/platforms/r3-corda/configuration/roles/delete/vault_secrets/tasks/main.yaml @@ -12,42 +12,48 @@ - name: Delete docker creds k8s: kind: Secret - namespace: "{{ component_name }}" + namespace: "{{ component_ns }}" name: "regcred" state: absent kubeconfig: "{{ kubernetes.config_file }}" context: "{{ kubernetes.context }}" - ignore_errors: yes + ignore_errors: true changed_when: false -# Delete service creds -- name: "Delete service creds" - include_tasks: nested_main.yaml - loop: "{{ 
services | dict2items }}" - loop_control: - loop_var: service - when: component_type != 'node' - -# Delete the Ambassador Creds -- name: Delete Ambassador creds - k8s: - kind: Secret - namespace: "{{ component_name }}" - name: "{{ node.name }}-ambassador-certs" - kubeconfig: "{{ kubernetes.config_file }}" - context: "{{ kubernetes.context }}" - state: absent +# Delete crypto materials from vault +- name: Delete Crypto for nodes + shell: | + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/{{ peer.name }}-certs + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/{{ peer.name }}-registrationcerts + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/{{ peer.name }}-tlscerts + environment: + VAULT_ADDR: "{{ item.vault.url }}" + VAULT_TOKEN: "{{ item.vault.root_token }}" loop: "{{ services.peers }}" loop_control: - loop_var: node - when: component_type == 'node' - ignore_errors: yes - changed_when: false + loop_var: peer + when: services.peers is defined + ignore_errors: true -# Delete crypto materials from vault -- name: Delete Crypto for nodes +- name: Delete Crypto for notary + shell: | + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/{{ peer.name }}-certs + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/{{ peer.name }}-registrationcerts + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/{{ peer.name }}-tlscerts + environment: + VAULT_ADDR: "{{ item.vault.url }}" + VAULT_TOKEN: "{{ item.vault.root_token }}" + vars: + peer: "{{ services.notary }}" + when: services.notary is defined + ignore_errors: true + +- name: Delete Crypto for network service shell: | - vault secrets disable {{ item.vault.secret_path | default(item.name) }} + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/{{ org_name }}-certs + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/{{ org_name }}-tlscerts environment: VAULT_ADDR: "{{ item.vault.url }}" VAULT_TOKEN: "{{ item.vault.root_token }}" + ignore_errors: true + when: component_type == 'network-service' diff --git a/platforms/r3-corda/configuration/roles/delete/vault_secrets/tasks/nested_main.yaml b/platforms/r3-corda/configuration/roles/delete/vault_secrets/tasks/nested_main.yaml index edcacaac954..1f31afb9508 100644 --- a/platforms/r3-corda/configuration/roles/delete/vault_secrets/tasks/nested_main.yaml +++ b/platforms/r3-corda/configuration/roles/delete/vault_secrets/tasks/nested_main.yaml @@ -17,5 +17,5 @@ kubeconfig: "{{ kubernetes.config_file }}" context: "{{ kubernetes.context }}" state: absent - ignore_errors: yes + ignore_errors: true changed_when: false diff --git a/platforms/r3-corda/configuration/roles/get/certs/tasks/main.yaml b/platforms/r3-corda/configuration/roles/get/certs/tasks/main.yaml new file mode 100644 index 00000000000..eb23c7486af --- /dev/null +++ b/platforms/r3-corda/configuration/roles/get/certs/tasks/main.yaml @@ -0,0 +1,30 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
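# A note on the "Delete Crypto" tasks above, added as an illustrative aside and not part of the
# patch: on a KV version 2 secrets engine, 'vault kv delete' only soft-deletes the latest version
# of a secret. If a reset should also remove older versions and the secret metadata, the matching
# call would be 'vault kv metadata delete' on the same path. Sketch below; the shell variables
# stand in for the values those tasks interpolate (secret_path, network.env.type, org name, peer
# name) and are assumptions, not names defined anywhere in this change.
vault kv metadata delete "${SECRET_PATH:-secretsv2}/${ENV_TYPE}${ORG_NAME}/${PEER_NAME}-certs"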
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Get the doorman cert from network-service org +- name: Get doorman cert from current org + kubernetes.core.k8s_info: + kubeconfig: "{{ kubernetes.config_file }}" + kind: Secret + name: "doorman-tls-certs" + namespace: "{{ component_ns }}" + register: doorman_data + +# Get the networkmap cert from network-service org +- name: Get nms cert from current org + kubernetes.core.k8s_info: + kubeconfig: "{{ kubernetes.config_file }}" + kind: Secret + name: "nms-tls-certs" + namespace: "{{ component_ns }}" + register: nms_data + +- name: Save certs locally for secondary init + shell: | + echo {{ doorman_data.resources[0].data['tls.crt'] }} > {{ files_loc }}/doorman.crt + echo {{ nms_data.resources[0].data['tls.crt'] }} > {{ files_loc }}/nms.crt + vars: + files_loc: "{{playbook_dir}}/../../../{{ charts_dir }}/corda-init/files" diff --git a/platforms/r3-corda/configuration/roles/init/tasks/main.yaml b/platforms/r3-corda/configuration/roles/init/tasks/main.yaml new file mode 100644 index 00000000000..b358c18bc92 --- /dev/null +++ b/platforms/r3-corda/configuration/roles/init/tasks/main.yaml @@ -0,0 +1,34 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Wait for namespace creation +- name: "Wait for namespace creation for {{ name }}" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/k8_component" + vars: + component_type: "Namespace" + component_name: "{{ component_ns }}" + type: "retry" + +# create build directory +- name: Create build directory if it does not exist + file: + path: "{{ build_path }}" + state: directory + +- name: Get the kubernetes server url + shell: | + KUBECONFIG={{ kubernetes.config_file }} kubectl config view --minify | grep server | cut -f 2- -d ":" | tr -d " " + register: kubernetes_server_url + +# This task runs the Corda init +- name: Corda init helm install + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/job_component" + vars: + component_name: "init" + type: "{{ init_type }}" + kubernetes_url: "{{ kubernetes_server_url.stdout }}" diff --git a/platforms/r3-corda/configuration/roles/setup/doorman/tasks/main.yml b/platforms/r3-corda/configuration/roles/setup/doorman/tasks/main.yml deleted file mode 100644 index 620878d8e43..00000000000 --- a/platforms/r3-corda/configuration/roles/setup/doorman/tasks/main.yml +++ /dev/null @@ -1,108 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. 
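# Illustrative check, not part of the get/certs role above: the 'tls.crt' value returned by
# kubernetes.core.k8s_info is base64-encoded, so the doorman.crt and nms.crt files written by that
# role hold the encoded string rather than a PEM certificate. One way to inspect what was fetched,
# using the secret name from that role; COMPONENT_NS is an assumed stand-in for component_ns.
kubectl get secret doorman-tls-certs -n "${COMPONENT_NS}" -o jsonpath='{.data.tls\.crt}' \
  | base64 -d | openssl x509 -noout -subject -enddate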
-# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -############################################################################################## -# This role creates the value file for doorman -############################################################################################## - -# Wait for namespace creation -- name: "Wait for namespace creation for {{ name }}" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/k8_component" - vars: - component_type: "Namespace" - component_name: "{{ component_ns }}" - type: "retry" - -# Generate Doorman certificates -- name: "Create certificates for doorman" - include_role: - name: create/certificates/doorman - vars: - component_name: "{{ services.doorman.name | lower }}" - root_subject: "{{ item.subject }}" - cert_subject: "{{ item.subject | regex_replace(',', '/') }}" - doorman_subject: "{{ services.doorman.subject }}" - doorman_cert_subject: "{{ services.doorman.subject | regex_replace(',', '/') }}" - mongoroot_cert_subj: "{{ services.doorman.db_subject }}" - -# Create deployment file for doorman mongodb node when tls is off -- name: "create mongodb for doorman" - include_role: - name: create/k8_component - vars: - component_name: "doormanmongodb" - component_type: "mongodb" - org_name: "{{ item.name | lower }}" - helm_lint: "true" - nodename: "{{ services.doorman.name }}" - charts_dir: "{{ gitops.chart_source }}" - component_auth: "{{ network.env.type }}{{ org_name }}" - org: "{{ item }}" - release_dir: "{{ playbook_dir }}/../../../{{ gitops.release_dir }}" - chart: "corda-mongodb" - when: services.doorman.tls == 'off' - -# Create deployment file for doorman mongodb node when tls is on -- name: "create mongodb for doorman" - include_role: - name: create/k8_component - vars: - component_name: "doormanmongodb" - component_type: "mongodb" - org_name: "{{ item.name | lower }}" - helm_lint: "true" - nodename: "{{ services.doorman.name }}" - charts_dir: "{{ gitops.chart_source }}" - component_auth: "{{ network.env.type }}{{ org_name }}" - org: "{{ item }}" - release_dir: "{{ playbook_dir }}/../../../{{ gitops.release_dir }}" - chart: "corda-mongodb-tls" - when: services.doorman.tls == 'on' - -# create deployment file for doorman node -- name: "create doorman" - include_role: - name: create/k8_component - vars: - component_name: "{{ services.doorman.name }}" - org_name: "{{ item.name | lower }}" - component_type: "doorman" - helm_lint: "true" - charts_dir: "{{ gitops.chart_source }}" - component_auth: "{{ network.env.type }}{{ org_name }}" - org: "{{ item }}" - release_dir: "{{ playbook_dir }}/../../../{{ gitops.release_dir }}" - chart: "corda-doorman" - chart_tls: "false" - when: services.doorman.tls == 'off' - -# create deployment file for doorman node when tls is on -- name: "create doorman" - include_role: - name: create/k8_component - vars: - component_name: "{{ services.doorman.name }}" - org_name: "{{ item.name | lower }}" - component_type: "doorman" - helm_lint: "true" - charts_dir: "{{ gitops.chart_source }}" - component_auth: "{{ network.env.type }}{{ org_name }}" - org: "{{ item }}" - release_dir: "{{ playbook_dir }}/../../../{{ gitops.release_dir }}" - chart: "corda-doorman-tls" - chart_tls: "true" - when: services.doorman.tls == 'on' - -# Push the doorman deployment files to repository -- name: "Push the created deployment files to repository" - include_role: - name: "{{ playbook_dir 
}}/../../shared/configuration/roles/git_push" - vars: - GIT_DIR: "{{ playbook_dir }}/../../../" - msg: "[ci skip] Pushing deployment files for doorman node and doorman mongodb node" - tags: - - notest diff --git a/platforms/r3-corda/configuration/roles/setup/get_crypto/tasks/main.yaml b/platforms/r3-corda/configuration/roles/setup/get_crypto/tasks/main.yaml deleted file mode 100644 index 78b2a53e864..00000000000 --- a/platforms/r3-corda/configuration/roles/setup/get_crypto/tasks/main.yaml +++ /dev/null @@ -1,41 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -############################################################################################# -# This role saves the crypto from Vault into ansible_provisioner -############################################################################################# - -# Ensure admincerts directory is present in build -- name: Ensure directory exists - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/directory" - vars: - path: "{{ cert_path }}" - -# Save the cert file -- name: Save cert - local_action: copy content="{{ vault_output['data'].data.tlscacerts | b64decode }}" dest="{{ cert_path }}/{{ type }}.crt" - when: type == 'ambassador' - -# Save the key file -- name: Save key - local_action: copy content="{{ vault_output['data'].data.tlskey | b64decode }}" dest="{{ cert_path }}/{{ type }}.key" - when: type == 'ambassador' - -# Save root keychain -- name: Save root keychain - local_action: copy content="{{ vault_output['data'].data.rootcakey | b64decode }}" dest="{{ cert_path }}/keys.jks" - when: type == 'rootca' - -# Save root cert -- name: Save root cert - local_action: copy content="{{ vault_output['data'].data.cacerts | b64decode }}" dest="{{ cert_path }}/cordarootca.pem" - when: type == 'rootca' - -# Save root key -- name: Save root key - local_action: copy content="{{ vault_output['data'].data.keystore | b64decode }}" dest="{{ cert_path }}/cordarootca.key" - when: type == 'rootca' diff --git a/platforms/r3-corda/configuration/roles/setup/network_service/tasks/main.yaml b/platforms/r3-corda/configuration/roles/setup/network_service/tasks/main.yaml new file mode 100644 index 00000000000..094b35e0864 --- /dev/null +++ b/platforms/r3-corda/configuration/roles/setup/network_service/tasks/main.yaml @@ -0,0 +1,41 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Create deployment file for network services +- name: Create helm release file for network service + include_role: + name: create/helm_component + vars: + component_name: "{{ name }}" + type: "network_service" + doorman: "{{ org.services.doorman }}" + nms: "{{ org.services.nms }}" + +# Git Push : Pushes the above generated files to git +- name: Git Push + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" + vars: + GIT_DIR: "{{ playbook_dir }}/../../../" + msg: "[ci skip] Pushing network-service files" + tags: + - notest + +# Wait for the NMS be running +- name: "Wait for the NMS to run" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" + vars: + label_selectors: + - app.kubernetes.io/component = nms + component_type: "Pod" + component_name: "nms" + namespace: "{{ component_ns }}" + +# Get the doorman and nms certs +- name: Get doorman and nms certs + include_role: + name: get/certs diff --git a/platforms/r3-corda/configuration/roles/setup/nms/tasks/main.yaml b/platforms/r3-corda/configuration/roles/setup/nms/tasks/main.yaml deleted file mode 100644 index b48a022db81..00000000000 --- a/platforms/r3-corda/configuration/roles/setup/nms/tasks/main.yaml +++ /dev/null @@ -1,109 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -############################################################################################## -# This role creates the value file for nms -############################################################################################## - -# Wait for namespace creation -- name: "Wait for namespace creation for {{ organisation }}" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/k8_component" - vars: - component_type: "Namespace" - component_name: "{{ component_ns }}" - type: "retry" - tags: - - notest - -# generate NMS certificates -- name: "Create certificates for nms" - include_role: - name: create/certificates/nms - vars: - component_name: "{{ services.nms.name | lower }}" - root_subject: "{{ item.subject }}" - cert_subject: "{{ item.subject | regex_replace(',', '/') }}" - nms_subject: "{{ services.nms.subject }}" - nms_cert_subject: "{{ services.nms.subject | regex_replace(',', '/') }}" - mongoroot_cert_subj: "{{ services.nms.db_subject }}" - -# Create deployment file for nms mongodb node when tls is off -- name: "create mongodb for networkmap" - include_role: - name: create/k8_component - vars: - component_name: "networkmapmongodb" - component_type: "mongodb" - helm_lint: "true" - charts_dir: "{{ gitops.chart_source}}" - nodename: "{{ services.nms.name | lower }}" - org_name: "{{ item.name | lower }}" - component_auth: "{{ network.env.type }}{{ org_name }}" - org: "{{ item }}" - release_dir: "{{ playbook_dir }}/../../../{{ gitops.release_dir }}" - chart: "corda-mongodb" - when: services.nms.tls == 'off' - -# Create deployment file for nms mongodb node when tls is on -- name: "create mongodb for networkmap" - include_role: - name: create/k8_component - vars: - component_name: "networkmapmongodb" - component_type: "mongodb" - org_name: "{{ item.name | lower }}" - helm_lint: "true" 
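# Illustrative equivalent of the "Wait for the NMS to run" task above, not part of the role: the
# same readiness gate expressed directly with kubectl, using the label selector shown in that
# task. COMPONENT_NS is an assumed stand-in for component_ns; the timeout is an arbitrary example.
kubectl -n "${COMPONENT_NS}" wait pod \
  -l app.kubernetes.io/component=nms \
  --for=condition=Ready --timeout=600s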
- charts_dir: "{{ gitops.chart_source}}" - nodename: "{{ services.nms.name | lower }}" - component_auth: "{{ network.env.type }}{{ org_name }}" - org: "{{ item }}" - release_dir: "{{ playbook_dir }}/../../../{{ gitops.release_dir }}" - chart: "corda-mongodb-tls" - when: services.nms.tls == 'on' - -# create deployment file for networkmap node when tls is off -- name: "create nms" - include_role: - name: create/k8_component - vars: - component_name: "{{ services.nms.name | lower }}" - component_type: "nms" - org_name: "{{ item.name | lower }}" - helm_lint: "true" - charts_dir: "{{ gitops.chart_source }}" - component_auth: "{{ network.env.type }}{{ org_name }}" - org: "{{ item }}" - release_dir: "{{ playbook_dir }}/../../../{{ gitops.release_dir }}" - chart: "corda-networkmap" - chart_tls: "false" - when: services.nms.tls == 'off' - -# create deployment file for networkmap node when tls is on -- name: "create nms" - include_role: - name: create/k8_component - vars: - component_name: "{{ services.nms.name | lower }}" - component_type: "nms" - org_name: "{{ item.name | lower }}" - helm_lint: "true" - charts_dir: "{{ gitops.chart_source }}" - component_auth: "{{ network.env.type }}{{ org_name }}" - org: "{{ item }}" - release_dir: "{{ playbook_dir }}/../../../{{ gitops.release_dir }}" - chart: "corda-networkmap-tls" - chart_tls: "true" - when: services.nms.tls == 'on' - - -# Push the nms deployment files to repository -- name: "Push the created deployment files to repository" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" - vars: - GIT_DIR: "{{ playbook_dir }}/../../../" - msg: "[ci skip] Pushing deployment files for networkmap node and networkmapmongodb node" diff --git a/platforms/r3-corda/configuration/roles/setup/node/tasks/main.yaml b/platforms/r3-corda/configuration/roles/setup/node/tasks/main.yaml index 8aa6fa15758..9144d99e609 100644 --- a/platforms/r3-corda/configuration/roles/setup/node/tasks/main.yaml +++ b/platforms/r3-corda/configuration/roles/setup/node/tasks/main.yaml @@ -8,121 +8,31 @@ # This role creates the deployment files for node and pushes them to repository ############################################################################################## -# Wait for namespace creation for nodes -- name: "Wait for namespace creation for nodes" +- name: "Setup primary init with network-service org" include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/check/k8_component" + name: init vars: - component_type: "Namespace" - component_name: "{{ component_ns }}" - type: "retry" - tags: - - notest - -# Generate crypto for nodes -- name: Generate crypto for nodes - include_role: - name: create/certificates/node - vars: - component_name: "{{ node.name | lower }}" - cordapps_details: "{{ cordapps }}" - nms_url: "{{ network | json_query('network_services[?type==`networkmap`].uri') | first }}" - nms_cert_file: "{{ network | json_query('network_services[?type==`networkmap`].certificate') | first }}" - doorman_cert_file: "{{ network | json_query('network_services[?type==`doorman`].certificate') | first }}" - cert_subject: "{{ item.subject | regex_replace(',', '/') }}" #Use org level subject for certificate generation - loop: "{{ services.peers }}" - loop_control: - loop_var: node - -# Create deployment files for h2 for node -- name: 'Create node db deployment file' - include_role: - name: create/node_component - vars: - node_type: "node" - component_type: "db" - org_name: "{{ item.name | lower }}" - component_name: "{{ node.name 
}}" - corda_version: "corda-{{ network.version }}" - release_dir: "{{ playbook_dir }}/../../../{{ gitops.release_dir }}" - loop: "{{ services.peers }}" - loop_control: - loop_var: node - -# Check if nodekeystore already created -- name: Check if nodekeystore already created - shell: | - vault kv get -field=nodekeystore.jks {{ vault.secret_path | default(name) }}/{{ name }}/{{ node.name }}/certs/nodekeystore - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: nodekeystore_result - loop: "{{ services.peers }}" - loop_control: - loop_var: node - ignore_errors: yes - changed_when: false - -# create deployment files for job for node -- name: 'Create node initial-registration job file' - include_role: - name: create/node_component - vars: - node_type: "node" - component_type: "job" - org_name: "{{ item.name | lower }}" - component_auth: "{{ network.env.type }}{{ org_name }}" - component_name: "{{ node.name }}" - nms_url: "{{ network | json_query('network_services[?type==`networkmap`].uri') | first }}" - nms_domain: "{{ nms_url.split(':')[1] }}" - doorman_url: "{{ network | json_query('network_services[?type==`doorman`].uri') | first }}" - doorman_domain: "{{ doorman_url.split(':')[1] }}" - corda_version: "corda-{{ network.version }}" - release_dir: "{{ playbook_dir }}/../../../{{ gitops.release_dir }}" - loop: "{{ services.peers }}" - loop_control: - loop_var: node - when: nodekeystore_result.results[0].failed == True - -# Git Push: Push the created deployment files to repository -- name: "Push the created deployment files to repository" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" - vars: - GIT_DIR: "{{ playbook_dir }}/../../../" - msg: "[ci skip] Pushing db and job deployment files for {{ node.name }}" - loop: "{{ services.peers }}" - loop_control: - loop_var: node + build_path: "./build" + init_type: "secondary_init" + values_dir: "./build/{{ component_ns }}" # Create deployment file for node node -- name: 'Create node deployment file' +- name: Create node deployment file include_role: - name: create/node_component + name: create/helm_component vars: - node_type: "node" - component_type: "node" - org_name: "{{ item.name | lower }}" - component_auth: "{{ network.env.type }}{{ org_name }}" - component_name: "{{ node.name }}" + type: "corda_node" + component_name: "{{ node.name | lower }}" nms_url: "{{ network | json_query('network_services[?type==`networkmap`].uri') | first }}" - nms_domain: "{{ nms_url.split(':')[1] }}" doorman_url: "{{ network | json_query('network_services[?type==`doorman`].uri') | first }}" - doorman_domain: "{{ doorman_url.split(':')[1] }}" - corda_version: "corda-{{ network.version }}" - cordapps_details: "{{ cordapps }}" - release_dir: "{{ playbook_dir }}/../../../{{ gitops.release_dir }}" - loop: "{{ services.peers }}" + loop: "{{ org.services.peers }}" loop_control: loop_var: node -# Git Push: Push the deployment files for h2, job and node of node -- name: 'Push node deployment files' +# Git Push: Push the deployment files for node +- name: Push node deployment files include_role: name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" vars: GIT_DIR: "{{ playbook_dir }}/../../../" - msg: "[ci skip] Pushing node deployment file for {{ node.name }}" - loop: "{{ services.peers }}" - loop_control: - loop_var: node + msg: "[ci skip] Pushing node deployment file for node" diff --git a/platforms/r3-corda/configuration/roles/setup/notary/tasks/main.yaml 
b/platforms/r3-corda/configuration/roles/setup/notary/tasks/main.yaml index 55732ef41da..4a23f943837 100644 --- a/platforms/r3-corda/configuration/roles/setup/notary/tasks/main.yaml +++ b/platforms/r3-corda/configuration/roles/setup/notary/tasks/main.yaml @@ -9,7 +9,7 @@ ############################################################################################## # Wait for namespace creation for notary -- name: "Wait for namespace creation for notary" +- name: Wait for namespace creation for notary include_role: name: "{{ playbook_dir }}/../../shared/configuration/roles/check/k8_component" vars: @@ -19,88 +19,18 @@ tags: - notest -# Generate crypto for notary -- name: Generate crypto for notary - include_role: - name: create/certificates/notary - vars: - component_name: "{{ services.notary.name | lower }}" - nms_url: "{{ network | json_query('network_services[?type==`networkmap`].uri') | first }}" - nms_cert_file: "{{ network | json_query('network_services[?type==`networkmap`].certificate') | first }}" - doorman_cert_file: "{{ network | json_query('network_services[?type==`doorman`].certificate') | first }}" - cordapps_details: "{{ cordapps }}" - cert_subject: "{{ item.subject | regex_replace(',', '/') }}" #Use org level subject for certificate generation - -# Create deployment files for h2 for notaries -- name: 'Create notary db deployment file' - include_role: - name: create/node_component - vars: - node_type: "notary" - component_type: "db" - component_name: "{{ node.name }}" - org_name: "{{ item.name | lower }}" - component_auth: "{{ network.env.type }}{{ org_name }}" - service_name: "{{ node.name | lower }}" - corda_version: "corda-{{ network.version }}" - release_dir: "{{ playbook_dir }}/../../../{{ gitops.release_dir }}" - -# Check if nodekeystore already created -- name: Check if nodekeystore already created - shell: | - vault kv get -field=nodekeystore.jks {{ vault.secret_path | default(name) }}/{{ name }}/{{ node.name }}/certs/nodekeystore - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: nodekeystore_result - ignore_errors: yes - -# Create deployment files for job for notaries -- name: 'Create notary initial-registration job file' - include_role: - name: create/node_component - vars: - node_type: "notary" - component_type: "job" - component_name: "{{ node.name }}" - org_name: "{{ item.name | lower }}" - component_auth: "{{ network.env.type }}{{ org_name }}" - nms_url: "{{ network | json_query('network_services[?type==`networkmap`].uri') | first }}" - nms_domain: "{{ nms_url.split(':')[1] }}" - doorman_url: "{{ network | json_query('network_services[?type==`doorman`].uri') | first }}" - doorman_domain: "{{ doorman_url.split(':')[1] }}" - corda_version: "corda-{{ network.version }}" - release_dir: "{{ playbook_dir }}/../../../{{ gitops.release_dir }}" - when: nodekeystore_result.failed == True - -# Git Push: Push the created deployment files to repository -- name: "Push the created deployment files to repository" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" - vars: - GIT_DIR: "{{ playbook_dir }}/../../../" - msg: "[ci skip] Pushing db and job deployment files for {{ node.name }}" - # Create deployment file for notary node -- name: 'Create notary node deployment file' +- name: Create notary node deployment file include_role: - name: create/node_component + name: create/helm_component vars: - node_type: "notary" - component_type: "node" - org_name: "{{ item.name | lower }}" - component_auth: 
"{{ network.env.type }}{{ org_name }}" - component_name: "{{ node.name }}" + type: "corda_notary" + component_name: "{{ node.name | lower }}" nms_url: "{{ network | json_query('network_services[?type==`networkmap`].uri') | first }}" - nms_domain: "{{ nms_url.split(':')[1] }}" doorman_url: "{{ network | json_query('network_services[?type==`doorman`].uri') | first }}" - doorman_domain: "{{ doorman_url.split(':')[1] }}" - corda_version: "corda-{{ network.version }}" - cordapps_details: "{{ cordapps }}" - release_dir: "{{ playbook_dir }}/../../../{{ gitops.release_dir }}" # Git Push: Push the deployment files for notary node -- name: 'Push notary deployment files' +- name: Push notary deployment files include_role: name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" vars: diff --git a/platforms/r3-corda/configuration/roles/setup/springboot_services/tasks/main.yaml b/platforms/r3-corda/configuration/roles/setup/springboot_services/tasks/main.yaml deleted file mode 100644 index d53843b113a..00000000000 --- a/platforms/r3-corda/configuration/roles/setup/springboot_services/tasks/main.yaml +++ /dev/null @@ -1,29 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - ---- -# Ensure the directory existance, if not exits it creates a new one -- name: Ensures {{ release_dir }}/{{ component_type_name }} dir exists - file: - path: "{{ release_dir }}/{{ component_type_name }}" - state: directory - vars: - component_type_name: "{{ item.key }}" - loop: "{{ dlt['corda']['nodes'] | dict2items }}" - -# Create corresponding value files for each component name -- name: "create value file for {{ component_type_name }} {{ type }}" - template: - src: "{{ dlt_templates[type] }}" - dest: "{{ values_file }}" - vars: - component_type_name: "{{ item.key }}" - component_name: "node" - type: "node" - values_file: "{{ release_dir }}/{{ component_type_name }}/{{ component_name }}.yaml" - config: "{{ item.value['config'] }}" - vault_addr: "{{ ansible_env.VAULT_ADDR }}" - loop: "{{ dlt['corda']['nodes'] | dict2items }}" diff --git a/platforms/r3-corda/configuration/roles/setup/springboot_services/templates/web.tpl b/platforms/r3-corda/configuration/roles/setup/springboot_services/templates/web.tpl deleted file mode 100644 index 67b8c2e4819..00000000000 --- a/platforms/r3-corda/configuration/roles/setup/springboot_services/templates/web.tpl +++ /dev/null @@ -1,52 +0,0 @@ -apiVersion: helm.fluxcd.io/v1 -kind: HelmRelease -metadata: - name: {{ config.name }}-web - annotations: - fluxcd.io/automated: "false" - {{ config.metadata | to_nice_yaml | indent(2) }} -spec: - releaseName: {{ config.name }}-web - chart: - path: {{ org.value.config.constants.chart_path }}/springbootwebserver - {{ org.value.config.constants.chart | to_nice_yaml | indent(4) }} - values: - nodeName: {{ config.name }} - metadata: - {{ config.metadata | to_nice_yaml | indent(6) }} - image: - containerName: {{ network.docker.url }}/corda:3.3.0-corda-webserver-test-20190219 - initContainerName: {{ network.docker.url }}/alpine-utils:1.0 -{% if network.docker.username is defined %} - imagePullSecret: regcred -{% endif %} - nodeConf: - {{ config['spec']['values'].nodeConf | to_nice_yaml | indent(6) }} - useSSL: false - controllerName: Controller - trustStorePath: /opt/corda/certificates/sslkeystore.jks - 
trustStoreProvider: jks - tlsAlias: cordaclienttls - port: 10004 - credentials: - {{ config['spec']['values'].credentials | to_nice_yaml | indent(6) }} - resources: - limits: "512Mi" - requests: "512Mi" - storage: - memory: 512Mi - mountPath: "/opt/h2-data" - name: {{ dlt['corda']['storageclass']['sc']['config']['name'] }} - vault: - address: {{ org.value.config.constants.vault_addr }} - {{ config['spec']['values'].vault | to_nice_yaml | indent(6) }} - db: - readinesscheckinterval: 10 - readinessthreshold: 15 - service: - {{ config['spec']['values'].service | to_nice_yaml | indent(6) }} - annotations: {} - deployment: - annotations: {} - pvc: - annotations: {} diff --git a/platforms/r3-corda/configuration/samples/network-cordav2.yaml b/platforms/r3-corda/configuration/samples/network-cordav2.yaml index 712a02a8d3d..a38820f9326 100644 --- a/platforms/r3-corda/configuration/samples/network-cordav2.yaml +++ b/platforms/r3-corda/configuration/samples/network-cordav2.yaml @@ -15,15 +15,16 @@ network: frontend: enabled #Flag for frontend to enabled for nodes/peers #Environment section for Kubernetes setup env: - type: "dev" # tag for the environment. Important to run multiple flux on single cluster + type: corda # tag for the environment. Important to run multiple flux on single cluster proxy: ambassador # value has to be 'ambassador' as 'haproxy' has not been implemented for Corda + proxy_namespace: "ambassador" ambassadorPorts: # Any additional Ambassador ports can be given here, this is valid only if proxy='ambassador' portRange: # For a range of ports from: 15010 to: 15020 # ports: 15020,15021 # For specific ports loadBalancerSourceRanges: '0.0.0.0/0' # comma-separated list without spaces of IP adresses for proxy='ambassador' allowed access - retry_count: 20 # Retry count for the checks + retry_count: 40 # Retry count for the checks external_dns: enabled # Should be enabled if using external-dns for automatic route configuration # Docker registry details where images are stored. This will be used to create k8s secrets # Please ensure all required images are built and stored in this registry. @@ -36,12 +37,10 @@ network: network_services: - service: type: doorman - uri: https://doorman.test.corda.blockchaincloudpoc.com - certificate: home_dir/platforms/r3-corda/configuration/build/corda/doorman/tls/ambassador.crt + uri: https://supplychain-doorman.test.corda.blockchaincloudpoc.com - service: type: networkmap - uri: https://networkmap.test.corda.blockchaincloudpoc.com - certificate: home_dir/platforms/r3-corda/configuration/build/corda/networkmap/tls/ambassador.crt + uri: https://supplychain-nms.test.corda.blockchaincloudpoc.com # Allows specification of one or many organizations that will be connecting to a network. # If an organization is also hosting the root of the network (e.g. doorman, membership service, etc), # then these services should be listed in this section as well. @@ -52,7 +51,7 @@ network: state: London location: London subject: "CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB" - type: doorman-nms-notary + type: network-service external_url_suffix: test.corda.blockchaincloudpoc.com cloud_provider: aws # Options: aws, azure, gcp aws: @@ -68,6 +67,7 @@ network: vault: url: "vault_addr" root_token: "vault_root_token" + secret_path: "secretsv2" # Git Repo details which will be used by GitOps/Flux. 
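# Worked example, illustrative only: with the values introduced in this sample (env.type: corda,
# organization: supplychain, vault.secret_path: secretsv2), the path pattern used by the
# delete/vault_secrets tasks earlier in this change resolves to entries such as the one below,
# which can be inspected with the Vault CLI; the name 'notary' is taken from the notary service
# defined further down in this sample.
vault kv get secretsv2/cordasupplychain/notary-certs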
# Do not check-in git_access_token gitops: @@ -100,21 +100,19 @@ network: doorman: name: doorman subject: "CN=Corda Doorman CA,OU=DLT,O=DLT,L=Berlin,C=DE" - db_subject: "/C=US/ST=California/L=San Francisco/O=My Company Ltd/OU=DBA/CN=mongoDB" + db_subject: "C=US,ST=California,L=San Francisco,O=DB,OU=DBA,CN=mongoDB" type: doorman ports: servicePort: 8080 targetPort: 8080 - tls: "on" # off/on based on TLS mode off/on for doorman nms: name: networkmap subject: "CN=Network Map,OU=FRA,O=FRA,L=Berlin,C=DE" - db_subject: "/C=US/ST=California/L=San Francisco/O=My Company Ltd/OU=DBA/CN=mongoDB" + db_subject: "C=US,ST=California,L=San Francisco,O=DB,OU=DBA,CN=mongoDB" type: networkmap ports: servicePort: 8080 targetPort: 8080 - tls: "on" # off/on based on TLS mode off/on for nms # Currently only supporting a single notary cluster, but may want to expand in the future notary: name: notary @@ -165,6 +163,7 @@ network: vault: url: "vault_addr" root_token: "vault_root_token" + secret_path: "secretsv2" # Git Repo details which will be used by GitOps/Flux. # Do not check-in git_access_token @@ -247,6 +246,7 @@ network: vault: url: "vault_addr" root_token: "vault_root_token" + secret_path: "secretsv2" # Git Repo details which will be used by GitOps/Flux. # Do not check-in git_access_token @@ -328,6 +328,7 @@ network: vault: url: "vault_addr" root_token: "vault_root_token" + secret_path: "secretsv2" # Git Repo details which will be used by GitOps/Flux. # Do not check-in git_access_token @@ -409,6 +410,7 @@ network: vault: url: "vault_addr" root_token: "vault_root_token" + secret_path: "secretsv2" # Git Repo details which will be used by GitOps/Flux. # Do not check-in git_access_token diff --git a/platforms/r3-corda/configuration/samples/network-minikube.yaml b/platforms/r3-corda/configuration/samples/network-minikube.yaml index 0c967a8d8b8..c3dfeda12a3 100644 --- a/platforms/r3-corda/configuration/samples/network-minikube.yaml +++ b/platforms/r3-corda/configuration/samples/network-minikube.yaml @@ -12,7 +12,7 @@ network: # Network level configuration specifies the attributes required for each organization # to join an existing network. type: corda - version: 4.4 + version: 4.9 frontend: enabled #Flag for frontend to enabled for nodes/peers @@ -32,7 +32,7 @@ network: # Please ensure all required images are built and stored in this registry. # Do not check-in docker_password. docker: - url: "index.docker.io/hyperledgerlabs" + url: "ghcr.io/hyperledger" username: "docker_username" password: "docker_password" @@ -40,13 +40,11 @@ network: network_services: - service: type: doorman - uri: http://:30030 - certificate: /home/bevel/platforms/r3-corda/configuration/build/corda/doorman/tls/ambassador.crt + uri: http://minikube-ip.address:30030 - service: type: networkmap - uri: http://:30040 - certificate: /home/bevel/platforms/r3-corda/configuration/build/corda/networkmap/tls/ambassador.crt - + uri: http://minikub-ip.address:30040 + # Allows specification of one or many organizations that will be connecting to a network. # If an organization is also hosting the root of the network (e.g. doorman, membership service, etc), # then these services should be listed in this section as well. 
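# Illustrative only, not part of the sample: the doorman and networkmap URIs above carry a
# placeholder host for the minikube IP (both spellings, 'minikube-ip.address' and
# 'minikub-ip.address', appear). One way to fill them in, assuming GNU sed and in-place editing
# of the sample at this path:
MINIKUBE_IP="$(minikube ip)"
sed -i -e "s/minikube-ip.address/${MINIKUBE_IP}/g" \
       -e "s/minikub-ip.address/${MINIKUBE_IP}/g" \
       platforms/r3-corda/configuration/samples/network-minikube.yaml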
@@ -58,7 +56,7 @@ network: state: London location: London subject: "CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB" - type: doorman-nms-notary + type: network-service external_url_suffix: develop.local.com cloud_provider: minikube # Options: aws, azure, gcp @@ -74,6 +72,7 @@ network: vault: url: "vault_addr" root_token: "vault_root_token" + secret_path: "secretsv2" # Git Repo details which will be used by GitOps/Flux. # Do not check-in git_access_token @@ -95,29 +94,28 @@ network: doorman: name: doorman subject: "CN=Corda Doorman CA,OU=DLT,O=DLT,L=Berlin,C=DE" - db_subject: "/C=US/ST=California/L=San Francisco/O=My Company Ltd/OU=DBA/CN=mongoDB" + db_subject: "C=US,ST=California,L=San Francisco,O=DB,OU=DBA,CN=mongoDB" type: doorman ports: nodePort: 30030 servicePort: 8080 targetPort: 8080 - tls: "off" # off/on based on TLS mode off/on for doorman nms: name: networkmap subject: "CN=Network Map,OU=FRA,O=FRA,L=Berlin,C=DE" - db_subject: "/C=US/ST=California/L=San Francisco/O=My Company Ltd/OU=DBA/CN=mongoDB" + db_subject: "C=US,ST=California,L=San Francisco,O=DB,OU=DBA,CN=mongoDB" type: networkmap ports: nodePort: 30040 servicePort: 8080 targetPort: 8080 - tls: "off" # off/on based on TLS mode off/on for nms # Currently only supporting a single notary cluster, but may want to expand in the future notary: name: notary subject: "O=Notary,OU=Notary,L=London,C=GB" + serviceName: "O=Notary Service,OU=Notary,L=London,C=GB" + validating: false type: notary - validating: false p2p: port: 10002 targetPort: 10002 @@ -158,6 +156,7 @@ network: vault: url: "vault_addr" root_token: "vault_root_token" + secret_path: "secretsv2" # Git Repo details which will be used by GitOps/Flux. # Do not check-in git_access_token diff --git a/platforms/r3-corda/configuration/samples/workflow/network-no-proxy-corda.yaml b/platforms/r3-corda/configuration/samples/workflow/network-no-proxy-corda.yaml new file mode 100644 index 00000000000..db5c839db84 --- /dev/null +++ b/platforms/r3-corda/configuration/samples/workflow/network-no-proxy-corda.yaml @@ -0,0 +1,407 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## +--- +# yaml-language-server: $schema=../../../../platforms/network-schema.json +# This is a sample configuration file for SupplyChain App on Single K8s Cluster. +# For multiple K8s clusters, there should be multiple configuration files. +network: + # Network level configuration specifies the attributes required for each organization + # to join an existing network. + type: corda + version: NETWORK_VERSION + frontend: enabled #Flag for frontend to enabled for nodes/peers + #Environment section for Kubernetes setup + env: + type: "FLUX_SUFFIX" # tag for the environment. 
Important to run multiple flux on single cluster + proxy: "none" + proxy_namespace: "ambassador" # value has to be 'ambassador' as 'haproxy' has not been implemented for Corda + ambassadorPorts: # Any additional Ambassador ports can be given here, this is valid only if proxy='ambassador' + portRange: # For a range of ports + from: PORT_RANGE_FROM + to: PORT_RANGE_TO + # ports: 15020,15021 # For specific ports + loadBalancerSourceRanges: '0.0.0.0/0' # comma-separated list without spaces of IP adresses for proxy='ambassador' allowed access + retry_count: 20 # Retry count for the checks + external_dns: enabled # Should be enabled if using external-dns for automatic route configuration + # Docker registry details where images are stored. This will be used to create k8s secrets + # Please ensure all required images are built and stored in this registry. + # Do not check-in docker_password. + docker: + url: "DOCKER_URL" + username: "DOCKER_USERNAME" + password: "DOCKER_PASSWORD" + # Remote connection information for doorman and networkmap (will be blank or removed for hosting organization) + network_services: + - service: + type: doorman + uri: https://doorman.EXTERNAL_URL_SUFFIX + certificate: USER_DIRECTORY/platforms/r3-corda/configuration/build/corda/doorman/tls/ambassador.crt + - service: + type: networkmap + uri: https://networkmap.EXTERNAL_URL_SUFFIX + certificate: USER_DIRECTORY/platforms/r3-corda/configuration/build/corda/networkmap/tls/ambassador.crt + # Allows specification of one or many organizations that will be connecting to a network. + # If an organization is also hosting the root of the network (e.g. doorman, membership service, etc), + # then these services should be listed in this section as well. + organizations: + - organization: + name: supplychain + country: UK + state: London + location: London + subject: "CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB" + type: doorman-nms-notary + external_url_suffix: "EXTERNAL_URL_SUFFIX" + cloud_provider: aws # Options: aws, azure, gcp + aws: + access_key: "AWS_ACCESS_KEY" # AWS Access key, only used when cloud_provider=aws + secret_key: "AWS_SECRET_KEY" # AWS Secret key, only used when cloud_provider=aws + # Kubernetes cluster deployment variables. The config file path and name has to be provided in case + # the cluster has already been created. + k8s: + context: "CLUSTER_CONTEXT" + config_file: "CLUSTER_CONFIG" + # Hashicorp Vault server address and root-token. Vault should be unsealed. + # Do not check-in root_token + vault: + url: "VAULT_ADDR" + root_token: "VAULT_ROOT_TOKEN" + # Git Repo details which will be used by GitOps/Flux. + # Do not check-in git_access_token + gitops: + git_protocol: "https" # Option for git over https or ssh + git_url: "https://github.com/GIT_USERNAME/bevel.git" # Gitops https or ssh url for flux value files + branch: "GIT_BRANCH" # Git branch where release is being made + release_dir: "platforms/r3-corda/releases/dev" # Relative Path in the Git repo for flux sync per environment. 
+ chart_source: "platforms/r3-corda/charts" # Relative Path where the Helm charts are stored in Git repo + git_repo: "github.com/GIT_USERNAME/bevel.git" # Gitops git repository URL for git push + username: "GIT_USERNAME" # Git Service user who has rights to check-in in all branches + password: "GIT_TOKEN" # Git Server user password/token (Optional for ssh; Required for https) + email: "GIT_EMAIL_ADDR" # Email to use in git config + private_key: "PRIVATE_KEY_PATH" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) + + # Services maps to the pods that will be deployed on the k8s cluster + # This sample has doorman, nms and notary on one cluster but different namespaces + services: + doorman: + name: doorman + subject: "CN=Corda Doorman CA,OU=DLT,O=DLT,L=Berlin,C=DE" + db_subject: "/C=US/ST=California/L=San Francisco/O=My Company Ltd/OU=DBA/CN=mongoDB" + type: doorman + ports: + servicePort: 8080 + targetPort: 8080 + tls: "on" # off/on based on TLS mode off/on for doorman + nms: + name: networkmap + subject: "CN=Network Map,OU=FRA,O=FRA,L=Berlin,C=DE" + db_subject: "/C=US/ST=California/L=San Francisco/O=My Company Ltd/OU=DBA/CN=mongoDB" + type: networkmap + ports: + servicePort: 8080 + targetPort: 8080 + tls: "on" # off/on based on TLS mode off/on for nms + # Currently only supporting a single notary cluster, but may want to expand in the future + notary: + name: notary + subject: "O=Notary,OU=Notary,L=London,C=GB" + serviceName: "O=Notary Service,OU=Notary,L=London,C=GB" + validating: true #true - if notary is validating, false - if notary is non-validating + type: notary + p2p: + port: 10002 + targetPort: 10002 + ambassador: 15010 #Port for ambassador service (must be from env.ambassadorPorts above) + rpc: + port: 10003 + targetPort: 10003 + rpcadmin: + port: 10005 + targetPort: 10005 + dbtcp: + port: 9101 + targetPort: 1521 + dbweb: + port: 8080 + targetPort: 81 + + # Specification for the 2nd organization. Each organization maps to a VPC and a separate k8s cluster + - organization: + name: manufacturer + country: CH + state: Zurich + location: Zurich + subject: "O=Manufacturer,OU=Manufacturer,L=Zurich,C=CH" + type: node + external_url_suffix: "EXTERNAL_URL_SUFFIX" + + cloud_provider: aws # Options: aws, azure, gcp + aws: + access_key: "AWS_ACCESS_KEY" # AWS Access key, only used when cloud_provider=aws + secret_key: "AWS_SECRET_KEY" # AWS Secret key, only used when cloud_provider=aws + + # Kubernetes cluster deployment variables. The config file path and name has to be provided in case + # the cluster has already been created. + k8s: + context: "CLUSTER_CONTEXT" + config_file: "CLUSTER_CONFIG" + + # Hashicorp Vault server address and root-token. Vault should be unsealed. + # Do not check-in root_token + vault: + url: "VAULT_ADDR" + root_token: "VAULT_ROOT_TOKEN" + + # Git Repo details which will be used by GitOps/Flux. + # Do not check-in git_access_token + gitops: + git_protocol: "https" # Option for git over https or ssh + git_url: "https://github.com/GIT_USERNAME/bevel.git" # Gitops https or ssh url for flux value files + branch: "GIT_BRANCH" # Git branch where release is being made + release_dir: "platforms/r3-corda/releases/dev" # Relative Path in the Git repo for flux sync per environment. 
+ chart_source: "platforms/r3-corda/charts" # Relative Path where the Helm charts are stored in Git repo + git_repo: "github.com/GIT_USERNAME/bevel.git" # Gitops git repository URL for git push + username: "GIT_USERNAME" # Git Service user who has rights to check-in in all branches + password: "GIT_TOKEN" # Git Server user password/token (Optional for ssh; Required for https) + email: "GIT_EMAIL_ADDR" # Email to use in git config + private_key: "PRIVATE_KEY_PATH" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) + + # The participating nodes are named as peers + services: + peers: + - peer: + name: manufacturer + subject: "O=Manufacturer,OU=Manufacturer,L=47.38/8.54/Zurich,C=CH" # This is the node identity. L=lat/long is mandatory for supplychain sample app + type: node + p2p: + port: 10002 + targetPort: 10002 + ambassador: 15010 #Port for ambassador service (must be from env.ambassadorPorts above) + rpc: + port: 10003 + targetPort: 10003 + rpcadmin: + port: 10005 + targetPort: 10005 + dbtcp: + port: 9101 + targetPort: 1521 + dbweb: + port: 8080 + targetPort: 81 + springboot: # This is for the springboot server + targetPort: 20001 + port: 20001 + expressapi: # This is for the express api server + targetPort: 3000 + port: 3000 + + - organization: + name: carrier + country: GB + state: London + location: London + subject: "O=Carrier,OU=Carrier,L=London,C=GB" + type: node + external_url_suffix: "EXTERNAL_URL_SUFFIX" + + cloud_provider: aws # Options: aws, azure, gcp + aws: + access_key: "AWS_ACCESS_KEY" # AWS Access key, only used when cloud_provider=aws + secret_key: "AWS_SECRET_KEY" # AWS Secret key, only used when cloud_provider=aws + + # Kubernetes cluster deployment variables. The config file path and name has to be provided in case + # the cluster has already been created. + k8s: + context: "CLUSTER_CONTEXT" + config_file: "CLUSTER_CONFIG" + + # Hashicorp Vault server address and root-token. Vault should be unsealed. + # Do not check-in root_token + vault: + url: "VAULT_ADDR" + root_token: "VAULT_ROOT_TOKEN" + + # Git Repo details which will be used by GitOps/Flux. + # Do not check-in git_access_token + gitops: + git_protocol: "https" # Option for git over https or ssh + git_url: "https://github.com/GIT_USERNAME/bevel.git" # Gitops https or ssh url for flux value files + branch: "GIT_BRANCH" # Git branch where release is being made + release_dir: "platforms/r3-corda/releases/dev" # Relative Path in the Git repo for flux sync per environment. + chart_source: "platforms/r3-corda/charts" # Relative Path where the Helm charts are stored in Git repo + git_repo: "github.com/GIT_USERNAME/bevel.git" # Gitops git repository URL for git push + username: "GIT_USERNAME" # Git Service user who has rights to check-in in all branches + password: "GIT_TOKEN" # Git Server user password/token (Optional for ssh; Required for https) + email: "GIT_EMAIL_ADDR" # Email to use in git config + private_key: "PRIVATE_KEY_PATH" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) + + services: + peers: + - peer: + name: carrier + subject: "O=Carrier,OU=Carrier,L=51.50/-0.13/London,C=GB" # This is the node subject. 
L=lat/long is mandatory for supplychain sample app + type: node + p2p: + port: 10002 + targetPort: 10002 + ambassador: 15020 #Port for ambassador service (must be from env.ambassadorPorts above) + rpc: + port: 10003 + targetPort: 10003 + rpcadmin: + port: 10005 + targetPort: 10005 + dbtcp: + port: 9101 + targetPort: 1521 + dbweb: + port: 8080 + targetPort: 81 + springboot: + targetPort: 20001 + port: 20001 + expressapi: + targetPort: 3000 + port: 3000 + + - organization: + name: store + country: US + state: New York + location: New York + subject: "O=Store,OU=Store,L=New York,C=US" + type: node + external_url_suffix: "EXTERNAL_URL_SUFFIX" + + cloud_provider: aws # Options: aws, azure, gcp + aws: + access_key: "AWS_ACCESS_KEY" # AWS Access key, only used when cloud_provider=aws + secret_key: "AWS_SECRET_KEY" # AWS Secret key, only used when cloud_provider=aws + + # Kubernetes cluster deployment variables. The config file path and name has to be provided in case + # the cluster has already been created. + k8s: + context: "CLUSTER_CONTEXT" + config_file: "CLUSTER_CONFIG" + + # Hashicorp Vault server address and root-token. Vault should be unsealed. + # Do not check-in root_token + vault: + url: "VAULT_ADDR" + root_token: "VAULT_ROOT_TOKEN" + + # Git Repo details which will be used by GitOps/Flux. + # Do not check-in git_access_token + gitops: + git_protocol: "https" # Option for git over https or ssh + git_url: "https://github.com/GIT_USERNAME/bevel.git" # Gitops https or ssh url for flux value files + branch: "GIT_BRANCH" # Git branch where release is being made + release_dir: "platforms/r3-corda/releases/dev" # Relative Path in the Git repo for flux sync per environment. + chart_source: "platforms/r3-corda/charts" # Relative Path where the Helm charts are stored in Git repo + git_repo: "github.com/GIT_USERNAME/bevel.git" # Gitops git repository URL for git push + username: "GIT_USERNAME" # Git Service user who has rights to check-in in all branches + password: "GIT_TOKEN" # Git Server user password/token (Optional for ssh; Required for https) + email: "GIT_EMAIL_ADDR" # Email to use in git config + private_key: "PRIVATE_KEY_PATH" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) + + services: + peers: + - peer: + name: store + subject: "O=Store,OU=Store,L=40.73/-74/New York,C=US" # This is the node identity. L=lat/long is mandatory for supplychain sample app + type: node + p2p: + port: 10002 + targetPort: 10002 + ambassador: 15020 #Port for ambassador service (must be from env.ambassadorPorts above) + rpc: + port: 10003 + targetPort: 10003 + rpcadmin: + port: 10005 + targetPort: 10005 + dbtcp: + port: 9101 + targetPort: 1521 + dbweb: + port: 8080 + targetPort: 81 + springboot: + targetPort: 20001 + port: 20001 + expressapi: + targetPort: 3000 + port: 3000 + + - organization: + name: warehouse + country: US + state: Massachusetts + location: Boston + subject: "O=Warehouse,OU=Warehouse,L=Boston,C=US" + type: node + external_url_suffix: "EXTERNAL_URL_SUFFIX" + + cloud_provider: aws # Options: aws, azure, gcp + aws: + access_key: "AWS_ACCESS_KEY" # AWS Access key, only used when cloud_provider=aws + secret_key: "AWS_SECRET_KEY" # AWS Secret key, only used when cloud_provider=aws + + # Kubernetes cluster deployment variables. The config file path and name has to be provided in case + # the cluster has already been created. + k8s: + context: "CLUSTER_CONTEXT" + config_file: "CLUSTER_CONFIG" + + # Hashicorp Vault server address and root-token. 
Vault should be unsealed. + # Do not check-in root_token + vault: + url: "VAULT_ADDR" + root_token: "VAULT_ROOT_TOKEN" + + # Git Repo details which will be used by GitOps/Flux. + # Do not check-in git_access_token + gitops: + git_protocol: "https" # Option for git over https or ssh + git_url: "https://github.com/GIT_USERNAME/bevel.git" # Gitops https or ssh url for flux value files + branch: "GIT_BRANCH" # Git branch where release is being made + release_dir: "platforms/r3-corda/releases/dev" # Relative Path in the Git repo for flux sync per environment. + chart_source: "platforms/r3-corda/charts" # Relative Path where the Helm charts are stored in Git repo + git_repo: "github.com/GIT_USERNAME/bevel.git" # Gitops git repository URL for git push + username: "GIT_USERNAME" # Git Service user who has rights to check-in in all branches + password: "GIT_TOKEN" # Git Server user password/token (Optional for ssh; Required for https) + email: "GIT_EMAIL_ADDR" # Email to use in git config + private_key: "PRIVATE_KEY_PATH" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) + + services: + peers: + - peer: + name: warehouse + subject: "O=Warehouse,OU=Warehouse,L=42.36/-71.06/Boston,C=US" # This is the node identity. L=lat/long is mandatory for supplychain sample app + type: node + p2p: + port: 10002 + targetPort: 10002 + ambassador: 15020 #Port for ambassador service (must be from env.ambassadorPorts above) + rpc: + port: 10003 + targetPort: 10003 + rpcadmin: + port: 10005 + targetPort: 10005 + dbtcp: + port: 9101 + targetPort: 1521 + dbweb: + port: 8080 + targetPort: 81 + springboot: + targetPort: 20001 + port: 20001 + expressapi: + targetPort: 3000 + port: 3000 diff --git a/platforms/r3-corda/configuration/samples/workflow/network-proxy-corda.yaml b/platforms/r3-corda/configuration/samples/workflow/network-proxy-corda.yaml new file mode 100644 index 00000000000..7eb9a46b327 --- /dev/null +++ b/platforms/r3-corda/configuration/samples/workflow/network-proxy-corda.yaml @@ -0,0 +1,407 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## +--- +# yaml-language-server: $schema=../../../../platforms/network-schema.json +# This is a sample configuration file for SupplyChain App on Single K8s Cluster. +# For multiple K8s clusters, there should be multiple configuration files. +network: + # Network level configuration specifies the attributes required for each organization + # to join an existing network. + type: corda + version: NETWORK_VERSION + frontend: enabled #Flag for frontend to enabled for nodes/peers + #Environment section for Kubernetes setup + env: + type: "FLUX_SUFFIX" # tag for the environment. 
Important when running multiple Flux instances on a single cluster + proxy: "ambassador" + proxy_namespace: "ambassador" # value has to be 'ambassador' as 'haproxy' has not been implemented for Corda + ambassadorPorts: # Any additional Ambassador ports can be given here, this is valid only if proxy='ambassador' + portRange: # For a range of ports + from: PORT_RANGE_FROM + to: PORT_RANGE_TO + # ports: 15020,15021 # For specific ports + loadBalancerSourceRanges: '0.0.0.0/0' # comma-separated list without spaces of IP addresses for proxy='ambassador' allowed access + retry_count: 20 # Retry count for the checks + external_dns: enabled # Should be enabled if using external-dns for automatic route configuration + # Docker registry details where images are stored. This will be used to create k8s secrets + # Please ensure all required images are built and stored in this registry. + # Do not check-in docker_password. + docker: + url: "DOCKER_URL" + username: "DOCKER_USERNAME" + password: "DOCKER_PASSWORD" + # Remote connection information for doorman and networkmap (will be blank or removed for hosting organization) + network_services: + - service: + type: doorman + uri: https://doorman.EXTERNAL_URL_SUFFIX + certificate: USER_DIRECTORY/platforms/r3-corda/configuration/build/corda/doorman/tls/ambassador.crt + - service: + type: networkmap + uri: https://networkmap.EXTERNAL_URL_SUFFIX + certificate: USER_DIRECTORY/platforms/r3-corda/configuration/build/corda/networkmap/tls/ambassador.crt + # Allows specification of one or many organizations that will be connecting to a network. + # If an organization is also hosting the root of the network (e.g. doorman, membership service, etc), + # then these services should be listed in this section as well. + organizations: + - organization: + name: supplychain + country: UK + state: London + location: London + subject: "CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB" + type: doorman-nms-notary + external_url_suffix: "EXTERNAL_URL_SUFFIX" + cloud_provider: aws # Options: aws, azure, gcp + aws: + access_key: "AWS_ACCESS_KEY" # AWS Access key, only used when cloud_provider=aws + secret_key: "AWS_SECRET_KEY" # AWS Secret key, only used when cloud_provider=aws + # Kubernetes cluster deployment variables. The config file path and name has to be provided in case + the cluster has already been created. + k8s: + context: "CLUSTER_CONTEXT" + config_file: "CLUSTER_CONFIG" + # Hashicorp Vault server address and root-token. Vault should be unsealed. + # Do not check-in root_token + vault: + url: "VAULT_ADDR" + root_token: "VAULT_ROOT_TOKEN" + # Git Repo details which will be used by GitOps/Flux. + # Do not check-in git_access_token + gitops: + git_protocol: "https" # Option for git over https or ssh + git_url: "https://github.com/GIT_USERNAME/bevel.git" # Gitops https or ssh url for flux value files + branch: "GIT_BRANCH" # Git branch where release is being made + release_dir: "platforms/r3-corda/releases/dev" # Relative Path in the Git repo for flux sync per environment.
+ chart_source: "platforms/r3-corda/charts" # Relative Path where the Helm charts are stored in Git repo + git_repo: "github.com/GIT_USERNAME/bevel.git" # Gitops git repository URL for git push + username: "GIT_USERNAME" # Git Service user who has rights to check-in in all branches + password: "GIT_TOKEN" # Git Server user password/token (Optional for ssh; Required for https) + email: "GIT_EMAIL_ADDR" # Email to use in git config + private_key: "PRIVATE_KEY_PATH" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) + + # Services maps to the pods that will be deployed on the k8s cluster + # This sample has doorman, nms and notary on one cluster but different namespaces + services: + doorman: + name: doorman + subject: "CN=Corda Doorman CA,OU=DLT,O=DLT,L=Berlin,C=DE" + db_subject: "/C=US/ST=California/L=San Francisco/O=My Company Ltd/OU=DBA/CN=mongoDB" + type: doorman + ports: + servicePort: 8080 + targetPort: 8080 + tls: "on" # off/on based on TLS mode off/on for doorman + nms: + name: networkmap + subject: "CN=Network Map,OU=FRA,O=FRA,L=Berlin,C=DE" + db_subject: "/C=US/ST=California/L=San Francisco/O=My Company Ltd/OU=DBA/CN=mongoDB" + type: networkmap + ports: + servicePort: 8080 + targetPort: 8080 + tls: "on" # off/on based on TLS mode off/on for nms + # Currently only supporting a single notary cluster, but may want to expand in the future + notary: + name: notary + subject: "O=Notary,OU=Notary,L=London,C=GB" + serviceName: "O=Notary Service,OU=Notary,L=London,C=GB" + validating: true #true - if notary is validating, false - if notary is non-validating + type: notary + p2p: + port: 10002 + targetPort: 10002 + ambassador: 15010 #Port for ambassador service (must be from env.ambassadorPorts above) + rpc: + port: 10003 + targetPort: 10003 + rpcadmin: + port: 10005 + targetPort: 10005 + dbtcp: + port: 9101 + targetPort: 1521 + dbweb: + port: 8080 + targetPort: 81 + + # Specification for the 2nd organization. Each organization maps to a VPC and a separate k8s cluster + - organization: + name: manufacturer + country: CH + state: Zurich + location: Zurich + subject: "O=Manufacturer,OU=Manufacturer,L=Zurich,C=CH" + type: node + external_url_suffix: "EXTERNAL_URL_SUFFIX" + + cloud_provider: aws # Options: aws, azure, gcp + aws: + access_key: "AWS_ACCESS_KEY" # AWS Access key, only used when cloud_provider=aws + secret_key: "AWS_SECRET_KEY" # AWS Secret key, only used when cloud_provider=aws + + # Kubernetes cluster deployment variables. The config file path and name has to be provided in case + # the cluster has already been created. + k8s: + context: "CLUSTER_CONTEXT" + config_file: "CLUSTER_CONFIG" + + # Hashicorp Vault server address and root-token. Vault should be unsealed. + # Do not check-in root_token + vault: + url: "VAULT_ADDR" + root_token: "VAULT_ROOT_TOKEN" + + # Git Repo details which will be used by GitOps/Flux. + # Do not check-in git_access_token + gitops: + git_protocol: "https" # Option for git over https or ssh + git_url: "https://github.com/GIT_USERNAME/bevel.git" # Gitops https or ssh url for flux value files + branch: "GIT_BRANCH" # Git branch where release is being made + release_dir: "platforms/r3-corda/releases/dev" # Relative Path in the Git repo for flux sync per environment. 
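+      # Illustrative note only (not part of the sample schema): after the GitHub workflow substitutes
+      # the placeholders above, this gitops block holds concrete values, e.g. branch: "main",
+      # username: "example-user", git_url: "https://github.com/example-user/bevel.git" (all hypothetical);
+      # the real token and private key are injected from GitHub Secrets and must never be committed.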
+ chart_source: "platforms/r3-corda/charts" # Relative Path where the Helm charts are stored in Git repo + git_repo: "github.com/GIT_USERNAME/bevel.git" # Gitops git repository URL for git push + username: "GIT_USERNAME" # Git Service user who has rights to check-in in all branches + password: "GIT_TOKEN" # Git Server user password/token (Optional for ssh; Required for https) + email: "GIT_EMAIL_ADDR" # Email to use in git config + private_key: "PRIVATE_KEY_PATH" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) + + # The participating nodes are named as peers + services: + peers: + - peer: + name: manufacturer + subject: "O=Manufacturer,OU=Manufacturer,L=47.38/8.54/Zurich,C=CH" # This is the node identity. L=lat/long is mandatory for supplychain sample app + type: node + p2p: + port: 10002 + targetPort: 10002 + ambassador: 15010 #Port for ambassador service (must be from env.ambassadorPorts above) + rpc: + port: 10003 + targetPort: 10003 + rpcadmin: + port: 10005 + targetPort: 10005 + dbtcp: + port: 9101 + targetPort: 1521 + dbweb: + port: 8080 + targetPort: 81 + springboot: # This is for the springboot server + targetPort: 20001 + port: 20001 + expressapi: # This is for the express api server + targetPort: 3000 + port: 3000 + + - organization: + name: carrier + country: GB + state: London + location: London + subject: "O=Carrier,OU=Carrier,L=London,C=GB" + type: node + external_url_suffix: "EXTERNAL_URL_SUFFIX" + + cloud_provider: aws # Options: aws, azure, gcp + aws: + access_key: "AWS_ACCESS_KEY" # AWS Access key, only used when cloud_provider=aws + secret_key: "AWS_SECRET_KEY" # AWS Secret key, only used when cloud_provider=aws + + # Kubernetes cluster deployment variables. The config file path and name has to be provided in case + # the cluster has already been created. + k8s: + context: "CLUSTER_CONTEXT" + config_file: "CLUSTER_CONFIG" + + # Hashicorp Vault server address and root-token. Vault should be unsealed. + # Do not check-in root_token + vault: + url: "VAULT_ADDR" + root_token: "VAULT_ROOT_TOKEN" + + # Git Repo details which will be used by GitOps/Flux. + # Do not check-in git_access_token + gitops: + git_protocol: "https" # Option for git over https or ssh + git_url: "https://github.com/GIT_USERNAME/bevel.git" # Gitops https or ssh url for flux value files + branch: "GIT_BRANCH" # Git branch where release is being made + release_dir: "platforms/r3-corda/releases/dev" # Relative Path in the Git repo for flux sync per environment. + chart_source: "platforms/r3-corda/charts" # Relative Path where the Helm charts are stored in Git repo + git_repo: "github.com/GIT_USERNAME/bevel.git" # Gitops git repository URL for git push + username: "GIT_USERNAME" # Git Service user who has rights to check-in in all branches + password: "GIT_TOKEN" # Git Server user password/token (Optional for ssh; Required for https) + email: "GIT_EMAIL_ADDR" # Email to use in git config + private_key: "PRIVATE_KEY_PATH" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) + + services: + peers: + - peer: + name: carrier + subject: "O=Carrier,OU=Carrier,L=51.50/-0.13/London,C=GB" # This is the node subject. 
L=lat/long is mandatory for supplychain sample app + type: node + p2p: + port: 10002 + targetPort: 10002 + ambassador: 15020 #Port for ambassador service (must be from env.ambassadorPorts above) + rpc: + port: 10003 + targetPort: 10003 + rpcadmin: + port: 10005 + targetPort: 10005 + dbtcp: + port: 9101 + targetPort: 1521 + dbweb: + port: 8080 + targetPort: 81 + springboot: + targetPort: 20001 + port: 20001 + expressapi: + targetPort: 3000 + port: 3000 + + - organization: + name: store + country: US + state: New York + location: New York + subject: "O=Store,OU=Store,L=New York,C=US" + type: node + external_url_suffix: "EXTERNAL_URL_SUFFIX" + + cloud_provider: aws # Options: aws, azure, gcp + aws: + access_key: "AWS_ACCESS_KEY" # AWS Access key, only used when cloud_provider=aws + secret_key: "AWS_SECRET_KEY" # AWS Secret key, only used when cloud_provider=aws + + # Kubernetes cluster deployment variables. The config file path and name has to be provided in case + # the cluster has already been created. + k8s: + context: "CLUSTER_CONTEXT" + config_file: "CLUSTER_CONFIG" + + # Hashicorp Vault server address and root-token. Vault should be unsealed. + # Do not check-in root_token + vault: + url: "VAULT_ADDR" + root_token: "VAULT_ROOT_TOKEN" + + # Git Repo details which will be used by GitOps/Flux. + # Do not check-in git_access_token + gitops: + git_protocol: "https" # Option for git over https or ssh + git_url: "https://github.com/GIT_USERNAME/bevel.git" # Gitops https or ssh url for flux value files + branch: "GIT_BRANCH" # Git branch where release is being made + release_dir: "platforms/r3-corda/releases/dev" # Relative Path in the Git repo for flux sync per environment. + chart_source: "platforms/r3-corda/charts" # Relative Path where the Helm charts are stored in Git repo + git_repo: "github.com/GIT_USERNAME/bevel.git" # Gitops git repository URL for git push + username: "GIT_USERNAME" # Git Service user who has rights to check-in in all branches + password: "GIT_TOKEN" # Git Server user password/token (Optional for ssh; Required for https) + email: "GIT_EMAIL_ADDR" # Email to use in git config + private_key: "PRIVATE_KEY_PATH" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) + + services: + peers: + - peer: + name: store + subject: "O=Store,OU=Store,L=40.73/-74/New York,C=US" # This is the node identity. L=lat/long is mandatory for supplychain sample app + type: node + p2p: + port: 10002 + targetPort: 10002 + ambassador: 15020 #Port for ambassador service (must be from env.ambassadorPorts above) + rpc: + port: 10003 + targetPort: 10003 + rpcadmin: + port: 10005 + targetPort: 10005 + dbtcp: + port: 9101 + targetPort: 1521 + dbweb: + port: 8080 + targetPort: 81 + springboot: + targetPort: 20001 + port: 20001 + expressapi: + targetPort: 3000 + port: 3000 + + - organization: + name: warehouse + country: US + state: Massachusetts + location: Boston + subject: "O=Warehouse,OU=Warehouse,L=Boston,C=US" + type: node + external_url_suffix: "EXTERNAL_URL_SUFFIX" + + cloud_provider: aws # Options: aws, azure, gcp + aws: + access_key: "AWS_ACCESS_KEY" # AWS Access key, only used when cloud_provider=aws + secret_key: "AWS_SECRET_KEY" # AWS Secret key, only used when cloud_provider=aws + + # Kubernetes cluster deployment variables. The config file path and name has to be provided in case + # the cluster has already been created. + k8s: + context: "CLUSTER_CONTEXT" + config_file: "CLUSTER_CONFIG" + + # Hashicorp Vault server address and root-token. 
Vault should be unsealed. + # Do not check-in root_token + vault: + url: "VAULT_ADDR" + root_token: "VAULT_ROOT_TOKEN" + + # Git Repo details which will be used by GitOps/Flux. + # Do not check-in git_access_token + gitops: + git_protocol: "https" # Option for git over https or ssh + git_url: "https://github.com/GIT_USERNAME/bevel.git" # Gitops https or ssh url for flux value files + branch: "GIT_BRANCH" # Git branch where release is being made + release_dir: "platforms/r3-corda/releases/dev" # Relative Path in the Git repo for flux sync per environment. + chart_source: "platforms/r3-corda/charts" # Relative Path where the Helm charts are stored in Git repo + git_repo: "github.com/GIT_USERNAME/bevel.git" # Gitops git repository URL for git push + username: "GIT_USERNAME" # Git Service user who has rights to check-in in all branches + password: "GIT_TOKEN" # Git Server user password/token (Optional for ssh; Required for https) + email: "GIT_EMAIL_ADDR" # Email to use in git config + private_key: "PRIVATE_KEY_PATH" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) + + services: + peers: + - peer: + name: warehouse + subject: "O=Warehouse,OU=Warehouse,L=42.36/-71.06/Boston,C=US" # This is the node identity. L=lat/long is mandatory for supplychain sample app + type: node + p2p: + port: 10002 + targetPort: 10002 + ambassador: 15020 #Port for ambassador service (must be from env.ambassadorPorts above) + rpc: + port: 10003 + targetPort: 10003 + rpcadmin: + port: 10005 + targetPort: 10005 + dbtcp: + port: 9101 + targetPort: 1521 + dbweb: + port: 8080 + targetPort: 81 + springboot: + targetPort: 20001 + port: 20001 + expressapi: + targetPort: 3000 + port: 3000 diff --git a/platforms/shared/charts/bevel-scripts/scripts/bevel-vault.sh b/platforms/shared/charts/bevel-scripts/scripts/bevel-vault.sh index 6c61847eb4c..04fbdd7b07f 100644 --- a/platforms/shared/charts/bevel-scripts/scripts/bevel-vault.sh +++ b/platforms/shared/charts/bevel-scripts/scripts/bevel-vault.sh @@ -37,7 +37,7 @@ initHashicorpVaultToken() { # Extract error message (if any) from the response using jq ERROR=$(echo "$RESPONSE" | jq -r '.errors[0]') # Extract the Vault secret data from the response using jq - VAULT_TOKEN=$(echo "$RESPONSE" | jq -r '.auth.client_token') + export VAULT_TOKEN=$(echo "$RESPONSE" | jq -r '.auth.client_token') # Check if the Vault token is empty, null, or contains errors if [ -z "$VAULT_TOKEN" ] || [ "$VAULT_TOKEN" = "null" ] || echo "$VAULT_TOKEN" | grep -q "errors"; then diff --git a/platforms/shared/charts/bevel-storageclass/README.md b/platforms/shared/charts/bevel-storageclass/README.md index cc51766df9a..57d0369d785 100644 --- a/platforms/shared/charts/bevel-storageclass/README.md +++ b/platforms/shared/charts/bevel-storageclass/README.md @@ -13,7 +13,7 @@ helm repo add bevel https://hyperledger.github.io/bevel helm install my-storageclass bevel/bevel-storageclass ``` -## Prerequisitess +## Prerequisites - Kubernetes 1.19+ - Helm 3.2.0+ diff --git a/platforms/shared/charts/bevel-vault-mgmt/Chart.yaml b/platforms/shared/charts/bevel-vault-mgmt/Chart.yaml index f769ba6ffe1..e9c1b1288aa 100644 --- a/platforms/shared/charts/bevel-vault-mgmt/Chart.yaml +++ b/platforms/shared/charts/bevel-vault-mgmt/Chart.yaml @@ -6,7 +6,7 @@ apiVersion: v2 name: bevel-vault-mgmt description: "Shared: Vault and Kubernetes configuration" -version: 1.0.0 +version: 1.0.1 appVersion: "latest" keywords: - bevel diff --git a/platforms/shared/charts/bevel-vault-mgmt/README.md 
b/platforms/shared/charts/bevel-vault-mgmt/README.md index b1a6e22de96..bbe9a4b81b0 100644 --- a/platforms/shared/charts/bevel-vault-mgmt/README.md +++ b/platforms/shared/charts/bevel-vault-mgmt/README.md @@ -13,7 +13,7 @@ helm repo add bevel https://hyperledger.github.io/bevel helm install my-release bevel/bevel-vault-mgmt ``` -## Prerequisitess +## Prerequisites - Kubernetes 1.19+ - HashiCorp Vault Server 1.13.1+ @@ -58,8 +58,8 @@ These parameters are refered to as same in each parent or child chart | `global.vault.address`| URL of the Vault server. | `""` | | `global.vault.authPath` | Authentication path for Vault | `supplychain` | | `global.vault.network` | Network type which will determine the vault policy | `besu` | -| `global.vault.secretEngine` | Provide the value for vault secret engine name | `secretsv2` | -| `global.vault.secretPrefix` | Provide the value for vault secret prefix which must start with `data/` | `data/supplychain` | +| `global.vault.secretEngine` | Vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | Vault secret prefix which must start with `data/` | `data/supplychain` | | `global.vault.tls` | Enable or disable TLS for vault communication if value present or not | `""` | ### Image @@ -68,7 +68,7 @@ These parameters are refered to as same in each parent or child chart |------------|-----------|---------| | `image.repository` | Docker image repo which will be used for this job | `ghcr.io/hyperledger/bevel-alpine` | | `image.tag` | Docker image tag which will be used for this job | `latest` | -| `image.pullSecret` | Provide the docker secret name | `""` | +| `image.pullSecret` | Secret name in the namespace containing private image registry credentials | `""` | ### Common parameters diff --git a/platforms/shared/charts/bevel-vault-mgmt/templates/configmap.yaml b/platforms/shared/charts/bevel-vault-mgmt/templates/configmap.yaml index 21cfb336ccf..97ce03f870e 100644 --- a/platforms/shared/charts/bevel-vault-mgmt/templates/configmap.yaml +++ b/platforms/shared/charts/bevel-vault-mgmt/templates/configmap.yaml @@ -15,7 +15,6 @@ metadata: helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . 
| nindent 2 }} data: policies-config.json: |- {{ if eq .Values.global.vault.network "besu" -}} @@ -51,7 +50,7 @@ data: {{ if eq .Values.global.vault.network "quorum" -}} { "policy": - "path \"{{ .Values.global.vault.secretEngine }}/{{ .Values.global.vault.secretPrefix }}/crypto/*\" { + "path \"{{ .Values.global.vault.secretEngine }}/{{ .Values.global.vault.secretPrefix }}/*\" { capabilities = [\"list\", \"read\", \"create\", \"update\"] } path \"{{ .Values.global.vault.secretEngine }}/{{ .Values.global.vault.secretPrefix }}/smartContracts/*\" { @@ -75,3 +74,11 @@ data: }" } {{ end }} + {{ if eq .Values.global.vault.network "indy" -}} + { + "policy": + "path \"{{ .Values.global.vault.secretEngine }}/{{ .Values.global.vault.secretPrefix }}/*\" { + capabilities = [\"list\", \"read\", \"create\", \"update\"] + }" + } + {{ end }} diff --git a/platforms/shared/charts/bevel-vault-mgmt/templates/job.yaml b/platforms/shared/charts/bevel-vault-mgmt/templates/job.yaml index 83b1159e7b8..db29388554f 100644 --- a/platforms/shared/charts/bevel-vault-mgmt/templates/job.yaml +++ b/platforms/shared/charts/bevel-vault-mgmt/templates/job.yaml @@ -17,7 +17,6 @@ metadata: helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} spec: backoffLimit: 6 template: diff --git a/platforms/shared/charts/haproxy-ingress/haproxy-ingress-0.13.9.tgz b/platforms/shared/charts/haproxy-ingress/haproxy-ingress-0.13.9.tgz deleted file mode 100644 index bea4454db17..00000000000 Binary files a/platforms/shared/charts/haproxy-ingress/haproxy-ingress-0.13.9.tgz and /dev/null differ diff --git a/platforms/shared/charts/haproxy-ingress/haproxy-ingress-0.14.6.tgz b/platforms/shared/charts/haproxy-ingress/haproxy-ingress-0.14.6.tgz new file mode 100644 index 00000000000..a45fd764beb Binary files /dev/null and b/platforms/shared/charts/haproxy-ingress/haproxy-ingress-0.14.6.tgz differ diff --git a/platforms/shared/charts/haproxy-ingress/values.yaml b/platforms/shared/charts/haproxy-ingress/values.yaml index 6ec3870fb57..654c8627ad0 100644 --- a/platforms/shared/charts/haproxy-ingress/values.yaml +++ b/platforms/shared/charts/haproxy-ingress/values.yaml @@ -4,6 +4,8 @@ rbac: secret: write: false security: + # Configures PodSecurityPolicy. This resource was removed on Kubernetes v1.25, + # so it is ignored on clusters version v1.25 or newer. enable: false # Create ServiceAccount @@ -13,14 +15,17 @@ serviceAccount: # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name: + # Automount API credentials for the ServiceAccount. 
+ automountServiceAccountToken: true nameOverride: "" fullnameOverride: "haproxy-ingress" controller: image: - repository: quay.io/jcmoraisjr/haproxy-ingress - tag: v0.13.9 + registry: quay.io + repository: jcmoraisjr/haproxy-ingress + tag: v0.14.6 pullPolicy: IfNotPresent imagePullSecrets: [] @@ -71,7 +76,7 @@ controller: ## Uses ingressClass as name for the IngressClass ## ingressClassResource: - enabled: false + enabled: true default: false controllerClass: "" parameters: {} @@ -98,6 +103,10 @@ controller: successThreshold: 1 timeoutSeconds: 1 + ## Annotations to be added to DaemonSet/Deployment definitions + ## + annotations: {} + ## Annotations to be added to controller pods ## podAnnotations: {} @@ -142,6 +151,9 @@ controller: # hello_again.lua: | # core.Debug("Hello again HAProxy!\n") + # Automount API credentials to the controller's pod + automountServiceAccountToken: true + # Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 # is merged @@ -176,7 +188,7 @@ controller: ## DaemonSet or Deployment ## - kind: Deployment + kind: DaemonSet # TCP service key:value pairs # : /:[:[][:]] @@ -184,6 +196,11 @@ controller: tcp: {} # 8080: "default/example-tcp-svc:9000" + # default values for http and https containerPorts + containerPorts: + http: 80 + https: 443 + # optionally disable static ports, including the default 80 and 443 enableStaticPorts: true @@ -217,7 +234,8 @@ controller: # Deployment replicaCount: 1 - # PodDisruptionBudget + # A PodDisruptionBudget is created only if minAvailable is + # greater than 0 (zero) and lesser than the replicaCount minAvailable: 1 resources: {} @@ -252,7 +270,7 @@ controller: ## nodeSelector: {} - ## The 'publishService' setting allows customization of the source of the IP address or FQDN to report + ## The 'publishService' setting allows customization of the source of the IP address or FQDN to report ## in the ingress status field. If disabled (default), the field will not be set by the controller. ## If enabled, it reads the information provided by the service, unless pathOverride is specified. ## If a value for 'publish-service' is specified in controller.extraArgs, it overrides this setting. @@ -284,6 +302,7 @@ controller: # ipFamilies: [IPv4] # ipFamilyPolicy: PreferDualStack + loadBalancerClass: "" loadBalancerIP: "" loadBalancerSourceRanges: [] @@ -319,8 +338,9 @@ controller: enabled: false image: + registry: docker.io repository: haproxy - tag: "2.3.21-alpine" + tag: "2.6.14-alpine" pullPolicy: IfNotPresent ## Additional command line arguments to pass to haproxy @@ -334,6 +354,12 @@ controller: # cpu: 500m # memory: 768Mi + # Configure container lifecycle. When scaling replicas down this can be + # used to prevent controller container from terminating quickly and drop in-flight requests. + # For example, when the controller runs behind Network Load Balancer this can be used + # to configure preStop hook to sleep along with deregistration_delay. 
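+    # Illustrative sketch only (not a chart default): to drain in-flight connections behind an AWS NLB,
+    # a preStop sleep roughly matching the target group's deregistration_delay could be set here, e.g.:
+    #   lifecycle:
+    #     preStop:
+    #       exec:
+    #         command: ["/bin/sh", "-c", "sleep 300"]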
+ lifecycle: {} + ## Container Security Context for the haproxy container ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ ## @@ -380,7 +406,8 @@ controller: # (scrapes the stats port and exports metrics to prometheus) # Only used if embedded == false image: - repository: quay.io/prometheus/haproxy-exporter + registry: quay.io + repository: prometheus/haproxy-exporter tag: "v0.11.0" pullPolicy: IfNotPresent @@ -415,6 +442,7 @@ controller: ## externalIPs: [] + loadBalancerClass: "" loadBalancerIP: "" loadBalancerSourceRanges: [] servicePort: 9101 @@ -423,6 +451,7 @@ controller: ## If controller.stats.enabled = true and controller.metrics.enabled = true and controller.serviceMonitor.enabled = true, Prometheus ServiceMonitor will be created ## Ref: https://coreos.com/operators/prometheus/docs/latest/api.html#servicemonitor + ## Ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md ## serviceMonitor: enabled: false @@ -469,11 +498,13 @@ controller: enabled: false # syslog for haproxy - # https://github.com/whereisaaron/kube-syslog-sidecar + # https://github.com/crisu1710/kube-syslog-sidecar # (listens on UDP port 514 and outputs to stdout) + # registry needs to be in quotes image: - repository: whereisaaron/kube-syslog-sidecar - tag: latest + registry: "ghcr.io" + repository: crisu1710/kube-syslog-sidecar + tag: "0.2.0" pullPolicy: IfNotPresent ## Additional volume mounts @@ -505,10 +536,14 @@ defaultBackend: name: default-backend image: - repository: k8s.gcr.io/defaultbackend-amd64 + registry: k8s.gcr.io + repository: defaultbackend-amd64 tag: "1.5" pullPolicy: IfNotPresent + imagePullSecrets: [] + # - name: secret-name + ## Node tolerations for server scheduling to nodes with taints ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ ## @@ -536,7 +571,8 @@ defaultBackend: # Deployment replicaCount: 1 - # PodDisruptionBudget + # A PodDisruptionBudget is created only if minAvailable is + # greater than 0 (zero) and lesser than the replicaCount minAvailable: 1 resources: @@ -557,8 +593,10 @@ defaultBackend: ## externalIPs: [] + loadBalancerClass: "" loadBalancerIP: "" loadBalancerSourceRanges: [] + servicePort: 8080 type: ClusterIP @@ -571,3 +609,7 @@ defaultBackend: ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ ## securityContext: {} + + ## Priority Class for the default backend container + ## + priorityClassName: "" diff --git a/platforms/shared/configuration/delete-network.yaml b/platforms/shared/configuration/delete-network.yaml index b02859b2dda..5ecbd6ad4a2 100644 --- a/platforms/shared/configuration/delete-network.yaml +++ b/platforms/shared/configuration/delete-network.yaml @@ -12,8 +12,6 @@ gather_facts: no no_log: "{{ no_ansible_log | default(false) }}" tasks: - # ---------------------------------------------------------------------- - # Uninstalling Flux for organisation - name: Delete Flux include_role: @@ -108,4 +106,4 @@ "corda": "ns", "besu": "bes", "substrate": "subs" - }] + }] diff --git a/platforms/shared/configuration/roles/check/directory/tasks/main.yaml b/platforms/shared/configuration/roles/check/directory/tasks/main.yaml index 6c39a8c4b42..118db9034bd 100644 --- a/platforms/shared/configuration/roles/check/directory/tasks/main.yaml +++ b/platforms/shared/configuration/roles/check/directory/tasks/main.yaml @@ -19,4 +19,4 @@ recurse: yes mode: '0755' state: directory - when: not dir_check.stat.exists + # when: not dir_check.stat.exists diff 
--git a/platforms/shared/configuration/roles/check/helm_component/Readme.md b/platforms/shared/configuration/roles/check/helm_component/Readme.md deleted file mode 100644 index 73ec24be163..00000000000 --- a/platforms/shared/configuration/roles/check/helm_component/Readme.md +++ /dev/null @@ -1,65 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) -[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - -## ROLE: helm_component -This roles check if Pod is deployed or not and Job being deployed and completed or not. - -### Tasks -(Variables with * are fetched from the playbook which is calling this role) -#### 1. Wait for {{ component_type }} {{ component_name }} in {{ namespace }} -Task to check if Job deployed and completed . This task will try for a maximum number of times which is described in network.yaml with an interval of 30 seconds between each try. -##### Input Variables - - *component_type: The type of resource/organisation. - *component_ns: The organisation's namespace - kubernetes.config_file: The kubernetes config file - kubernetes.context: The kubernetes current context - -**retries**: It means this task will try to deploy the value file for a maximum time of retries mentioned i.e 10. -**delay**: It means each retry will happen at a gap of mentioned delay i.e 60 seconds. -**until**: It runs until *component_data.resources|length* > 0 and component_data.resources[0].status.succeeded is defined and component_data.resources[0].status.succeeded == 1, i.e. it will keep on retrying untill said resource if deployed and completed within mentioned retries. -**when**: It runs when *component_type* == "Job" , i.e. this task will run for Job . - -##### Output Variables - - component_data: This variable stores the output whether the job is deployed and completed. - -#### 2. Check for {{ job_title }} job on {{ component_name }} -Task to check if Job deployed and completed without the retry. -##### Input Variables - - *component_type: The type of resource/organisation. - *component_ns: The organisation's namespace - kubernetes.config_file: The kubernetes config file - kubernetes.context: The kubernetes current context - -**when**: It runs when *component_type* == "Job" , i.e. this task will run for Job . - -##### Output Variables - - result: This variable stores the output whether the job is deployed and completed. - -#### 3. Wait for {{ component_type }} {{ component_name }} in {{ namespace }} -Task to check if Pod deployed and running . This task will try for a maximum number of times as described in network.yaml or defined by the role calling it with an interval of 30 seconds between each try. Any role calling this task needs to have a variable called label_selectors. An implementation of label_selectors could be as follows - -```yaml -label_selectors: - - app = {{ component_name }} -``` -##### Input Variables - - *component_type: The type of resource/organisation. - *component_ns: The organisation's namespace - kubernetes.config_file: The kubernetes config file - kubernetes.context: The kubernetes current context - -**retries**: It means this task will try to deploy the value file for a maximum time of retries mentioned i.e 10. -**delay**: It means each retry will happen at a gap of mentioned delay i.e 60 seconds. 
-**until**: It runs untill *component_data.resources|length* > 0, i.e. it will keep on retrying untill said resource is up within mentioned retries. -**when**: It runs when *component_type* == "Pod" , i.e. this task will run for Pod . - -##### Output Variables - - component_data: This variable stores the output whether the pod is up and running or not. diff --git a/platforms/shared/configuration/roles/check/helm_component/tasks/main.yaml b/platforms/shared/configuration/roles/check/helm_component/tasks/main.yaml index ae6f56c337d..5513f70020b 100644 --- a/platforms/shared/configuration/roles/check/helm_component/tasks/main.yaml +++ b/platforms/shared/configuration/roles/check/helm_component/tasks/main.yaml @@ -8,7 +8,7 @@ # Task to check if Job deployed and completed # This task will try for a maximum of 10 times with an interval of # 60 seconds between each try -- name: "Wait for {{ component_type }} {{ component_name }} in {{ namespace }}" +- name: "Wait for job {{ component_name }} to complete in {{ namespace }}" k8s_info: kind: "Job" namespace: "{{ namespace }}" @@ -23,7 +23,7 @@ when: component_type == "Job" # one time job check and registers the result variable -- name: "Check for {{ job_title }} job on {{ component_name }}" +- name: "Check for job {{ component_name }} in {{ namespace }}" k8s_info: kind: "Pod" namespace: "{{ namespace }}" @@ -37,7 +37,7 @@ # Task to check if Pod is deployed and running # This task will try for a maximum of 10 times with an interval of # 60 seconds between each try -- name: "Wait for {{ component_type }} {{ component_name }} in {{ namespace }}" +- name: "Wait for pod {{ component_name }} to start in {{ namespace }}" k8s_info: kind: "Pod" namespace: "{{ namespace }}" diff --git a/platforms/shared/configuration/roles/create/job_component/tasks/main.yaml b/platforms/shared/configuration/roles/create/job_component/tasks/main.yaml index aed786c8241..3be1a403b78 100644 --- a/platforms/shared/configuration/roles/create/job_component/tasks/main.yaml +++ b/platforms/shared/configuration/roles/create/job_component/tasks/main.yaml @@ -25,8 +25,8 @@ # Dependency update and test the value file for syntax errors/ missing values - name: Helm dependency update and lint shell: | - helm dependency update "{{playbook_dir}}/../../../{{charts_dir}}/{{charts[type]}}" - helm lint -f "{{ values_dir }}/{{ component_name }}.yaml" "{{playbook_dir}}/../../../{{charts_dir}}/{{charts[type]}}" + helm dependency update "{{ playbook_dir }}/../../../{{ charts_dir }}/{{ charts[type] }}" + helm lint -f "{{ values_dir }}/{{ component_name }}.yaml" "{{ playbook_dir }}/../../../{{ charts_dir }}/{{ charts[type] }}" - name: Check if helm release already exists in {{ component_ns }} kubernetes.core.helm_info: @@ -40,7 +40,7 @@ kubernetes.core.helm: release_name: "{{ component_name }}" release_namespace: "{{ component_ns }}" - chart_ref: "{{playbook_dir}}/../../../{{charts_dir}}/{{charts[type]}}" + chart_ref: "{{ playbook_dir }}/../../../{{ charts_dir }}/{{ charts[type] }}" values_files: - "{{ values_dir }}/{{ component_name }}.yaml" force: true diff --git a/platforms/shared/configuration/roles/create/job_component/templates/corda_ent_cenm.tpl b/platforms/shared/configuration/roles/create/job_component/templates/corda_ent_cenm.tpl new file mode 100644 index 00000000000..ef9eb6a3e09 --- /dev/null +++ b/platforms/shared/configuration/roles/create/job_component/templates/corda_ent_cenm.tpl @@ -0,0 +1,56 @@ +global: + serviceAccountName: vault-auth + cluster: + provider: "{{ cloud_provider }}" + 
cloudNativeServices: false + kubernetesUrl: "{{ kubernetes_server }}" + vault: + type: hashicorp + role: vault-role + network: corda-enterprise + address: "{{ vault.url }}" + authPath: "{{ org_name }}" + secretEngine: secretsv2 + secretPrefix: "data/{{ org_name }}" + proxy: + provider: ambassador + externalUrlSuffix: {{ external_url_suffix }} + cenm: + sharedCreds: + truststore: password + keystore: password + identityManager: + port: 10000 + revocation: + port: 5053 + internal: + port: 5052 + auth: + port: 8081 + gateway: + port: 8080 + zone: + enmPort: 25000 + adminPort: 12345 + networkmap: + internal: + port: 5050 +settings: + removeKeysOnDelete: true +tls: + enabled: true + settings: + networkServices: true +storage: + size: 1Gi + dbSize: 5Gi + allowedTopologies: + enabled: false +subjects: + auth: {{ auth_subject }} + tlscrlsigner: {{ signer_subject}} + tlscrlissuer: {{ idman_crlissuer_subject }} + rootca: {{ root_ca }} + subordinateca: {{ subordinate_ca }} + idmanca: {{ idman_subject }} + networkmap: {{ networkmap_subject }} diff --git a/platforms/shared/configuration/roles/create/job_component/templates/corda_ent_init.tpl b/platforms/shared/configuration/roles/create/job_component/templates/corda_ent_init.tpl new file mode 100644 index 00000000000..8710e7531b9 --- /dev/null +++ b/platforms/shared/configuration/roles/create/job_component/templates/corda_ent_init.tpl @@ -0,0 +1,18 @@ +global: + serviceAccountName: vault-auth + cluster: + provider: "{{ cloud_provider }}" + cloudNativeServices: false + kubernetesUrl: "{{ kubernetes_server }}" + vault: + type: hashicorp + role: vault-role + network: corda-enterprise + address: "{{ vault.url }}" + authPath: "{{ org_name }}" + secretEngine: secretsv2 + secretPrefix: "data/{{ org_name }}" +proxy: + provider: ambassador +settings: + secondaryInit: {{ secondaryInit }} diff --git a/platforms/shared/configuration/roles/create/job_component/templates/create_channel_job.tpl b/platforms/shared/configuration/roles/create/job_component/templates/create_channel_job.tpl new file mode 100644 index 00000000000..985d5e199ed --- /dev/null +++ b/platforms/shared/configuration/roles/create/job_component/templates/create_channel_job.tpl @@ -0,0 +1,39 @@ +global: + version: {{ network.version }} + serviceAccountName: vault-auth + cluster: + provider: {{ org.cloud_provider }} + cloudNativeServices: false + vault: + type: hashicorp + network: fabric + address: {{ vault.url }} + authPath: {{ network.env.type }}{{ name }} + secretEngine: {{ vault.secret_path | default("secretsv2") }} + secretPrefix: "data/{{ network.env.type }}{{ name }}" + role: vault-role + tls: false + proxy: + provider: {{ network.env.proxy | quote }} + externalUrlSuffix: {{ org.external_url_suffix }} + +image: + fabricTools: {{ docker_url }}/{{ fabric_tools_image }} + alpineUtils: {{ docker_url }}/bevel-alpine:{{ bevel_alpine_version }} +{% if network.docker.username is defined and network.docker.password is defined %} + pullSecret: regcred +{% else %} + pullSecret: "" +{% endif %} + +peer: + name: {{ peer_name }} +{% if provider == 'none' %} + address: {{ peer_name }}.{{ component_ns }}:7051 +{% else %} + address: {{ peer_adress }} +{% endif %} + localMspId: {{ org.name | lower }}MSP + logLevel: debug + tlsStatus: true + ordererAddress: {{ peer.ordererAddress }} diff --git a/platforms/shared/configuration/roles/create/job_component/templates/fabric_genesis.tpl b/platforms/shared/configuration/roles/create/job_component/templates/fabric_genesis.tpl new file mode 100755 index 
00000000000..30ab3805790 --- /dev/null +++ b/platforms/shared/configuration/roles/create/job_component/templates/fabric_genesis.tpl @@ -0,0 +1,95 @@ +global: + version: {{ network.version }} + serviceAccountName: vault-auth + cluster: + provider: {{ org.cloud_provider }} + cloudNativeServices: false + vault: + type: hashicorp + network: fabric + address: {{ vault.url }} + authPath: {{ network.env.type }}{{ name }} + secretEngine: {{ vault.secret_path | default("secretsv2") }} + secretPrefix: "data/{{ network.env.type }}{{ name }}" + role: vault-role + tls: false + proxy: + provider: {{ network.env.proxy | quote }} + externalUrlSuffix: {{ org.external_url_suffix }} + +image: + alpineUtils: {{ docker_url }}/bevel-alpine:{{ bevel_alpine_version }} + fabricTools: {{ docker_url }}/{{ fabric_tools_image }} +{% if network.docker.username is defined and network.docker.password is defined %} + pullSecret: regcred +{% else %} + pullSecret: "" +{% endif %} + +organizations: +{% for organization in network.organizations %} +{% for data, value in organization.items() %} +{% if data == 'name' %} + - name: {{ value }} +{% endif %} +{% endfor %} +{% for service in organization.services %} +{% if service == 'orderers' %} + orderers: +{% for orderer in organization.services.orderers %} +{% for key, value in orderer.items() %} +{% if key == 'name' %} + - name: {{ value }} +{% endif %} +{% if key == 'ordererAddress' %} + ordererAddress: {{ value }} +{% endif %} +{% endfor %} +{% endfor %} +{% endif %} +{% if service == 'peers' %} + peers: +{% for peer in organization.services.peers %} +{% for key, value in peer.items() %} +{% if key == 'name' %} + - name: {{ value }} +{% endif %} +{% if key == 'peerAddress' %} + peerAddress: {{ value }} +{% endif %} +{% endfor %} +{% endfor %} +{% endif %} +{% endfor %} +{% endfor %} + +consensus: {{ consensus.name }} + +{% if consensus.name == 'kafka' %} +kafka: + brokers: +{% for i in range(consensus.replicas) %} + - {{ consensus.name }}-{{ i }}.{{ consensus.type }}.{{ component_ns }}.svc.cluster.local:{{ consensus.grpc.port }} +{% endfor %} +{% endif %} + +channels: +{% for channel in network.channels %} +{% if channel.channel_status == 'new' %} + - name: {{ channel.channel_name | lower }} + consortium: {{ channel.consortium }} + orderers: +{% for ordererOrg in channel.orderers %} + - {{ ordererOrg }} +{% endfor %} + participants: +{% for participant in channel.participants %} + - {{ participant.name | lower }} +{% endfor %} +{% endif %} +{% endfor %} + +settings: + generateGenesis: {{ generateGenisisBLock }} + removeConfigMapOnDelete: false + diff --git a/platforms/shared/configuration/roles/create/job_component/templates/indy_endorser.tpl b/platforms/shared/configuration/roles/create/job_component/templates/indy_endorser.tpl new file mode 100644 index 00000000000..d139fd07a55 --- /dev/null +++ b/platforms/shared/configuration/roles/create/job_component/templates/indy_endorser.tpl @@ -0,0 +1,8 @@ +image: + cli: ghcr.io/hyperledger/bevel-indy-ledger-txn:latest + pullSecret: +network: bevel +admin: {{ trustee }} +newIdentity: + name: {{ endorser }} + role: ENDORSER diff --git a/platforms/shared/configuration/roles/create/job_component/templates/indy_genesis.tpl b/platforms/shared/configuration/roles/create/job_component/templates/indy_genesis.tpl new file mode 100644 index 00000000000..b287c74c623 --- /dev/null +++ b/platforms/shared/configuration/roles/create/job_component/templates/indy_genesis.tpl @@ -0,0 +1,34 @@ +global: + serviceAccountName: vault-auth + cluster: + 
provider: "{{ cloud_provider }}" + cloudNativeServices: false + kubernetesUrl: "{{ kubernetes_server }}" + vault: + type: hashicorp + role: vault-role + network: indy + address: "{{ vault.url }}" + authPath: "{{ org_name }}" + secretEngine: secretsv2 + secretPrefix: "data/{{ org_name }}" +proxy: + provider: ambassador +settings: + removeKeysOnDelete: true + secondaryGenesis: {{ secondaryGenesis }} +{% if (not secondaryGenesis) and (trustee_list is defined) %} + trustees: +{% for trustee in trustee_list %} + - name: "{{ trustee }}" +{% endfor %} +{% if steward_list is defined %} + stewards: +{% for steward in steward_list %} + - name: "{{ steward.name }}" + publicIp: {{ steward.publicIp }} + nodePort: {{ steward.nodePort }} + clientPort: {{ steward.clientPort }} +{% endfor %} +{% endif %} +{% endif %} diff --git a/platforms/shared/configuration/roles/create/job_component/templates/join_channel_job.tpl b/platforms/shared/configuration/roles/create/job_component/templates/join_channel_job.tpl new file mode 100644 index 00000000000..3a897b0b781 --- /dev/null +++ b/platforms/shared/configuration/roles/create/job_component/templates/join_channel_job.tpl @@ -0,0 +1,41 @@ +global: + version: {{ network.version }} + serviceAccountName: vault-auth + cluster: + provider: {{ org.cloud_provider }} + cloudNativeServices: false + vault: + type: hashicorp + network: fabric + address: {{ vault.url }} + authPath: {{ network.env.type }}{{ name }} + secretEngine: {{ vault.secret_path | default("secretsv2") }} + secretPrefix: "data/{{ network.env.type }}{{ name }}" + role: vault-role + tls: false + proxy: + provider: {{ network.env.proxy | quote }} + externalUrlSuffix: {{ org.external_url_suffix }} + +image: + fabricTools: {{ docker_url }}/{{ fabric_tools_image }} + alpineUtils: {{ docker_url }}/bevel-alpine:{{ bevel_alpine_version }} +{% if network.docker.username is defined and network.docker.password is defined %} + pullSecret: regcred +{% else %} + pullSecret: "" +{% endif %} + +peer: + name: {{ peer_name }} + type: {{ peer_type }} +{% if network.env.proxy == 'none' %} + address: {{ peer.name }}.{{ component_ns }}:7051 +{% else %} + address: {{ peer.peerAddress }} +{% endif %} + localMspId: {{ org.name | lower}}MSP + logLevel: debug + tlsStatus: true + channelName: {{ channel_name }} + ordererAddress: {{ participant.ordererAddress }} diff --git a/platforms/shared/configuration/roles/create/job_component/templates/osn_create_channel_job.tpl b/platforms/shared/configuration/roles/create/job_component/templates/osn_create_channel_job.tpl new file mode 100644 index 00000000000..afd95b42c2d --- /dev/null +++ b/platforms/shared/configuration/roles/create/job_component/templates/osn_create_channel_job.tpl @@ -0,0 +1,46 @@ +global: + version: {{ network.version }} + serviceAccountName: vault-auth + cluster: + provider: {{ org.cloud_provider }} + cloudNativeServices: false + vault: + type: hashicorp + network: fabric + address: {{ vault.url }} + authPath: {{ network.env.type }}{{ name }} + secretEngine: {{ vault.secret_path | default("secretsv2") }} + secretPrefix: "data/{{ network.env.type }}{{ name }}" + role: vault-role + tls: false + proxy: + provider: {{ network.env.proxy | quote }} + externalUrlSuffix: {{ org.external_url_suffix }} + +image: + fabricTools: {{ docker_url }}/{{ fabric_tools_image }} + alpineUtils: {{ docker_url }}/bevel-alpine:{{ bevel_alpine_version }} +{% if network.docker.username is defined and network.docker.password is defined %} + pullSecret: regcred +{% else %} + pullSecret: "" +{% 
endif %} + +orderers: +{% for orderer in orderers_list %} +{% for key, value in orderer.items() %} +{% if key == 'name' %} + - name: {{ value }} + adminAddress: {{ value }}.{{ component_ns }}:7055 +{% endif %} +{% endfor %} +{% endfor %} + +addOrderer: {{ add_orderer_value }} + +{% if add_orderer is defined and add_orderer is sameas true %} +orderer: + name: {{ first_orderer.name }} + localMspId: {{ org.name | lower}}MSP + address: {{ first_orderer.ordererAddress }} +{% endif %} diff --git a/platforms/shared/configuration/roles/create/job_component/templates/primary_genesis.tpl b/platforms/shared/configuration/roles/create/job_component/templates/primary_genesis.tpl index 662596f19f8..2d79849d53e 100644 --- a/platforms/shared/configuration/roles/create/job_component/templates/primary_genesis.tpl +++ b/platforms/shared/configuration/roles/create/job_component/templates/primary_genesis.tpl @@ -2,8 +2,8 @@ global: serviceAccountName: vault-auth vault: - type: hashicorp - network: besu + type: {{ vault.type | default("hashicorp") }} + network: {{ network.type }} address: {{ vault.url }} authPath: {{ network.env.type }}{{ name }} secretEngine: {{ vault.secret_path | default("secretsv2") }} diff --git a/platforms/shared/configuration/roles/create/job_component/templates/primary_init.tpl b/platforms/shared/configuration/roles/create/job_component/templates/primary_init.tpl new file mode 100644 index 00000000000..755af8939ea --- /dev/null +++ b/platforms/shared/configuration/roles/create/job_component/templates/primary_init.tpl @@ -0,0 +1,17 @@ +# template for corda-init chart/job +global: + serviceAccountName: vault-auth + vault: + type: hashicorp + network: corda + address: {{ vault.url }} + authPath: {{ network.env.type }}{{ name }} + secretEngine: {{ vault.secret_path | default("secretsv2") }} + secretPrefix: "data/{{ network.env.type }}{{ name }}" + role: vault-role + cluster: + provider: {{ org.cloud_provider }} + cloudNativeServices: false + kubernetesUrl: {{ kubernetes_url }} +settings: + secondaryInit: false diff --git a/platforms/shared/configuration/roles/create/job_component/templates/secondary_genesis.tpl b/platforms/shared/configuration/roles/create/job_component/templates/secondary_genesis.tpl index e2a92aa2ef1..33cdc13d656 100644 --- a/platforms/shared/configuration/roles/create/job_component/templates/secondary_genesis.tpl +++ b/platforms/shared/configuration/roles/create/job_component/templates/secondary_genesis.tpl @@ -2,8 +2,8 @@ global: serviceAccountName: vault-auth vault: - type: hashicorp - network: besu + type: {{ vault.type | default("hashicorp") }} + network: {{ network.type }} address: {{ vault.url }} authPath: {{ network.env.type }}{{ name }} secretEngine: {{ vault.secret_path | default("secretsv2") }} diff --git a/platforms/shared/configuration/roles/create/job_component/templates/secondary_init.tpl b/platforms/shared/configuration/roles/create/job_component/templates/secondary_init.tpl new file mode 100644 index 00000000000..d779936bdca --- /dev/null +++ b/platforms/shared/configuration/roles/create/job_component/templates/secondary_init.tpl @@ -0,0 +1,17 @@ +# template for corda-init chart/job +global: + serviceAccountName: vault-auth + vault: + type: hashicorp + network: corda + address: {{ vault.url }} + authPath: {{ network.env.type }}{{ name }} + secretEngine: {{ vault.secret_path | default("secretsv2") }} + secretPrefix: "data/{{ network.env.type }}{{ name }}" + role: vault-role + cluster: + provider: {{ org.cloud_provider }} + cloudNativeServices: false + 
kubernetesUrl: {{ kubernetes_url }} +settings: + secondaryInit: true diff --git a/platforms/shared/configuration/roles/create/job_component/vars/main.yaml b/platforms/shared/configuration/roles/create/job_component/vars/main.yaml index b33ff008c1e..c5784c53697 100644 --- a/platforms/shared/configuration/roles/create/job_component/vars/main.yaml +++ b/platforms/shared/configuration/roles/create/job_component/vars/main.yaml @@ -7,6 +7,29 @@ job_templates: primary_genesis: primary_genesis.tpl secondary_genesis: secondary_genesis.tpl + primary_init: primary_init.tpl + secondary_init: secondary_init.tpl + fabric_genesis: fabric_genesis.tpl + osn_create_channel_job: osn_create_channel_job.tpl + create_channel_job: create_channel_job.tpl + join_channel_job: join_channel_job.tpl + indy_genesis: indy_genesis.tpl + indy_endorser: indy_endorser.tpl + corda_ent_init: corda_ent_init.tpl + corda_ent_cenm: corda_ent_cenm.tpl charts: - primary_genesis: besu-genesis - secondary_genesis: besu-genesis + primary_genesis: "{{ network.type }}-genesis" + secondary_genesis: "{{ network.type }}-genesis" + primary_init: corda-init + secondary_init: corda-init + fabric_genesis: fabric-genesis + osn_create_channel_job: fabric-osnadmin-channel-create + create_channel_job: fabric-channel-create + join_channel_job: fabric-channel-join + +bevel_alpine_version: latest +fabric_tools_image: bevel-fabric-tools +indy_genesis: indy-genesis +indy_endorser: indy-register-identity +corda_ent_init: enterprise-init +corda_ent_cenm: cenm diff --git a/platforms/shared/configuration/roles/create/shared_helm_component/templates/vault_kubernetes_job.tpl b/platforms/shared/configuration/roles/create/shared_helm_component/templates/vault_kubernetes_job.tpl index 7772b894788..0522e7de6f9 100644 --- a/platforms/shared/configuration/roles/create/shared_helm_component/templates/vault_kubernetes_job.tpl +++ b/platforms/shared/configuration/roles/create/shared_helm_component/templates/vault_kubernetes_job.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/shared/configuration/roles/create/shared_k8s_secrets/Readme.md b/platforms/shared/configuration/roles/create/shared_k8s_secrets/Readme.md deleted file mode 100644 index 231b996c53d..00000000000 --- a/platforms/shared/configuration/roles/create/shared_k8s_secrets/Readme.md +++ /dev/null @@ -1,153 +0,0 @@ -[//]: # (##############################################################################################) -[//]: # (Copyright Accenture. All Rights Reserved.) -[//]: # (SPDX-License-Identifier: Apache-2.0) -[//]: # (##############################################################################################) - -## ROLE: k8s_secrets -This role creates secrets to store the following resources: root token, reviewer token, and docker credentials -#### 1. Check if root token exists in the namespace -This task checks if the root token exists -##### Input Variables - - *kind: This defines the kind of Kubernetes resource - *namespace: Namespace of the component - *name: The name of secret - *kubeconfig: The config file of the cluster - *context: This refer to the required kubernetes cluster context -##### Output Variables - - root_token_secret: This variable stores the output of root token check query. - -#### 2. 
Put root token of every organization -This task creates the root tooken secret -##### Input Variables - *namespace: Namespace of the component - *vault: Contains the root token, Fetched using 'vault.' from network.yaml -**when**: Condition is specified here, runs only when *root_token_secret.resources* is not found. - -#### 3. Check if reviewer token exists in the namespace -This task checks if the reviewer token exists -##### Input Variables - - *kind: This defines the kind of Kubernetes resource - *namespace: Namespace of the component - *name: The name of secret - *kubeconfig: The config file of the cluster - *context: This refer to the required kubernetes cluster context -##### Output Variables - - reviewer_token_secret: This variable stores the output of reviewer token check query. - -#### 4. Put reviewer token of every organization -This task creates the reviewer tooken secrets -##### Input Variables - *KUBECONFIG: Contains config file of cluster, Fetched using 'kubernetes.' from network.yaml - *namespace: Namespace of the component -**shell** : This command creates the reviewer token secret. -**when**: Condition is specified here, runs only when *reviewer_token_secret.resources* is not found. - -#### 5. Check docker cred exists -This task checks if the docker credentials exists -##### Input Variables - - *kind: This defines the kind of Kubernetes resource - *namespace: Namespace of the component - *name: The name of credentials - *kubeconfig: The config file of the cluster - *context: This refer to the required kubernetes cluster context -##### Output Variables - - get_regcred: This variable stores the output of docker credentials check query. - -#### 6. Create the docker pull credentials -This task creates the docker pull credentials -##### Input Variables - *KUBECONFIG: Contains config file of cluster, Fetched using 'kubernetes.' from network.yaml - *namespace: Namespace of the component -**when**: Condition is specified here, runs only when *get_regcred.resources* is not found. - -#### 7. Check Ambassador cred exists -This task checks if Ambassador credentials exists already -##### Input Variables - - *kind: This defines the kind of Kubernetes resource - *namespace: Namespace of the component - *name: The name of credentials - *kubeconfig: The config file of the cluster - *context: This refer to the required kubernetes cluster context -##### Output Variables - - get_secret: This variable stores the output of Ambassador credentials check query. -**when**: Condition is specified here, runs only when *network.env.proxy* is ambassador. - -#### 8. Check if ca certs already created -This tasks checks if the CA certificates are already created or not. -##### Input Variables - - *VAULT_ADDR: Contains Vault URL, Fetched using 'vault.' from network.yaml - *VAULT_TOKEN: Contains Vault Token, Fetched using 'vault.' from network.yaml -##### Output Variables - - vault_capem_result: This variable stores the output of ca certificates check query. -**when**: Condition is specified here, runs only when *network.env.proxy* is ambassador. - -#### 9. Check if ca key already created -This tasks checks if the CA key are already created or not. -##### Input Variables - - *VAULT_ADDR: Contains Vault URL, Fetched using 'vault.' from network.yaml - *VAULT_TOKEN: Contains Vault Token, Fetched using 'vault.' from network.yaml -##### Output Variables - - vault_cakey_result: This variable stores the output of ca certificates check query. 
-**when**: Condition is specified here, runs only when *network.env.proxy* is ambassador. - -#### 10. Create the Ambassador credentials -This task creates the Ambassador TLS credentials -##### Input Variables - *KUBECONFIG: Contains config file of cluster, Fetched using 'kubernetes.' from network.yaml - *namespace: Namespace of the component -**when**: Conditions is specified here, runs only when *get_secret.resources* is not found, *vault_capem_result.failed* is False, *vault_cakey_result.failed* is False and *network.env.proxy* is ambassador. - -#### 11. Check Ambassador cred exists for orderers -This task checks if Ambassador credentials exists already for orderes -##### Input Variables - - *kind: This defines the kind of Kubernetes resource - *namespace: Namespace of the component - *name: The name of credentials - *kubeconfig: The config file of the cluster - *context: This refer to the required kubernetes cluster context -##### Output Variables - - get_secret: This variable stores the output of Ambassador credentials check query. -**when**: Condition is specified here, runs only when *network.env.proxy* is ambassador. - -#### 12. Check if ca certs already created for orderers -This tasks checks if the CA certificates are already created or not for orderers. -##### Input Variables - - *VAULT_ADDR: Contains Vault URL, Fetched using 'vault.' from network.yaml - *VAULT_TOKEN: Contains Vault Token, Fetched using 'vault.' from network.yaml -##### Output Variables - - vault_capem_result: This variable stores the output of ca certificates check query. -**when**: Condition is specified here, runs only when *network.env.proxy* is ambassador. - -#### 13. Check if ca key already created for orderers -This tasks checks if the CA key are already created or not for orderers. -##### Input Variables - - *VAULT_ADDR: Contains Vault URL, Fetched using 'vault.' from network.yaml - *VAULT_TOKEN: Contains Vault Token, Fetched using 'vault.' from network.yaml -##### Output Variables - - vault_cakey_result: This variable stores the output of ca certificates check query. -**when**: Condition is specified here, runs only when *network.env.proxy* is ambassador. - -#### 14. Create the Ambassador credentials for orderers -This task creates the Ambassador TLS credentials for orderers -##### Input Variables - *KUBECONFIG: Contains config file of cluster, Fetched using 'kubernetes.' from network.yaml - *namespace: Namespace of the component -**when**: Conditions is specified here, runs only when *get_orderer_secret.resources* is not found, *vault_orderercert_result.failed* is False, *vault_ordererkey_result.failed* is False and *network.env.proxy* is ambassador. 
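The tasks summarized above (and the new maven-credentials tasks added in the next hunk) follow the same check-then-create pattern: look the secret up in the organization namespace and only create it when the lookup returns nothing. A minimal kubectl sketch of that pattern is shown below; the namespace `supplychain` and the `MAVEN_USER`/`MAVEN_PASSWORD` variables are illustrative assumptions, not values taken from the playbooks.

```bash
# Hedged sketch of the check-then-create secret pattern used by these tasks.
# Assumptions (hypothetical): namespace "supplychain", secret name "maven-secrets",
# and MAVEN_USER / MAVEN_PASSWORD already exported in the shell.
NAMESPACE="supplychain"

if ! kubectl get secret maven-secrets --namespace "${NAMESPACE}" > /dev/null 2>&1; then
  # The secret does not exist yet, so create it; an existing secret is left untouched.
  kubectl create secret generic maven-secrets \
    --namespace "${NAMESPACE}" \
    --from-literal=username="${MAVEN_USER}" \
    --from-literal=password="${MAVEN_PASSWORD}"
fi
```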
diff --git a/platforms/shared/configuration/roles/create/shared_k8s_secrets/tasks/main.yaml b/platforms/shared/configuration/roles/create/shared_k8s_secrets/tasks/main.yaml index 37d449706c4..3fc535cebb7 100644 --- a/platforms/shared/configuration/roles/create/shared_k8s_secrets/tasks/main.yaml +++ b/platforms/shared/configuration/roles/create/shared_k8s_secrets/tasks/main.yaml @@ -77,3 +77,31 @@ kubeconfig: "{{ kubernetes.config_file }}" context: "{{ kubernetes.context }}" when: check == "docker_credentials" and get_regcred.resources|length == 0 + +# Check if maven secret exists in the org namespace +- name: Check if maven secret exists in the namespace + k8s_info: + kind: Secret + namespace: "{{ namespace }}" + name: "maven-secrets" + kubeconfig: "{{ kubernetes.config_file }}" + context: "{{ kubernetes.context }}" + register: maven_secret + when: check == "maven_credentials" + +# Create maven secret in org namespace +- name: Create maven secret for organization + k8s: + definition: + apiVersion: v1 + kind: Secret + metadata: + name: "maven-secrets" + namespace: "{{ namespace }}" + stringData: + username: "{{ org.cordapps.username }}" + password: "{{ org.cordapps.password }}" + state: present + kubeconfig: "{{ kubernetes.config_file }}" + context: "{{ kubernetes.context }}" + when: check == "maven_credentials" and maven_secret.resources|length == 0 diff --git a/platforms/shared/configuration/roles/delete/k8s_resources/tasks/main.yaml b/platforms/shared/configuration/roles/delete/k8s_resources/tasks/main.yaml index 1f73c34025b..2dc78f62cb6 100644 --- a/platforms/shared/configuration/roles/delete/k8s_resources/tasks/main.yaml +++ b/platforms/shared/configuration/roles/delete/k8s_resources/tasks/main.yaml @@ -10,6 +10,17 @@ KUBECONFIG={{ kubernetes.config_file }} helm ls -drq -n {{ organization_ns }} register: helm_info +# Remove all Helm releases of organization except genesis +- name: Delete Helm releases + kubernetes.core.helm: + kubeconfig: "{{ kubernetes.config_file }}" + name: "{{ item }}" + release_namespace: "{{ organization_ns }}" + state: absent + with_items: "{{ helm_info.stdout_lines }}" + when: + - network.type == "fabric" + # Remove all Helm releases of organization except genesis - name: Delete Helm releases kubernetes.core.helm: @@ -29,6 +40,8 @@ release_namespace: "{{ organization_ns }}" state: absent ignore_errors: yes # Ignore failure until all platforms have genesis similar to Besu + when: + - network.type != "fabric" - name: Get Helm releases shell: | diff --git a/platforms/shared/configuration/roles/git_push/tasks/main.yaml b/platforms/shared/configuration/roles/git_push/tasks/main.yaml index 36fdda731b0..e9624b56178 100644 --- a/platforms/shared/configuration/roles/git_push/tasks/main.yaml +++ b/platforms/shared/configuration/roles/git_push/tasks/main.yaml @@ -28,9 +28,8 @@ when: - gitops.git_protocol is defined - gitops.git_protocol == "ssh" - ignore_errors: yes - tags: - - notest + ignore_errors: true + # Git push the new files, reset config files - name: "Execute git push for https" @@ -47,9 +46,7 @@ register: GIT_OUTPUT when: gitops.git_protocol is not defined or gitops.git_protocol == "https" - ignore_errors: yes - tags: - - notest + ignore_errors: true # Display output of shell excution - name: "stdout for SSH gitpush" debug: @@ -58,8 +55,6 @@ when: - gitops.git_protocol is defined - gitops.git_protocol == "ssh" - tags: - - notest - name: "stderr for SSH gitpush" debug: @@ -67,21 +62,15 @@ when: - gitops.git_protocol is defined - gitops.git_protocol == "ssh" - tags: - 
notest # Display output of shell excution - name: "stdout for gitpush" debug: msg: "{{ GIT_OUTPUT.stdout.split('\n') }}" when: gitops.git_protocol is not defined or gitops.git_protocol == "https" - tags: - - notest # Display error of shell task - name: "stderr for git_push" debug: msg: "{{ GIT_OUTPUT.stderr.split('\n') }}" when: gitops.git_protocol is not defined or gitops.git_protocol == "https" - tags: - - notest diff --git a/platforms/shared/configuration/roles/helm_lint/tasks/main.yaml b/platforms/shared/configuration/roles/helm_lint/tasks/main.yaml index 1278e9d8106..f2b416cf20f 100644 --- a/platforms/shared/configuration/roles/helm_lint/tasks/main.yaml +++ b/platforms/shared/configuration/roles/helm_lint/tasks/main.yaml @@ -40,5 +40,5 @@ # Execute helm lint. If this fails, fix the errors - name: "Run helm lint" shell: | - helm lint -f "./build/test/{{metadata.name}}.yaml" "{{playbook_dir}}/../../../{{chart_path}}/{{charts[helmtemplate_type]}}" + helm lint -f "./build/test/{{ metadata.name }}.yaml" "{{ playbook_dir }}/../../../{{ chart_path }}/{{ charts[helmtemplate_type] }}" when: value_stat_result.stat.exists == True diff --git a/platforms/shared/configuration/roles/helm_lint/vars/main.yaml b/platforms/shared/configuration/roles/helm_lint/vars/main.yaml index 00a0f4c4352..d41ef961995 100644 --- a/platforms/shared/configuration/roles/helm_lint/vars/main.yaml +++ b/platforms/shared/configuration/roles/helm_lint/vars/main.yaml @@ -5,8 +5,7 @@ ############################################################################################## charts: - ca-orderer: fabric-ca-server - ca-peer: fabric-ca-server + ca-server: fabric-ca-server ca-tools: fabric-catools cas: fabric-ca-server orderers: fabric-orderernode @@ -21,36 +20,30 @@ charts: instantiate_chaincode_job: fabric-chaincode-instantiate invoke_chaincode_job: fabric-chaincode-invoke upgrade_chaincode_job: fabric-chaincode-upgrade - nms: corda-networkmap - mongodb: corda-mongodb - doorman: corda-doorman - notarydb: corda-h2 - nodedb: corda-h2 - db: corda-ent-h2 - notaryjob: corda-notary-initial-registration - nodejob: corda-node-initial-registration - notarynode: corda-notary - nodenode: corda-node + network_service: corda-network-service + corda_notary: corda-node + corda_node: corda-node quorum_tessera: quorum-tessera-node quorum: quorum-member-node - validatorquorum: quorum-validator-node - memberquorum: quorum-member-node + validatorquorum: quorum-node + memberquorum: quorum-node pki-generator: cenm-pki-gen - pki-generator-node: corda-ent-node-pki-gen signer: cenm-signer gateway: cenm-gateway idman: cenm-idman zone: cenm-zone nmap: cenm-networkmap + pki-generator-node: corda-ent-node-pki-gen + db: corda-ent-h2 notary-initial-registration: corda-ent-notary-initial-registration notary: corda-ent-notary bridge: corda-ent-bridge float: corda-ent-float node_registration: corda-ent-node-initial-registration + node: corda-ent-node cli: fabric-cli commit_chaincode_job: fabric-chaincode-commit approve_chaincode_job: fabric-chaincode-approve - node: corda-ent-node crypto_tessera: quorum-tessera-key-mgmt besu_member: besu-node validator: besu-node @@ -67,9 +60,14 @@ charts: node_substrate: substrate-node genesis_job: substrate-genesis dscp_ipfs_node: dscp-ipfs-node - certs-ambassador-quorum: quorum-tlscerts-gen + certs-ambassador-quorum: quorum-tlscert-gen crypto_raft_job: quorum-raft-crypto-gen fabric-connector: fabric-cacti-connector quorum-connector: quorum-cacti-connector external_chaincode: fabric-external-chaincode 
install_external_chaincode_job: fabric-external-chaincode-install + generate-keys: indy-key-mgmt + generate-genesis: indy-genesis + stewards: indy-node + corda_ent_notary: enterprise-node + corda_ent_network_map: cenm-networkmap diff --git a/platforms/shared/configuration/roles/setup/cactus-connector/templates/besu-connector.tpl b/platforms/shared/configuration/roles/setup/cactus-connector/templates/besu-connector.tpl index b98684bd8e9..422ccdb6156 100644 --- a/platforms/shared/configuration/roles/setup/cactus-connector/templates/besu-connector.tpl +++ b/platforms/shared/configuration/roles/setup/cactus-connector/templates/besu-connector.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ name }}-cactus diff --git a/platforms/shared/configuration/roles/setup/cactus-connector/templates/fabric-connector.tpl b/platforms/shared/configuration/roles/setup/cactus-connector/templates/fabric-connector.tpl index 0bb796c7de3..97b9e6b0229 100644 --- a/platforms/shared/configuration/roles/setup/cactus-connector/templates/fabric-connector.tpl +++ b/platforms/shared/configuration/roles/setup/cactus-connector/templates/fabric-connector.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ name }}-cactus diff --git a/platforms/shared/configuration/roles/setup/cactus-connector/templates/quorum-connector.tpl b/platforms/shared/configuration/roles/setup/cactus-connector/templates/quorum-connector.tpl index 905b6ff2999..746657577b4 100644 --- a/platforms/shared/configuration/roles/setup/cactus-connector/templates/quorum-connector.tpl +++ b/platforms/shared/configuration/roles/setup/cactus-connector/templates/quorum-connector.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ name }}-cactus diff --git a/platforms/shared/configuration/roles/setup/edge-stack/tasks/main.yaml b/platforms/shared/configuration/roles/setup/edge-stack/tasks/main.yaml index f7d2b15864a..82a209c1d57 100644 --- a/platforms/shared/configuration/roles/setup/edge-stack/tasks/main.yaml +++ b/platforms/shared/configuration/roles/setup/edge-stack/tasks/main.yaml @@ -47,7 +47,7 @@ - name: Create custom values for aes helm chart vars: ports: "{{ network.env.ambassadorPorts.ports | default([]) }}" - elastic_ip: "{{ allocation_ips_stdout | default('') }}" + elastic_ip: "{{ allocation_ips_stdout | default(item.publicIps[0] | default('')) }}" lbSourceRangeDefault: - 0.0.0.0/0 loadBalancerSourceRanges: "{{ network.env.loadBalancerSourceRanges | default(lbSourceRangeDefault) }}" diff --git a/platforms/shared/configuration/roles/setup/edge-stack/templates/aes-custom-resources.tpl b/platforms/shared/configuration/roles/setup/edge-stack/templates/aes-custom-resources.tpl index 7f4c7bf0374..6e1b890005d 100644 --- a/platforms/shared/configuration/roles/setup/edge-stack/templates/aes-custom-resources.tpl +++ b/platforms/shared/configuration/roles/setup/edge-stack/templates/aes-custom-resources.tpl @@ -23,7 +23,7 @@ apiVersion: getambassador.io/v3alpha1 kind: Module metadata: name: ambassador-module - namespace: ambassador + namespace: {{ proxy_namespace }} spec: config: use_proxy_proto: true diff --git a/platforms/shared/configuration/roles/setup/edge-stack/templates/aes-custom-values.tpl b/platforms/shared/configuration/roles/setup/edge-stack/templates/aes-custom-values.tpl index 
4140f684117..e58cbba2ae3 100644 --- a/platforms/shared/configuration/roles/setup/edge-stack/templates/aes-custom-values.tpl +++ b/platforms/shared/configuration/roles/setup/edge-stack/templates/aes-custom-values.tpl @@ -73,3 +73,4 @@ licenseKey: secretName: # Annotations to attach to the license-key-secret. annotations: {} + diff --git a/platforms/shared/configuration/roles/setup/haproxy-ingress/defaults/main.yaml b/platforms/shared/configuration/roles/setup/haproxy-ingress/defaults/main.yaml index c46ce820d8a..78940acc149 100644 --- a/platforms/shared/configuration/roles/setup/haproxy-ingress/defaults/main.yaml +++ b/platforms/shared/configuration/roles/setup/haproxy-ingress/defaults/main.yaml @@ -8,4 +8,4 @@ tmp_directory: "{{ lookup('env', 'TMPDIR') | default('/tmp',true) }}" default: - version: "0.13.9" + version: "0.14.6" diff --git a/platforms/shared/configuration/roles/setup/istio/tasks/main.yaml b/platforms/shared/configuration/roles/setup/istio/tasks/main.yaml index f9b496078f0..88f17b5ef21 100644 --- a/platforms/shared/configuration/roles/setup/istio/tasks/main.yaml +++ b/platforms/shared/configuration/roles/setup/istio/tasks/main.yaml @@ -28,9 +28,9 @@ shell: | KUBECONFIG={{ kubeconfig_path }} helm repo add istio https://istio-release.storage.googleapis.com/charts --force-update KUBECONFIG={{ kubeconfig_path }} helm repo update - KUBECONFIG={{ kubeconfig_path }} helm install istio-base istio/base -n istio-system --create-namespace - KUBECONFIG={{ kubeconfig_path }} helm install istiod istio/istiod -n istio-system --wait - KUBECONFIG={{ kubeconfig_path }} helm install istio-ingressgateway istio/gateway -n istio-system + KUBECONFIG={{ kubeconfig_path }} helm install istio-base istio/base -n istio-system --version=1.20.7 --create-namespace + KUBECONFIG={{ kubeconfig_path }} helm install istiod istio/istiod -n istio-system --version=1.20.7 --wait + KUBECONFIG={{ kubeconfig_path }} helm install istio-ingressgateway istio/gateway -n istio-system --version=1.20.7 when: (not istio_installed) tags: - istio diff --git a/platforms/shared/configuration/setup-k8s-environment.yaml b/platforms/shared/configuration/setup-k8s-environment.yaml index 6a78682ed3f..558b34de2ee 100644 --- a/platforms/shared/configuration/setup-k8s-environment.yaml +++ b/platforms/shared/configuration/setup-k8s-environment.yaml @@ -30,21 +30,10 @@ git_protocol: "{{ item.gitops.git_protocol | default('https') }}" git_url: "{{ item.gitops.git_url }}" git_key: "{{ item.gitops.private_key | default() }}" - flux_version: "0.41.2" + flux_version: "2.3.0" with_items: "{{ network.organizations }}" when: network.env.type != 'operator' - # Prepare ports for Indy - - name: Prepare nodes and clients ports for ambassador - vars: - name: "{{ organizationItem.name }}" - set_fact: - stewards: "{{ stewards | default([]) + organizationItem.services.stewards | list }}" - loop: "{{ network.organizations }}" - loop_control: - loop_var: organizationItem - when: organizationItem.services.stewards is defined and network['type'] == 'indy' - # Setup ambassador edge stack (enabled for besu and quorum) - include_role: name: setup/edge-stack diff --git a/platforms/shared/configuration/site.yaml b/platforms/shared/configuration/site.yaml index 732de7c6baf..c621744d79b 100755 --- a/platforms/shared/configuration/site.yaml +++ b/platforms/shared/configuration/site.yaml @@ -32,7 +32,7 @@ - network.type == 'fabric' - network.env.type != 'operator' - (reset is undefined or reset == 'false') - - (network.upgrade is not defined) + - (network.upgrade is 
not defined or network.upgrade == false) - import_playbook: "{{ playbook_dir }}/../../hyperledger-fabric/configuration/deploy-operator-network.yaml" vars: @@ -41,7 +41,7 @@ - network.type == 'fabric' - network.env.type == 'operator' - (reset is undefined or reset == 'false') - - (network.upgrade is not defined) + - (network.upgrade is not defined or network.upgrade == false) # Upgrade network - import_playbook: "{{ playbook_dir }}/../../hyperledger-fabric/configuration/upgrade-network.yaml" @@ -51,7 +51,7 @@ - network.type == 'fabric' - network.env.type != 'operator' - (reset is undefined or reset == 'false') - - (network.upgrade is defined) + - (network.upgrade is defined and network.upgrade == true) ############################################ # Playbook for R3 Corda Operations diff --git a/platforms/substrate/charts/README.md b/platforms/substrate/charts/README.md index 491123deeb7..b164389bf1a 100644 --- a/platforms/substrate/charts/README.md +++ b/platforms/substrate/charts/README.md @@ -3,43 +3,128 @@ [//]: # (SPDX-License-Identifier: Apache-2.0) [//]: # (##############################################################################################) -# Charts for Parity Substrate components +# Charts for Substrate components ## About -This folder contains helm charts which are used by the ansible playbooks for the deployment of the Parity Substrate network. Each chart folder contain a folder for templates, chart file and the corresponding value file. +This folder contains the helm charts which are used for the deployment of the Substrate components. Each Helm chart that you use has the following global keys, which you need to set. The `global.cluster.provider` value is used as a key to enable the various cloud-specific features. You only need to specify one cloud provider, **not** both, if deploying to the cloud. At the time of writing, both AWS and Azure are fully supported. -## Example Folder Structure ### +```yaml +global: + serviceAccountName: vault-auth + cluster: + provider: aws # choose from: minikube | aws + cloudNativeServices: false # future: set to true to use Cloud Native Services + kubernetesUrl: "https://yourkubernetes.com" # Provide the k8s URL, ignore if not using Hashicorp Vault + vault: + type: hashicorp # choose from hashicorp | kubernetes + network: substrate # must be substrate for these charts + # Following are necessary only when hashicorp vault is used. + address: http://vault.url:8200 + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + role: vault-role ``` ``` -/substrate-node -|-- templates -| |-- _helpers.tpl -| |-- configmap.yaml -| |-- ingress.yaml -| |-- service.yaml -| |-- statefulset.yaml -| |-- volume.yaml -|-- Chart.yaml -|-- values.yaml +## Usage + +### Pre-requisites + +- Kubernetes Cluster (either Managed cloud option like AKS or local like minikube) +- Accessible and unsealed Hashicorp Vault (if using Vault) +- Configured Ambassador AES (if using Ambassador as proxy) +- Update the dependencies + ``` + helm dependency update substrate-genesis + helm dependency update substrate-node + helm dependency update dscp-ipfs-node + ``` + + +## `Without Proxy and Vault` + +### 1. Install Genesis +```bash +# Install the genesis node +helm install genesis ./substrate-genesis --namespace supplychain-subs --create-namespace --values ./values/noproxy-and-novault/genesis.yaml ``` -## Pre-requisites +### 2.
Install Nodes +```bash +# Install bootnode +helm install validator-1 ./substrate-node --namespace supplychain-subs --values ./values/noproxy-and-novault/node.yaml --set node.isBootnode.enabled=false + +helm install validator-2 ./substrate-node --namespace supplychain-subs --values ./values/noproxy-and-novault/node.yaml - Helm to be installed and configured +helm install validator-3 ./substrate-node --namespace supplychain-subs --values ./values/noproxy-and-novault/node.yaml -## Charts description ## +helm install validator-4 ./substrate-node --namespace supplychain-subs --values ./values/noproxy-and-novault/node.yaml -### 1. substrate-genesis ### -- This chart directory contains templates for building genesis file for the substrate network. +helm install member-1 ./substrate-node --namespace supplychain-subs --values ./values/noproxy-and-novault/node.yaml --set node.role=full +``` +### 3. Install IPFS Nodes -### 2. substrate-key-mgmt ### -- This chart directory contains templates for generating crypto material for substrate node. +**3.1.** Retrieve the `NODE_ID` from the Kubernetes secret: -### 3. substrate-node ### -- This chart directory contains templates for deploying a substrate node. +```bash +NODE_ID=$(kubectl get secret "substrate-node-member-1-keys" --namespace supplychain-subs -o jsonpath="{.data['substrate-node-keys']}" | base64 -d | jq -r '.data.node_id') +``` -### 4. vault-k8s-mgmt ### -- This chart directory contains templates for authenticating vault with kubernetes cluster. +**3.2.** Now, install the IPFS nodes: -### 5. dscp-ipfs-node -- This chart directory contains templates to deploy ipfs node. +```bash +helm install dscp-ipfs-node-1 ./dscp-ipfs-node --namespace supplychain-subs --values ./values/noproxy-and-novault/ipfs.yaml \ +--set config.ipfsBootNodeAddress="/dns4/dscp-ipfs-node-1-swarm.supplychain-subs/tcp/4001/p2p/$NODE_ID" +``` + +## _With Ambassador proxy and Vault_ + +### 1. Install Genesis + +Replace the `global.vault.address`, `global.cluster.kubernetesUrl` and `global.proxy.externalUrlSuffix` in all the files in the `./values/proxy-and-vault/` folder. + +```bash +# If the namespace does not exist already +kubectl create namespace supplychain-subs +# Create the roottoken secret +kubectl -n supplychain-subs create secret generic roottoken --from-literal=token= + +helm install genesis ./substrate-genesis --namespace supplychain-subs --values ./values/proxy-and-vault/genesis.yaml +``` +### 2.
Install Nodes +```bash +helm install validator-1 ./substrate-node --namespace supplychain-subs --values ./values/proxy-and-vault/validator.yaml --set global.proxy.p2p=15051 + +helm install validator-2 ./substrate-node --namespace supplychain-subs --values ./values/proxy-and-vault/validator.yaml --set global.proxy.p2p=15052 + +helm install validator-3 ./substrate-node --namespace supplychain-subs --values ./values/proxy-and-vault/validator.yaml --set global.proxy.p2p=15053 + +helm install validator-4 ./substrate-node --namespace supplychain-subs --values ./values/proxy-and-vault/validator.yaml --set global.proxy.p2p=15054 + +helm install member-1 ./substrate-node --namespace supplychain-subs --values ./values/proxy-and-vault/node.yaml --set node.role=full + +``` + +### 3. Spin up IPFS nodes + +```bash +NODE_ID=$(kubectl get secret "substrate-node-member-1-keys" --namespace supplychain-subs -o jsonpath="{.data['substrate-node-keys']}" | base64 -d | jq -r '.data.node_id') +``` + +```bash +helm install dscp-ipfs-node-1 ./dscp-ipfs-node --namespace supplychain-subs --values ./values/proxy-and-vault/ipfs.yaml \ +--set config.ipfsBootNodeAddress="/dns4/dscp-ipfs-node-1-swarm.supplychain-subs/tcp/4001/p2p/$NODE_ID" +``` + +## Clean-up + +To clean up, simply uninstall the Helm releases. It's important to uninstall the genesis Helm chart at the end to prevent any cleanup failure. +```bash +helm uninstall validator-1 --namespace supplychain-subs +helm uninstall validator-2 --namespace supplychain-subs +helm uninstall validator-3 --namespace supplychain-subs +helm uninstall validator-4 --namespace supplychain-subs +helm uninstall member-1 --namespace supplychain-subs +helm uninstall dscp-ipfs-node-1 --namespace supplychain-subs +helm uninstall genesis --namespace supplychain-subs +``` diff --git a/platforms/substrate/charts/dscp-ipfs-node/Chart.yaml b/platforms/substrate/charts/dscp-ipfs-node/Chart.yaml index 88cfd443a90..fecce88af9e 100644 --- a/platforms/substrate/charts/dscp-ipfs-node/Chart.yaml +++ b/platforms/substrate/charts/dscp-ipfs-node/Chart.yaml @@ -2,6 +2,8 @@ # This Chart is a fork from https://github.com/digicatapult/helm-charts/tree/main/charts/dscp-ipfs # Please update if needed ############################################################################################## + +--- apiVersion: v2 name: dscp-ipfs-node appVersion: '2.6.1' @@ -11,14 +13,6 @@ type: application annotations: hyperledger-bevel/platform: substrate licenses: Apache-2.0 -dependencies: - - name: dscp-node - alias: dscpNode - repository: https://digicatapult.github.io/helm-charts/ - tags: - - dscp-node - version: 4.x.x - condition: dscpNode.enabled home: https://github.com/hyperledger/bevel keywords: - DSCP diff --git a/platforms/substrate/charts/dscp-ipfs-node/README.md b/platforms/substrate/charts/dscp-ipfs-node/README.md index 27aa1bdf9fe..ab424ced55f 100644 --- a/platforms/substrate/charts/dscp-ipfs-node/README.md +++ b/platforms/substrate/charts/dscp-ipfs-node/README.md @@ -1,3 +1,9 @@ +[//]: # (##############################################################################################) +[//]: # (Copyright Accenture. All Rights Reserved.) +[//]: # (SPDX-License-Identifier: Apache-2.0) +[//]: # (##############################################################################################) + + # dscp-ipfs The dscp-ipfs is a component of the DSCP (Digital-Supply-Chain-Platform), a blockchain platform.
The dscp-ipfs service is responsible for rotating IPFS swarm keys and storing data, it exposes an IPFS API for this purpose. See [https://github.com/digicatapult/dscp-documentation](https://github.com/digicatapult/dscp-documentation) for details. @@ -5,8 +11,8 @@ The dscp-ipfs is a component of the DSCP (Digital-Supply-Chain-Platform), a bloc ## TL;DR ```console -$ helm repo add bevel https://digicatapult.github.io/helm-charts -$ helm install my-release bevel/dscp-ipfs-node +$ helm repo add bevel https://hyperledger.github.io/bevel +$ helm install dscp-ipfs-node-1 bevel/dscp-ipfs-node ``` ## Introduction @@ -21,11 +27,11 @@ This chart bootstraps a [dscp-ipfs](https://github.com/inteli-poc/dscp-ipfs/) de ## Installing the Chart -To install the chart with the release name `my-release`: +To install the chart with the release name `dscp-ipfs-node-1`: ```console -$ helm repo add bevel https://digicatapult.github.io/helm-charts -$ helm install my-release bevel/dscp-ipfs-node +$ helm repo add bevel https://hyperledger.github.io/bevel +$ helm install dscp-ipfs-node-1 bevel/dscp-ipfs-node ``` The command deploys dscp-ipfs on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. @@ -34,104 +40,103 @@ The command deploys dscp-ipfs on the Kubernetes cluster in the default configura ## Uninstalling the Chart -To uninstall/delete the `my-release` deployment: +To uninstall/delete the `dscp-ipfs-node-1` deployment: ```console -helm delete my-release +helm delete dscp-ipfs-node-1 ``` The command removes all the Kubernetes components associated with the chart and deletes the release. ## Parameters +| Name | Description | Default Value | +|--------|---------|-------------| +| `global.serviceAccountName` | Name of the service account for Vault Auth and Kubernetes Secret management | `vault-auth` | +| `global.cluster.provider` | Kubernetes cluster provider (e.g., AWS EKS, minikube). Currently tested with `aws` and `minikube`. | `aws` | +| `global.cluster.cloudNativeServices` | Future implementation for utilizing Cloud Native Services (`true` for SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure). | `false` | +| `global.cluster.kubernetesUrl` | URL of the Kubernetes Cluster | `""` | +| `global.vault.type` | Vault type support for other providers. Currently supports `hashicorp` and `kubernetes`. | `hashicorp` | +| `global.vault.role` | Role used for authentication with Vault | `vault-role` | +| `global.vault.network` | Deployed network type | `substrate` | +| `global.vault.address`| URL of the Vault server. 
| `""` | +| `global.vault.authPath` | Authentication path for Vault | `supplychain` | +| `global.vault.secretEngine` | Vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | Vault secret prefix; must start with `data/` | `data/supplychain` | + ### Common parameters -| Name | Description | Default Value | -| ------------------------ | --------------------------------------------------------------------------------------- | --------------- | -| `fullnameOverride` | String to fully override common.names.fullname | `""` | +| Name | Description | Default Value | +| - | - | - | +| `fullnameOverride` | String to fully override common.names.fullname | `""` | ### IPFS subsystem parameters -| Name | Description | Default Value | -| ---------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `config.healthCheckPort` | Port for checking the health of the api | `80` | -| `config.healthCheckPollPeriod` | Health check poll period in milliseconds | `30000` | -| `config.healthCheckTimeout` | Health check timeout in milliseconds | `2000` | -| `config.nodeHost` | External DSCP-Node hostname to query | `""` | -| `config.nodePort` | External DSCP-Node port to query | `""` | -| `config.publicKey` | Public key for the IPFS subsystem | `""` | -| `config.privateKey` | Private key for the IPFS subsystem | `""` | -| `config.logLevel` | logLevel for nodeJS service Allowed values: error, warn, info, debug | `info` | -| `config.ipfsApiPort` | dscp-ipfs IPFS subsystem api container port | `5001` | -| `config.ipfsSwarmPort` | dscp-ipfs IPFS subsystem Swarm container port | `4001` | -| `config.ipfsDataPath` | Path to mount the volume at. 
| `/ipfs` | -| `config.ipfsCommand` | Location of the ipfs binary in the container for the IPFS subsystem | `/usr/local/bin/ipfs` | -| `config.ipfsArgs` | Arguments to pass to the wasp-ipfs service to spawn the IPFS subsystem | `["daemon","--migrate"]` | -| `config.ipfsSwarmAddrFilters` | List of IPFS swarm address filters to apply to the IPFS subsystem | `null` | -| `config.ipfsLogLevel` | logLevel for IPFS subsystem, Allowed values: error, warn, info, debug | `info` | -| `config.ipfsBootNodeAddress` | IPFS boot node addresses in MultiAddress format for the IPFS subsystem | `""` | - - +| Name | Description | Default | +| - | - | - | +| `config.healthCheckPort` | Port for checking the health of the api | `80` | +| `config.healthCheckPollPeriod` | Health check poll period in milliseconds | `30000` | +| `config.healthCheckTimeout` | Health check timeout in milliseconds | `2000` | +| `config.nodeHost` | External DSCP-Node hostname to query | `""` | +| `config.nodePort` | External DSCP-Node port to query | `""` | +| `config.publicKey` | Public key for the IPFS subsystem | `""` | +| `config.privateKey` | Private key for the IPFS subsystem | `""` | +| `config.logLevel` | logLevel for nodeJS service Allowed values: error, warn, info, debug | `info` | +| `config.ipfsApiPort` | dscp-ipfs IPFS subsystem api container port | `5001` | +| `config.ipfsSwarmPort` | dscp-ipfs IPFS subsystem Swarm container port | `4001` | +| `config.ipfsDataPath` | Path to mount the volume at. | `/ipfs` | +| `config.ipfsCommand` | Location of the ipfs binary in the container for the IPFS subsystem | `/usr/local/bin/ipfs` | +| `config.ipfsArgs` | Arguments to pass to the wasp-ipfs service to spawn the IPFS subsystem | `["daemon","--migrate"]` | +| `config.ipfsSwarmAddrFilters` | List of IPFS swarm address filters to apply to the IPFS subsystem | `null` | +| `config.ipfsLogLevel` | logLevel for IPFS subsystem, Allowed values: error, warn, info, debug | `info` | +| `config.ipfsBootNodeAddress` | IPFS boot node addresses in MultiAddress format for the IPFS subsystem | `""` | ### IPFS Service Parameters -| Name | Description | Default Value | -| --------------------------------------- | ------------------------------------------------------------------------------------------ | ----------- | -| `service.swarm.annotations` | dscp-ipfs swarm service annotations | `{}` | -| `service.swarm.enabled` | Enable dscp-ipfs swarm service | `true` | -| `service.swarm.port` | dscp-ipfs swarm service HTTP port | `4001` | -| `service.api.annotations` | dscp-ipfs api service annotations | `{}` | -| `service.api.enabled` | Enable dscp-ipfs api service | `true` | -| `service.api.port` | dscp-ipfs api service HTTP port | `4001` | -| `statefulSet.annotations` | dscp-ipfs statefulset annotations | `{}` | -| `statefulSet.livenessProbe.enabled` | dscp-ipfs statefulset liveness probe | `true` | +| Name | Description | Default Value | +| - | - | - | +| `service.swarm.annotations` | dscp-ipfs swarm service annotations | `{}` | +| `service.swarm.enabled` | Enable dscp-ipfs swarm service | `true` | +| `service.swarm.port` | dscp-ipfs swarm service HTTP port | `4001` | +| `service.api.annotations` | dscp-ipfs api service annotations | `{}` | +| `service.api.enabled` | Enable dscp-ipfs api service | `true` | +| `service.api.port` | dscp-ipfs api service HTTP port | `4001` | +| `statefulSet.annotations` | dscp-ipfs statefulset annotations | `{}` | +| `statefulSet.livenessProbe.enabled` | dscp-ipfs statefulset liveness probe | `true` | ### IPFS image config 
parameters -| Name | Description | Default Value | -| ------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | -| `image.repository` | dscp-ipfs image repository | `ghcr.io/inteli-poc/dscp-ipfs` | -| `image.tag` | dscp-ipfs image tag (immutable tags are recommended) | `v2.6.2` | -| `image.pullPolicy` | dscp-ipfs image pull policy | `IfNotPresent` | -| `initContainer.image` | alpine-utils container image | `ghcr.io/hyperledger/alpine-utils:1.0` | -| `initContainer.pullPolicy` | alpine-utils container image pull policy | `IfNotPresent` | +| Name | Description | Default Value | +| - | - | - | +| `image.repository` | dscp-ipfs image repository | `ghcr.io/inteli-poc/dscp-ipfs` | +| `image.tag` | dscp-ipfs image tag (immutable tags are recommended) | `v2.6.2` | +| `image.pullPolicy` | dscp-ipfs image pull policy | `IfNotPresent` | +| `initContainer.image` | alpine-utils container image | `ghcr.io/hyperledger/alpine-utils:1.0` | +| `initContainer.pullPolicy`| alpine-utils container image pull policy | `IfNotPresent` | ### Persistence Parameters -| Name | Description | Default Value | -| --------------------------- | ------------------------------------------------------------------------------------------------------- | ------------------- | -| `storage.storageClass` | Storage class of backing PVC | `""` | -| `storage.dataVolumeSize` | Size of data volume | `1Gi` | +| Name | Description | Default Value | +| - | - | - | +| `storage.storageClass` | Storage class of backing PVC | `""` | +| `storage.dataVolumeSize` | Size of data volume | `1Gi` | ### DSCP-Node Parameters -| Name | Description | Default Value | -| --------------------------- | ----------------------------------------------------------------------------------------- | ------ | -| `dscpNode.enabled` | Enable DSCP-Node subchart | `false` | +| Name | Description | Default Value | +| - | - | - | +| `dscpNode.enabled` | Enable DSCP-Node subchart | `false` | ### Proxy Service Parameters -| Name | Description | Default Value | -| --------------------------- | ----------------------------------------------------------------------------------------- | ------ | -| `proxy.provider` | The type of proxy provider | `ambassador` | -| `proxy.external_url` | External URL where the DSCP swarm service will be exposed | `""` | -| `proxy.port` | External PORT where the DSCP swarm service will be exposed | `15010` | -| `proxy.certSecret` | Kubernetes secret which stores the CA certificate for the proxy | `""` | - - -### Vault Parameters - -| Name | Description | Default Value | -| --------------------------- | ----------------------------------------------------------------------------------------- | ------ | -| `vault.provider` | The type of secret manager to be used | `hashicorp` | -| `vault.address` | URL of the Vault server | `""` | -| `vault.role` | The Vault role which will access the server | `vault-role` | -| `vault.authpath` | The Auth Path configured on Hashicorp Vault | `""` | -| `vault.serviceAccountName` | The service account that has been authenticated with Hashicorp Vault | `vault-auth` | -| `vault.certSecretPrefix` | The path where certificates are stored | `""` | - +| Name | Description | Default Value | +| - | - | - | +| `proxy.provider` | The type of proxy provider | `ambassador` | +| `proxy.external_url` | External URL where the DSCP swarm service will be exposed | 
`""` | +| `proxy.port` | External PORT where the DSCP swarm service will be exposed | `15010` | +| `proxy.certSecret` | Kubernetes secret which stores the CA certificate for the proxy | `""` | ## License diff --git a/platforms/substrate/charts/dscp-ipfs-node/requirements.yaml b/platforms/substrate/charts/dscp-ipfs-node/requirements.yaml new file mode 100644 index 00000000000..eb6458f06ab --- /dev/null +++ b/platforms/substrate/charts/dscp-ipfs-node/requirements.yaml @@ -0,0 +1,14 @@ +dependencies: + - name: dscp-node + alias: dscpNode + repository: https://digicatapult.github.io/helm-charts/ + tags: + - dscp-node + version: 4.x.x + condition: dscpNode.enabled + - name: bevel-storageclass + alias: substrate-storage + repository: "file://../../../shared/charts/bevel-storageclass" + tags: + - storage + version: ~1.0.0 diff --git a/platforms/substrate/charts/dscp-ipfs-node/templates/secret.yaml b/platforms/substrate/charts/dscp-ipfs-node/templates/secret.yaml index 2f831ba6284..dcf4bc60f7e 100644 --- a/platforms/substrate/charts/dscp-ipfs-node/templates/secret.yaml +++ b/platforms/substrate/charts/dscp-ipfs-node/templates/secret.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: Secret metadata: - name: {{ include "dscp-ipfs.fullname" . }}-secret + name: "substrate-node-{{ include "dscp-ipfs.fullname" . }}-keys" labels: {{- include "dscp-ipfs.labels" . | nindent 4 }} {{- if and .Values.config.publicKey .Values.config.privateKey }} diff --git a/platforms/substrate/charts/dscp-ipfs-node/templates/statefulset.yaml b/platforms/substrate/charts/dscp-ipfs-node/templates/statefulset.yaml index ef90e6ee83a..6a177cdad62 100644 --- a/platforms/substrate/charts/dscp-ipfs-node/templates/statefulset.yaml +++ b/platforms/substrate/charts/dscp-ipfs-node/templates/statefulset.yaml @@ -1,3 +1,9 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + {{ $fullname := include "dscp-ipfs.fullname" . }} apiVersion: apps/v1 kind: StatefulSet @@ -21,7 +27,7 @@ spec: labels: name: {{ include "dscp-ipfs.fullname" . }} spec: - serviceAccountName: {{ $.Values.vault.serviceaccountname }} + serviceAccountName: {{ $.Values.global.serviceAccountName }} {{- include "dscp-ipfs.imagePullSecrets" . 
| indent 6 }} volumes: - name: package-manager @@ -82,21 +88,24 @@ spec: - mountPath: {{ .Values.config.ipfsDataPath }} name: ipfs-data {{- end }} - {{- if eq .Values.vault.provider "hashicorp" }} - name: ipfs-init image: {{ .Values.initContainer.image }} imagePullPolicy: {{ .Values.initContainer.pullPolicy | quote }} env: - name: MOUNT_PATH value: {{ .Values.config.ipfsDataPath }} +{{- if eq .Values.global.vault.type "hashicorp" }} - name: VAULT_ADDR - value: {{ $.Values.vault.address }} + value: {{ $.Values.global.vault.address }} - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} + value: {{ $.Values.global.vault.authPath }} - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} + value: {{ $.Values.global.vault.role }} + - name: VAULT_SECRET_ENGINE + value: {{ .Values.global.vault.secretEngine }} + - name: VAULT_SECRET_PREFIX + value: {{ .Values.global.vault.secretPrefix }} +{{- end }} volumeMounts: - mountPath: {{ .Values.config.ipfsDataPath }} name: ipfs-data @@ -107,7 +116,7 @@ spec: args: - |- #!/usr/bin/env bash - +{{- if eq .Values.global.vault.type "hashicorp" }} echo "validating vault response" validateVaultResponse () { if echo ${2} | grep "errors"; then @@ -130,6 +139,7 @@ spec: fi } echo "done validating vault response" +{{- end }} . /scripts/package-manager.sh # Define the packages to install @@ -141,6 +151,7 @@ spec: peer_id=$(cat config | jq -r .Identity.PeerID) private_key=$(cat config | jq -r .Identity.PrivKey) +{{- if eq .Values.global.vault.type "hashicorp" }} echo " { \"data\": { @@ -158,7 +169,7 @@ spec: jq -r 'if .errors then . else .auth.client_token end') validateVaultResponse 'vault login token' "${VAULT_CLIENT_TOKEN}" - vault_secret_key="${CERTS_SECRET_PREFIX}/ipfs" + vault_secret_key="${VAULT_SECRET_ENGINE}"/"${VAULT_SECRET_PREFIX}-ipfs-keys" # Save the generated keys to VAULT LOOKUP_SECRET_RESPONSE=$(curl -sS -H "X-Vault-Token: ${VAULT_CLIENT_TOKEN}" \ -H "Content-Type: application/json" \ @@ -168,7 +179,7 @@ spec: jq -r 'if .errors then . else .auth.client_token end') validateVaultResponse " secret $vault_secret_key" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" echo "Done saving keys in vault" - {{- end }} +{{- end }} containers: - name: {{ include "dscp-ipfs.fullname" . }} image: {{ .Values.image.repository }}:{{ .Values.image.tag }} @@ -252,7 +263,7 @@ spec: spec: accessModes: [ "ReadWriteOnce" ] {{- if .Values.storage.storageClass }} - storageClassName: {{ .Values.storage.storageClass }} + storageClassName: substrate-storage-{{ .Release.Name }} {{- end }} resources: requests: diff --git a/platforms/substrate/charts/dscp-ipfs-node/values.yaml b/platforms/substrate/charts/dscp-ipfs-node/values.yaml index 2ba177321bf..755405f3401 100644 --- a/platforms/substrate/charts/dscp-ipfs-node/values.yaml +++ b/platforms/substrate/charts/dscp-ipfs-node/values.yaml @@ -1,6 +1,36 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## # This is a YAML-formatted file. # Declare variables to be passed into your templates. + +global: + # Provide the service account name autheticated to vault. + # NOTE: Make sure that the service account is already created and authenticated to use the vault. + # Eg. 
serviceAccountName: vault-auth + serviceAccountName: vault-auth + cluster: + provider: azure + cloudNativeServices: false +# Vault section contains the vault provider configuration + vault: + # Mention the vault provider. Currently hashicorp is supported + provider: kubernetes # kubernetes | hashicorp + # Provide the vault address + # Eg. address: http://vault.example.com:8200 + address: "" + # Provide the vault role used. + # Eg. role: vault-role + role: vault-role + # Provide the authpath configured to be used. + authpath: "" + # Provide the vault path where the certificates are stored + # Eg. certsecretprefix: secret/cenm-org-name + certSecretPrefix: "" + ## Provide a name to substitute for the full names of resources fullnameOverride: "" # This section contains the ipfs node config values @@ -9,9 +39,9 @@ config: healthCheckPollPeriod: 30000 healthCheckTimeout: 2000 # External DSCP-Node hostname to query, this overrides dscpNode.enabled - nodeHost: "" + nodeHost: member-1-substrate-node # External DSCP-Node port to query - nodePort: "" + nodePort: 9944 # Public key for the IPFS subsystem publicKey: "" # Private key for the IPFS subsystem @@ -89,7 +119,7 @@ dscpNode: proxy: # Mention the proxy provider. Currently ambassador is supported # eg. provider: ambassador - provider: ambassador + provider: none # none | ambassador # url that will be added in DNS recordset # eg. external_url: test.substrate.example.com external_url: "" @@ -99,23 +129,3 @@ proxy: port: 15010 # Provide the secret name which contains the certificate certSecret: "" - -# Vault section contains the vault provider configuration -vault: - # Mention the vault provider. Currently hashicorp is supported - provider: hashicorp - # Provide the vault address - # Eg. address: http://vault.example.com:8200 - address: "" - # Provide the vault role used. - # Eg. role: vault-role - role: vault-role - # Provide the authpath configured to be used. - authpath: "" - # Provide the service account name autheticated to vault. - # NOTE: Make sure that the service account is already created and authenticated to use the vault. - # Eg. serviceaccountname: vault-auth - serviceAccountName: vault-auth - # Provide the vault path where the certificates are stored - # Eg. 
certsecretprefix: secret/cenm-org-name - certSecretPrefix: "" diff --git a/platforms/substrate/charts/substrate-genesis/Chart.yaml b/platforms/substrate/charts/substrate-genesis/Chart.yaml index 3c9029d1465..5ce42fdaf75 100644 --- a/platforms/substrate/charts/substrate-genesis/Chart.yaml +++ b/platforms/substrate/charts/substrate-genesis/Chart.yaml @@ -3,19 +3,26 @@ # # SPDX-License-Identifier: Apache-2.0 ############################################################################################## + +--- apiVersion: v2 name: substrate-genesis -appVersion: 'latest' description: A Helm chart to generate the genesis for Substrate Nodes -version: 1.0.0 type: application -annotations: - hyperledger-bevel/platform: substrate - licenses: Apache-2.0 -home: https://github.com/hyperledger/bevel +version: 1.0.0 +appVersion: latest keywords: - - DSCP - - BEVEL - - SUBSTRATE + - bevel + - ethereum + - substrate + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ sources: - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/substrate/charts/substrate-genesis/README.md b/platforms/substrate/charts/substrate-genesis/README.md index 0e0c749920f..77c154fa8f0 100644 --- a/platforms/substrate/charts/substrate-genesis/README.md +++ b/platforms/substrate/charts/substrate-genesis/README.md @@ -1,122 +1,103 @@ +[//]: # (##############################################################################################) +[//]: # (Copyright Accenture. All Rights Reserved.) +[//]: # (SPDX-License-Identifier: Apache-2.0) +[//]: # (##############################################################################################) + # substrate-genesis -This helm chart generates genesis file for substrate based node and stores into Hashicorp Vault. Currently, only Hashicorp Vault as secure storage is supported, other Secret managers will be added in due course. This chart has been updated to be used with DSCP (Digital-Supply-Chain-Platform) example provided in Hyperledger Bevel. And, so, it needs the aura keys, grandpa keys and the list of members to be passed as parameters +This Helm chart generates keys for a specified number of nodes using the dscp-node CLI tool, storing them as Kubernetes secrets and, if enabled, saving them to HashiCorp Vault, while also generating a customized Genesis based on user-provided information. ## TL;DR ```console -$ helm repo add bevel https://digicatapult.github.io/helm-charts -$ helm install -f helm-override.yaml my-release bevel/substrate-genesis +$ helm repo add bevel https://hyperledger.github.io/bevel +$ helm install genesis bevel/substrate-genesis ``` -## Introduction - -This chart generates a Genesis file for a [dscp-node](https://github.com/inteli-poc/dscp-node/) network on a Kubernetes cluster using the [Helm](https://helm.sh/) package manager. - -## Prerequisites +### Prerequisites - Kubernetes 1.19+ - Helm 3.2.0+ -- Hashicorp Vault server with access to store the Genesis -- Aura Keys, Grandpa Keys and Account details of the members - -## Installing the Chart - -To install the chart with the release name `my-release`, first create a `helm-override.yaml` file with the keys and account details. 
The file may look like this: -```yaml -aura_keys: -- '5DyCUqDTSgTXcL1B7i7KMBcVBvGdtxXLXZ6uEi5Ktekj5tQF' -- '5GBbtj2twDjJfncE6RtLibzjezH8xghRoRD1dDbZFxsKQjuk' -- '5EPPujEsETrqJ8v87EwJgLjt2K9RJBkJ7cduVQwUZdPiJ6TX' -- '5H19343NxLc5ssgUCbX5Mgwadk4XfaTrup2Qg37L3GtqQcE1' -grandpa_keys: -- '5EtJgUviLmr1RCNhb7jttY6bX5VUHneL6Uyno6rLyGtawGzA' -- '5FwRY6PZ1fkyJUcKgVN5Pv6hzzPZZ31A49UuSXjmciL36LH1' -- '5Hqun9H3ugvht2ukj1KdRqV2Pr8ydu6Vs4VKAQHCpfcXf2gN' -- '5FGuiafBZY5MrBUBUfcxeSsG7iQvzNEp2feXtrhnZqAv31cj' -members: - - account_id: 5D5c66mYy5B2RVKPfXyCcNqvQ4QHpB5SnTEEXGJ9fEvTu12S - balance: 1152921504606846976 - nodes: - - 0024080112204D4DD3B677A3ECF61E926F3E0C7B0F61099D358BDC975700FA9F0881495C6CFB - - 0024080112201EC4AA69077A06D19ACBCA81FD6990CB93748487A53BEC76B4224815F2FADE20 - - 002408011220BAA87C06CAE76D1FA0DE072E37101897016E372D7F9EDFFC0DA8D262AC930F17 - - 00240801122070BC9485475DEDA36CD4F2A12A4BAD9BF1F751AD67A78C5469E5619619A9EB92 - - 0024080112200B8299C0EA87D40F12A2B94CACAB9EED4B6C6C9F79DCB777DB47DE361C03DB5F - - account_id: 5EnHWK7eNuFa5K4U4Z74W3yqXp5NEy1mMCmuYvzcQdPxuTmd - balance: 1152921504606846976 - nodes: - - 002408011220E4CE21100FACA40C75A332566E48108D2188A86D6E501D6D0B8A6F02AC836955 - - 0024080112208D7E157C0EF197453A7F2A645816AF0C9159167C65D4B224AA39366E870ABB65 - - 0024080112207C660839F1380F035B918F354B58BAC7401E279F76A02F7F69513A6FDD399CFF - - account_id: 5FnA4xy1yYigBLEYMFxyHy7N3DM1s3iE8USKzKShLvohq8rd - balance: 1152921504606846976 - nodes: - - 0024080112204846FBE5A8F5A0FECC569EBEBD6A6F360AAABEC5EE8F782259DEC44B8652C714 -``` -Other default values from `values.yaml` can also be overriden in the above file. +If HashiCorp Vault is utilized, ensure: +- HashiCorp Vault Server 1.13.1+ -```console -$ helm repo add bevel https://digicatapult.github.io/helm-charts -$ helm install -f helm-override.yaml my-release bevel/substrate-genesis +> **Note**: Verify the dependent charts for additional prerequisites. + +### Installation + +To install the chart with the release name `genesis`, execute: + +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install genesis bevel/substrate-genesis ``` -The [Parameters](#parameters) section lists the parameters that can be configured during installation. +This command deploys the chart onto the Kubernetes cluster using default configurations. Refer to the [Parameters](#parameters) section for customizable options. -> **Tip**: List all releases using `helm list` +> **Tip**: Utilize `helm list` to list all releases. -## Uninstalling the Chart +### Uninstallation -To uninstall/delete the `my-release` deployment: +To remove the `genesis` deployment, use: -```console -helm delete my-release +```bash +helm uninstall genesis ``` -The command removes all the Kubernetes components associated with the chart and deletes the release. +This command eliminates all Kubernetes components associated with the chart and deletes the release. ## Parameters -### Common parameters - -| Name | Description | Default Value | -| ------------------------ | --------------------------------------------------------------------------------------- | --------------- | -| `matadata.namespace` | The namespace where this job will be deployed | `default` | -| `matadata.name` | The name of the job | `substrate-genesis-job` | +#### Global Parameters +These parameters remain consistent across parent or child charts. 
+ +| Name | Description | Default Value | +|--------|---------|-------------| +| `global.serviceAccountName` | Name of the service account for Vault Auth and Kubernetes Secret management | `vault-auth` | +| `global.cluster.provider` | Kubernetes cluster provider (e.g., AWS EKS, minikube). Currently tested with `aws` and `minikube`. | `aws` | +| `global.cluster.cloudNativeServices` | Future implementation for utilizing Cloud Native Services (`true` for SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure). | `false` | +| `global.cluster.kubernetesUrl` | URL of the Kubernetes Cluster | `""` | +| `global.vault.type` | Vault type support for other providers. Currently supports `hashicorp` and `kubernetes`. | `hashicorp` | +| `global.vault.role` | Role used for authentication with Vault | `vault-role` | +| `global.vault.network` | Deployed network type | `substrate` | +| `global.vault.address`| URL of the Vault server. | `""` | +| `global.vault.authPath` | Authentication path for Vault | `supplychain` | +| `global.vault.secretEngine` | Vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | Vault secret prefix; must start with `data/` | `data/supplychain` | ### Genesis image config parameters -| Name | Description | Default Value | -| ------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | -| `node.image` | The dscp-node or substrate image repository | `ghcr.io/inteli-poc/dscp-node` | -| `node.imageTag` | The dscp-node or substrate image tag | `v4.3.1` | -| `node.pullPolicy` | dscp-node image pull policy | `IfNotPresent` | -| `node.command` | The binary that will be executed to generate the genesis (this corresponds to the node.image) | `./dscp-node` | +| Name | Description | Default Value | +| - | - | - | +| `node.image` | The dscp-node or substrate image | `ghcr.io/inteli-poc/dscp-node` | +| `node.imageTag` | The dscp-node or substrate image tag | `v4.3.1` | +| `node.pullPolicy` | dscp-node image pull | `IfNotPresent` | +| `node.command` | The binary that will be executed to generate the genesis (this corresponds to the node.image) | `./dscp-node` | +| `node.validator.count` | Specify the count of validator nodes | `4` | +| `node.member.count` | Specify the count of member nodes | `1` | +| `node.members.balance` | Pre-allocate some balance for the nodes | `1152921504606846976` | -### Vault Parameters - -| Name | Description | Default Value | -| --------------------------- | ----------------------------------------------------------------------------------------- | ------ | -| `vault.address` | URL of the Vault server | `""` | -| `vault.role` | The Vault role which will access the server | `vault-role` | -| `vault.authpath` | The Auth Path configured on Hashicorp Vault | `""` | -| `vault.serviceAccountName` | The service account that has been authenticated with Hashicorp Vault | `vault-auth` | -| `vault.certSecretPrefix` | The path where certificates are stored | `""` | +## License -### DSCP-Node/Substrate Account Parameters +This chart is licensed under the Apache v2.0 license. 
-| Name | Description | Default Value | -| --------------------------- | ----------------------------------------------------------------------------------------- | ------ | -| `chain` | The name of the chain which is embedded in the genesis | `inteli` | -| `aura_keys` | List of aura keys that will be added to the genesis | `[]` | -| `grandpa_keys` | List of grandpa keys that will be added to the genesis | `[]` | -| `members` | List of members with these attributes: `account_id`, `balance` and `nodes` list. | `[]` | +Copyright © 2023 Accenture +### Attribution +This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: -## License +``` +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at -This chart is licensed under the Apache v2.0 license. + http://www.apache.org/licenses/LICENSE-2.0 -Copyright © 2023 Accenture +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +``` diff --git a/platforms/substrate/charts/substrate-genesis/requirements.yaml b/platforms/substrate/charts/substrate-genesis/requirements.yaml new file mode 100644 index 00000000000..b1195396c5f --- /dev/null +++ b/platforms/substrate/charts/substrate-genesis/requirements.yaml @@ -0,0 +1,11 @@ +dependencies: + - name: bevel-vault-mgmt + repository: "file://../../../shared/charts/bevel-vault-mgmt" + tags: + - bevel + version: ~1.0.0 + - name: bevel-scripts + repository: "file://../../../shared/charts/bevel-scripts" + tags: + - bevel + version: ~1.0.0 diff --git a/platforms/substrate/charts/substrate-genesis/templates/generate-keys.yaml b/platforms/substrate/charts/substrate-genesis/templates/generate-keys.yaml new file mode 100644 index 00000000000..13fecd30268 --- /dev/null +++ b/platforms/substrate/charts/substrate-genesis/templates/generate-keys.yaml @@ -0,0 +1,244 @@ + +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ .Release.Name }}-generate-keys + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": post-install + "helm.sh/hook-weight": "1" + "helm.sh/hook-delete-policy": "before-hook-creation" + labels: + app: genesis + app.kubernetes.io/name: pre-install-hook + app.kubernetes.io/component: keygen + app.kubernetes.io/namespace: {{ .Release.Namespace }} + app.kubernetes.io/release: {{ .Release.Name }} + app.kubernetes.io/managed-by: helm +spec: + backoffLimit: 1 + completions: 1 + template: + metadata: + labels: + app.kubernetes.io/name: pre-install-hook + app.kubernetes.io/instance: {{ .Release.Name }} + spec: + serviceAccountName: {{ $.Values.global.serviceAccountName }} + restartPolicy: "OnFailure" + containers: + - name: generate-keys + image: {{ $.Values.node.image }} + securityContext: + runAsUser: 0 + volumeMounts: + - name: package-manager + mountPath: /scripts/package-manager.sh + subPath: package-manager.sh +{{- if eq .Values.global.vault.type "hashicorp" }} + - name: scripts-volume + mountPath: /scripts/bevel-vault.sh + subPath: bevel-vault.sh + env: + - name: VAULT_ADDR + value: {{ $.Values.global.vault.address }} + - name: VAULT_SECRET_ENGINE + value: {{ .Values.global.vault.secretEngine }} + - name: KUBERNETES_AUTH_PATH + value: {{ $.Values.global.vault.authPath }} + - name: VAULT_APP_ROLE + value: {{ $.Values.global.vault.role }} + - name: VAULT_SECRET_PREFIX + value: {{ .Values.global.vault.secretPrefix }} + - name: VAULT_TYPE + value: {{ $.Values.global.vault.type }} +{{- end }} + command: ["bash", "-c"] + args: + - | + + #!/usr/bin/env bash + + echo "Step 1: Check if the node image is available and install necessary packages if needed." + {{- if ne $.Values.node.image "docker.io/paritytech/substrate-playground-template-node-template" }} + # Install necessary packages using custom package manager script + . /scripts/package-manager.sh + packages_to_install="jq bc curl unzip base58 xxd" + install_packages "$packages_to_install" + + # Check if jq is installed and download it if not installed + if ! command -v jq &> /dev/null; then + cd ~ + curl -k -L -o jq https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 + chmod +x jq + export PATH="$PATH:$HOME" + else + echo "jq is already installed via package-manager.sh script." + fi + {{- end }} + # Download and set up kubectl for Kubernetes management + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + chmod +x kubectl + mv kubectl /usr/local/bin/ + kubectl version --client +{{- if eq .Values.global.vault.type "hashicorp" }} + # Source the script containing vault-related functions + . /scripts/bevel-vault.sh + + echo "Generate a customize token." 
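For clarity, the token bootstrap that `vaultBevelFunc "init"` performs against these environment variables is sketched below. It is based on the equivalent curl login in the removed `job.yaml` later in this diff and is illustrative only; the sourced `bevel-vault.sh` may wrap it differently.

```bash
#!/usr/bin/env bash
# Sketch of the Vault Kubernetes-auth login that bevel-vault.sh wraps behind
# `vaultBevelFunc "init"`, based on the curl call in the removed job.yaml.
set -euo pipefail

# The pod's service-account JWT, injected by Kubernetes.
KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)

# Exchange the JWT for a short-lived Vault client token via the Kubernetes auth method.
VAULT_CLIENT_TOKEN=$(curl -sS --request POST \
  "${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login" \
  -H "Content-Type: application/json" \
  -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | \
  jq -r 'if .errors then . else .auth.client_token end')

# Later reads and writes pass this token in the X-Vault-Token header, e.g.:
# curl -sS -H "X-Vault-Token: ${VAULT_CLIENT_TOKEN}" \
#   "${VAULT_ADDR}/v1/${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/some-key"
echo "Obtained Vault client token"
```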
+ vaultBevelFunc "init" + + # Function to safely write keys + safeWriteSecret() { + local key="$1" + local payload_json="$2" + + # Read secret from vault + vaultBevelFunc "readJson" "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/${key}" + # Check if secrets are available in the vault + if [ "$SECRETS_AVAILABLE" == "yes" ] + then + # Extract secrets from JSON response + local node_id=$(echo ${VAULT_SECRET} | jq -r '.["node_id"]') + local node_key=$(echo ${VAULT_SECRET} | jq -r '.["node_key"]') + local aura_seed=$(echo ${VAULT_SECRET} | jq -r '.["aura_seed"]') + local aura_addr=$(echo ${VAULT_SECRET} | jq -r '.["aura_addr"]') + local grandpa_seed=$(echo ${VAULT_SECRET} | jq -r '.["grandpa_seed"]') + local grandpa_addr=$(echo ${VAULT_SECRET} | jq -r '.["grandpa_addr"]') + local aura_file_b64=$(echo ${VAULT_SECRET} | jq -r '.["aura_file_b64"]' | base64 -d) + local grandpa_file_b64=$(echo ${VAULT_SECRET} | jq -r '.["grandpa_file_b64"]' | base64 -d) + + # Check if Kubernetes secret exists, if not, create one + if ! kubectl get secret substrate-node-validator-${i}-keys --namespace {{ .Release.Namespace }} &> /dev/null; then + kubectl create secret generic substrate-node-validator-${i}-keys --namespace {{ .Release.Namespace }} \ + --from-literal=node_id=${node_id} \ + --from-literal=node_key=${node_key} \ + --from-literal=aura_seed=${aura_seed} \ + --from-literal=aura_addr=${aura_addr} \ + --from-literal=grandpa_seed=${grandpa_seed} \ + --from-literal=grandpa_addr=${grandpa_addr} \ + --from-literal=aura_file_b64=${aura_file_b64} + --from-literal=grandpa_file_b64=${grandpa_file_b64} + fi + else + # Push data to vault + vaultBevelFunc 'write' "${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/${key}" "${payload_json}" #${key}" + #rm nodePayload.json + fi + } +{{- else }} + safeWriteSecret() { + # Placeholder: + # - Implement code to fetch the keys if using any cloud-native service or platform different from HashiCorp to store the keys + # - After fetching the keys, create Kubernetes secrets from them + # - For guidance, refer to the code written for HashiCorp Vault for the same purpose + return 0 + } +{{- end }} + + echo "Step 2: Define functions to generate keys." 
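As a hedged illustration of how the non-HashiCorp placeholder above could be filled in, the sketch below fetches a previously stored key bundle from AWS Secrets Manager and mirrors it into a Kubernetes Secret, the same way the Vault branch does. The `aws` CLI, the `substrate/<key>` secret id and the `NAMESPACE` variable are assumptions, not part of the chart.

```bash
# Illustrative fallback for safeWriteSecret when keys are kept outside HashiCorp Vault.
safeWriteSecret() {
  local key="$1"
  local payload_json="$2"
  local namespace="${NAMESPACE:-supplychain}"   # {{ .Release.Namespace }} in the template

  # If a bundle already exists in Secrets Manager, reuse it as the payload.
  if secret_string=$(aws secretsmanager get-secret-value \
        --secret-id "substrate/${key}" \
        --query SecretString --output text 2>/dev/null); then
    echo "${secret_string}" > "${payload_json}"
  fi

  # Mirror the payload into a Kubernetes Secret if it does not exist yet.
  if ! kubectl get secret "${key}" --namespace "${namespace}" &> /dev/null; then
    kubectl create secret generic "${key}" --namespace "${namespace}" \
      --from-file="substrate-node-keys"="${payload_json}"
  fi
}
```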
+ generate_key() { + local scheme="$1" + local output_file="$2" + $command key generate --scheme "$scheme" --output-type json >> "$output_file" + } + + generate_nodekey() { + local node_type="$1" + local node_index="$2" + local node_id="$($command key generate-node-key --file node_key 2>&1)" + + # Generate keys for aura and grandpa + generate_key "Sr25519" "auraKeygenOutput_${node_type}${node_index}.json" + AURA_SEED=$(jq -r '.secretSeed' "auraKeygenOutput_${node_type}${node_index}.json") + AURA_ADDR=$(jq -r '.ss58Address' "auraKeygenOutput_${node_type}${node_index}.json") + + generate_key "Ed25519" "grandpaKeygenOutput_${node_type}${node_index}.json" + GRANDPA_SEED=$(jq -r '.secretSeed' "grandpaKeygenOutput_${node_type}${node_index}.json") + GRANDPA_ADDR=$(jq -r '.ss58Address' "grandpaKeygenOutput_${node_type}${node_index}.json") + + if [[ "$node_type" == "member" ]]; then + # For member nodes, also generate account keys + generate_key "Sr25519" "accKeygenOutput_${node_type}${node_index}.json" + ACCOUNT_SEED=$(jq -r '.secretSeed' "accKeygenOutput_${node_type}${node_index}.json") + ACCOUNT_ADDR=$(jq -r '.ss58Address' "accKeygenOutput_${node_type}${node_index}.json") + + echo "{ + \"data\": { + \"node_id\": \"$node_id\", + \"node_key\": \"$(cat node_key)\", + \"aura_seed\": \"$AURA_SEED\", + \"aura_addr\": \"$AURA_ADDR\", + \"grandpa_seed\": \"$GRANDPA_SEED\", + \"grandpa_addr\": \"$GRANDPA_ADDR\", + \"account_seed\": \"$ACCOUNT_SEED\", + \"account_addr\": \"$ACCOUNT_ADDR\", + \"account_file_b64\": \"$(cat accKeygenOutput_${node_type}${node_index}.json | base64 -w 0)\", + \"aura_file_b64\": \"$(cat auraKeygenOutput_${node_type}${node_index}.json | base64 -w 0)\", + \"grandpa_file_b64\": \"$(cat grandpaKeygenOutput_${node_type}${node_index}.json | base64 -w 0)\" + } + }" > finalJSON.json + secret_name="substrate-node-${node_type}-${node_index}-keys" + else + echo "{ + \"data\": { + \"node_id\": \"$node_id\", + \"node_key\": \"$(cat node_key)\", + \"aura_seed\": \"$AURA_SEED\", + \"aura_addr\": \"$AURA_ADDR\", + \"grandpa_seed\": \"$GRANDPA_SEED\", + \"grandpa_addr\": \"$GRANDPA_ADDR\", + \"aura_file_b64\": \"$(cat auraKeygenOutput_${node_type}${node_index}.json | base64 -w 0)\", + \"grandpa_file_b64\": \"$(cat grandpaKeygenOutput_${node_type}${node_index}.json | base64 -w 0)\" + } + }" > finalJSON.json + secret_name="substrate-node-${node_type}-${node_index}-keys" + fi + + # Create Kubernetes secret if it doesn't exist + if ! kubectl get secret "${secret_name}" --namespace "{{ .Release.Namespace }}" &> /dev/null; then + echo "creating secrets" + kubectl create secret generic "${secret_name}" --namespace "{{ .Release.Namespace }}" --from-file="substrate-node-keys"="finalJSON.json" + fi + } + + echo "Step 3: Initiate key generation." 
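The key material produced by `generate_nodekey` can also be reproduced outside the cluster for inspection; a minimal sketch, assuming a local `./dscp-node` binary (any Substrate-based node exposing the `key` subcommand should behave the same):

```bash
#!/usr/bin/env bash
# Generate the key types the job produces for a node, locally.
set -euo pipefail
command=./dscp-node   # assumption: node binary available in the current directory

# Sr25519 key for AURA (block authoring); the job reads .secretSeed and .ss58Address.
$command key generate --scheme Sr25519 --output-type json > aura.json
jq -r '.secretSeed, .ss58Address' aura.json

# Ed25519 key for GRANDPA (finality).
$command key generate --scheme Ed25519 --output-type json > grandpa.json
jq -r '.secretSeed, .ss58Address' grandpa.json

# Libp2p node key: the peer id is printed and the raw key is written to ./node_key.
node_id=$($command key generate-node-key --file node_key 2>&1)
echo "node id: ${node_id}"
```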
+ main() { + command={{ $.Values.node.command }} + # Generate keys for member nodes + for ((i=1; i<= {{ $.Values.node.member.count }}; i++)); do + if [[ $.Values.global.vault.type != "hashicorp" ]]; then + echo "Generating nodekey for member-$i" + generate_nodekey "member" "$i" + fi + echo "Writing secrets for member-$i" + safeWriteSecret "substrate-node-member-$i-keys" "finalJSON.json" + done + # Generate keys for validator nodes + for ((i=1; i<= {{ $.Values.node.validator.count }}; i++)); do + if [[ $.Values.global.vault.type != "hashicorp" ]]; then + echo "Generating nodekey for validator-$i" + generate_nodekey "validator" "$i" + fi + echo "Writing secrets for validator-$i" + safeWriteSecret "substrate-node-validator-$i-keys" "finalJSON.json" + done + } + main + volumes: +{{- if eq .Values.global.vault.type "hashicorp" }} + - name: scripts-volume + configMap: + name: bevel-vault-script + defaultMode: 0777 +{{- end }} + - name: package-manager + configMap: + name: package-manager diff --git a/platforms/substrate/charts/substrate-genesis/templates/genesis-job-cleanup.yaml b/platforms/substrate/charts/substrate-genesis/templates/genesis-job-cleanup.yaml new file mode 100644 index 00000000000..e9a1af8ea5f --- /dev/null +++ b/platforms/substrate/charts/substrate-genesis/templates/genesis-job-cleanup.yaml @@ -0,0 +1,56 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ .Release.Name }}-cleanup + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-delete + "helm.sh/hook-weight": "0" + "helm.sh/hook-delete-policy": "hook-succeeded" + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: {{ .Release.Name }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +spec: + backoffLimit: 6 + template: + metadata: + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: {{ .Release.Name }} + app.kubernetes.io/instance: {{ .Release.Name }} + spec: + restartPolicy: OnFailure + serviceAccountName: {{ $.Values.global.serviceAccountName }} + securityContext: + fsGroup: 1000 + containers: + - name: generate-genesis + image: "{{ .Values.removeGenesisOnDelete.image.repository }}:{{ .Values.removeGenesisOnDelete.image.tag }}" + securityContext: + runAsUser: 0 + imagePullPolicy: {{ .Values.removeGenesisOnDelete.image.pullPolicy }} + command: + - /bin/bash + - -c + args: + - | + {{- if .Values.removeGenesisOnDelete.enabled }} + secret_names=$(kubectl get secret -n {{ .Release.Namespace }} | grep substrate-node | awk '{print $1}') + + while IFS= read -r nodeKeys; do + kubectl delete secret "$nodeKeys" --namespace {{ .Release.Namespace }} + done <<< "$secret_names" + + if kubectl get configmap "substrate-genesis" --namespace {{ .Release.Namespace }} &> /dev/null; then + kubectl delete configmap "substrate-genesis" --namespace {{ .Release.Namespace }} + fi + {{- end}} diff --git a/platforms/substrate/charts/substrate-genesis/templates/genesis.yaml b/platforms/substrate/charts/substrate-genesis/templates/genesis.yaml new file mode 100644 index 00000000000..70ea79eb86f --- /dev/null +++ b/platforms/substrate/charts/substrate-genesis/templates/genesis.yaml @@ -0,0 +1,237 @@ 
+############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": post-install + "helm.sh/hook-weight": "2" + "helm.sh/hook-delete-policy": "before-hook-creation" + labels: + app: genesis + app: {{ .Release.Name }} + app.kubernetes.io/name: {{ .Release.Name }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +spec: + backoffLimit: 6 + template: + metadata: + labels: + app: {{ .Release.Name }} + app.kubernetes.io/name: {{ .Release.Name }} + app.kubernetes.io/instance: {{ .Release.Name }} + spec: + restartPolicy: OnFailure + serviceAccountName: {{ $.Values.global.serviceAccountName }} + securityContext: + fsGroup: 1000 + containers: + - name: generate-genesis + image: {{ $.Values.node.image }}:{{ $.Values.node.imageTag }} + imagePullPolicy: {{ $.Values.node.pullPolicy }} + volumeMounts: + - name: certcheck + mountPath: certcheck + - name: package-manager + mountPath: /scripts/package-manager.sh + subPath: package-manager.sh +{{- if eq .Values.global.vault.type "hashicorp" }} + - name: scripts-volume + mountPath: /scripts/bevel-vault.sh + subPath: bevel-vault.sh +{{- end }} + env: +{{- if eq .Values.global.vault.type "hashicorp" }} + - name: VAULT_ADDR + value: {{ $.Values.global.vault.address }} + - name: KUBERNETES_AUTH_PATH + value: {{ $.Values.global.vault.authPath }} + - name: VAULT_APP_ROLE + value: {{ $.Values.global.vault.role }} + - name: VAULT_SECRET_PREFIX + value: {{ .Values.global.vault.secretPrefix }} + - name: VAULT_SECRET_ENGINE + value: {{ .Values.global.vault.secretEngine }} + - name: VAULT_TYPE + value: {{ .Values.global.vault.type }} + - name: CERTS_SECRET_PREFIX + value: {{ .Values.global.vault.certPrefix }} +{{- end }} + - name: MOUNT_PATH + value: "certcheck" + command: ["bash", "-c"] + args: + - |- + + echo "Step 1: Check if the node image is available and install necessary packages if needed." + {{- if ne $.Values.node.image "docker.io/paritytech/substrate-playground-template-node-template" }} + # Install necessary packages using custom package manager script + . /scripts/package-manager.sh + packages_to_install="jq bc curl unzip base58 xxd" + install_packages "$packages_to_install" + + # Check if jq is installed and download it if not installed + if ! command -v jq &> /dev/null; then + cd ~ + curl -k -L -o jq https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 + chmod +x jq + export PATH="$PATH:$HOME" + else + echo "jq is already installed via package-manager.sh script." + fi + {{- end }} + # Download and set up kubectl for Kubernetes management + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + chmod +x kubectl + mv kubectl /usr/local/bin/ + kubectl version --client + + echo "Step 2: Execute the provided command to generate the genesis block." + mkdir certcheck + command={{ $.Values.node.command }} + echo "Generate genesis" + GENESIS=$($command build-spec --disable-default-bootnode --chain local) + + echo "Step 3: Edit genesis configuration." 
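The spec pipeline used in the steps that follow (build a plain spec, edit it with `jq`, rebuild it in raw format) can be replayed locally to inspect the result before it ever reaches the ConfigMap; a sketch, again assuming a local node binary:

```bash
#!/usr/bin/env bash
# Replay the chain-spec pipeline of this job: plain spec -> jq edits -> raw spec.
set -euo pipefail
command=./dscp-node   # assumption: local node binary

# 1. Build the human-readable spec from the built-in "local" chain.
$command build-spec --disable-default-bootnode --chain local > genesis.json

# 2. Apply the same edits as the job: rename, mark as Live, clear authorities and balances.
jq '.name = "inteli"
    | .id = "inteli"
    | .chainType = "Live"
    | .genesis.runtime.aura.authorities = []
    | .genesis.runtime.grandpa.authorities = []
    | .genesis.runtime.balances.balances = []' genesis.json > genesis.edited.json

# 3. Convert the edited spec into the raw storage format the nodes consume.
$command build-spec --disable-default-bootnode --raw --chain genesis.edited.json > genesis_raw.json
```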
+ # Set values + GENESIS=$(echo $GENESIS | jq '.name |= {{ .Values.chain | quote }}') + GENESIS=$(echo $GENESIS | jq '.id |= {{ .Values.chain | replace "-" "_" | quote }}') + GENESIS=$(echo $GENESIS | jq '.chainType |= "Live"') + # Clear authorities and balances + GENESIS=$(echo $GENESIS | jq '.genesis.runtime.aura.authorities |= []') + GENESIS=$(echo $GENESIS | jq '.genesis.runtime.grandpa.authorities |= []') + GENESIS=$(echo $GENESIS | jq '.genesis.runtime.balances.balances |= []') + {{- if eq $.Values.node.image "ghcr.io/inteli-poc/dscp-node" }} + GENESIS=$(echo $GENESIS | jq '.genesis.runtime.nodeAuthorization.nodes |= []') + GENESIS=$(echo $GENESIS | jq '.genesis.runtime.membership.members |= []') + {{- end }} + + echo "Step 4: Generate sudo key with Sr25519 scheme and add sudo account key and balance into genesis." + # Generate sudo key with Sr25519 scheme + $command key generate --scheme Sr25519 --output-type json >> certcheck/sudoKeygenOutput.json + SUDO_SEED=$(jq -r '.secretPhrase' certcheck/sudoKeygenOutput.json) + SUDO_ADDR=$(jq -r '.ss58Address' certcheck/sudoKeygenOutput.json) + # Add sudo account key and balance into genesis + GENESIS=$(echo $GENESIS | jq --arg sudo $SUDO_ADDR --arg balance 1152921504606846976 '.genesis.runtime.balances.balances += [[$sudo, ($balance | tonumber)]]') + GENESIS=$(echo $GENESIS | jq --arg sudo $SUDO_ADDR '.genesis.runtime.sudo.key |= $sudo') + + echo "Step 5: Insert AURA & GRANDPA keys into genesis for validators." + echo "Inserting keys into genesis for validators" + for ((i=1; i<={{ $.Values.node.validator.count }}; i++)); do + secret_data=$(kubectl get secret "substrate-node-validator-${i}-keys" --namespace {{ .Release.Namespace }} -o json | jq -r '.data["substrate-node-keys"]' | base64 -d) + + # Check if secret_data is empty or not + if [ -n "$secret_data" ]; then + # Extract aura_addr and grandpa_addr + aura_addr=$(echo "$secret_data" | jq -r '.data.aura_addr') + grandpa_addr=$(echo "$secret_data" | jq -r '.data.grandpa_addr') + + # Check if extraction successful + if [ -n "$aura_addr" ] && [ -n "$grandpa_addr" ]; then + # Insert aura_addr keys into GENESIS JSON + GENESIS=$(echo "$GENESIS" | jq --arg aura "$aura_addr" '.genesis.runtime.aura.authorities += [$aura]') + GENESIS=$(echo "$GENESIS" | jq --arg grandpa "$grandpa_addr" '.genesis.runtime.grandpa.authorities += [[$grandpa, 1]]') + else + echo "Error: Unable to retrieve aura_addr or grandpa_addr key" + fi + else + echo "Error: Unable to retrieve data." + fi + done + + # Initialize an array to store each nodes' Kubernetes secret name + secret_names=$(kubectl get secret -n {{ .Release.Namespace }} | grep substrate-node | awk '{print $1}') + declare -a nodes + while IFS= read -r line; do + nodes+=("$line") + done <<< "$secret_names" + + echo "Step 6: Adding member accounts and their balances to genesis." 
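The least obvious part of the member loop below is the PeerId handling: the `nodeAuthorization` pallet stores node identities as byte arrays, so each base58 peer id is decoded to hex and appended byte by byte as decimal values. A standalone sketch of that conversion (peer id supplied as an argument; `base58`, `xxd` and `bc` are installed by the package list above):

```bash
#!/usr/bin/env bash
# Decode a base58 libp2p peer id into the decimal byte array expected by
# the nodeAuthorization pallet, mirroring the loop in Step 6.
set -euo pipefail
node_id="${1:?usage: $0 <base58-peer-id>}"

# base58 -> raw bytes -> hex string without whitespace, uppercased for bc.
hex=$(echo -n "$node_id" | base58 -d | xxd -p | tr -d '[:space:]' | tr '[:lower:]' '[:upper:]')

# Split into two-character bytes and print each as a decimal number.
for byte in $(echo "$hex" | fold -w2); do
  echo "obase=10; ibase=16; $byte" | bc
done
```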
+ for ((i=1; i<={{ $.Values.node.member.count }}; i++)); do + account_addr=$(kubectl get secret "substrate-node-member-$i-keys" --namespace {{ .Release.Namespace }} -o json | jq -r '.data["substrate-node-keys"]' | base64 -d | jq -r '.data.account_addr') + # Add account address and balance into genesis block + GENESIS=$(echo "$GENESIS" | jq --arg account_id "$account_addr" --arg balance {{ $.Values.node.member.balance }} '.genesis.runtime.balances.balances += [[$account_id, ($balance | tonumber)]]') + GENESIS=$(echo "$GENESIS" | jq --arg account_id "$account_addr" '.genesis.runtime.membership.members += [$account_id]') + + # Loop through each node to add authorization for the current member + for node in "${nodes[@]}"; do + # Retrieve node ID for the current node + node_id=$(kubectl get secret "$node" --namespace {{ .Release.Namespace }} -o json | jq -r '.data["substrate-node-keys"]' | base64 -d | jq -r '.data.node_id') + + # Convert node ID to base58 format + base58=$(echo -n "$node_id" | base58 -d | xxd -p | tr -d '[:space:]' | tr '[:lower:]' '[:upper:]') + + # Split the base58 string into an array of bytes + arr_node_id=($(echo $base58 | fold -w2)) + + # Add authorization for the current member to the node + GENESIS=$(echo $GENESIS | jq --arg owner "$account_addr" '.genesis.runtime.nodeAuthorization.nodes += [[[], $owner]]') + for byte in "${arr_node_id[@]}" + do + # Add each byte of the node ID to the authorization + GENESIS=$(echo $GENESIS | jq --arg byte $(echo "obase=10; ibase=16; $byte" | bc) '.genesis.runtime.nodeAuthorization.nodes[-1][0] += [($byte | tonumber)]') + done + done + done + + echo "Step 7: Update the format of the modified genesis JSON and create a config map if it doesn't exist." + # Write the modified genesis JSON to a file + echo "$GENESIS" > certcheck/genesis.json + # Convert the genesis JSON to raw format + echo "Converting genesis to raw format" + GENESIS=$($command build-spec --disable-default-bootnode --raw --chain certcheck/genesis.json) + echo "$GENESIS" > certcheck/genesis_raw.json + # Encode the raw genesis JSON to base64 + cat certcheck/genesis_raw.json | base64 -w0 > certcheck/genesis_base64 # No need to encode it if you wanna store genesis as a K8s secret + + # # Create the config map "substrate-genesis" using the base64 encoded genesis JSON and sudoKeygenOutput.json if it doesn't exist + if ! kubectl get configmap "substrate-genesis" --namespace {{ .Release.Namespace }} &> /dev/null; then + kubectl create configmap "substrate-genesis" --namespace {{ .Release.Namespace }} --from-file=genesis="${MOUNT_PATH}/genesis_base64" --from-file=sudoKeygenOutput="${MOUNT_PATH}/sudoKeygenOutput.json" + fi + echo "COMPLETED!" +{{- if eq .Values.global.vault.type "hashicorp" }} + # Initialize the token + . 
/scripts/bevel-vault.sh + vaultBevelFunc "init" + + vault_secret_key="${VAULT_SECRET_ENGINE}/${VAULT_SECRET_PREFIX}/genesis" + vaultBevelFunc "readJson" "$vault_secret_key" + + # The vault CLI is required for this job as the genesis file is too large to be passed in via a vault API call + echo "Installing Vault CLI" + curl -O -L https://releases.hashicorp.com/vault/1.7.1/vault_1.7.1_linux_amd64.zip + unzip vault_1.7.1_linux_amd64.zip + {{- if eq $.Values.node.image "docker.io/paritytech/substrate-playground-template-node-template" }} + export PATH=$PATH:~/workspace + {{- else }} + mv vault /bin + {{- end }} + vault --version + # Save the generated keys to VAULT + if [ "$SECRETS_AVAILABLE" == "no" ]; then + vault_secret_key="${VAULT_SECRET_ENGINE}/${CERTS_SECRET_PREFIX}/genesis" + vault kv put "$vault_secret_key" genesis=@"${MOUNT_PATH}/genesis_base64" sudo_details=@"${MOUNT_PATH}/sudoKeygenOutput.json" + fi +{{- end }} + volumes: + - name: certcheck + emptyDir: + medium: Memory + - name: package-manager + configMap: + name: package-manager +{{- if eq .Values.global.vault.type "hashicorp" }} + - name: scripts-volume + configMap: + name: bevel-vault-script + defaultMode: 0777 +{{- end }} diff --git a/platforms/substrate/charts/substrate-genesis/templates/job.yaml b/platforms/substrate/charts/substrate-genesis/templates/job.yaml deleted file mode 100644 index 95536f09d6f..00000000000 --- a/platforms/substrate/charts/substrate-genesis/templates/job.yaml +++ /dev/null @@ -1,194 +0,0 @@ -############################################################################################## -# Copyright Accenture. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -############################################################################################## - -apiVersion: batch/v1 -kind: Job -metadata: - name: "{{ $.Values.metadata.name }}" - namespace: "{{ $.Values.metadata.namespace }}" - labels: - app: "{{ $.Values.metadata.name }}" - app.kubernetes.io/name: "{{ $.Values.metadata.name }}" - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} -spec: - backoffLimit: 6 - template: - metadata: - labels: - app: "{{ $.Values.metadata.name }}" - app.kubernetes.io/name: "{{ $.Values.metadata.name }}" - app.kubernetes.io/instance: {{ .Release.Name }} - {{- include "labels.custom" . | nindent 2 }} - spec: - restartPolicy: OnFailure - serviceAccountName: {{ $.Values.vault.serviceaccountname }} - securityContext: - fsGroup: 1000 - containers: - - name: generate-genesis - image: {{ $.Values.node.image }}:{{ $.Values.node.imageTag }} - imagePullPolicy: {{ $.Values.node.pullPolicy }} - volumeMounts: - - name: certcheck - mountPath: certcheck - - name: package-manager - mountPath: /scripts/package-manager.sh - subPath: package-manager.sh - env: - - name: VAULT_ADDR - value: {{ $.Values.vault.address }} - - name: KUBERNETES_AUTH_PATH - value: {{ $.Values.vault.authpath }} - - name: VAULT_APP_ROLE - value: {{ $.Values.vault.role }} - - name: MOUNT_PATH - value: "certcheck" - - name: CERTS_SECRET_PREFIX - value: {{ .Values.vault.certsecretprefix }} - command: ["bash", "-c"] - args: - - |- - #!/usr/bin/env bash - - {{- if ne $.Values.node.image "docker.io/paritytech/substrate-playground-template-node-template" }} - . 
/scripts/package-manager.sh - # Define the packages to install - packages_to_install="jq bc curl unzip" - install_packages "$packages_to_install" - - if [[ $? > 0 ]] - then - # download jq - cd ~; - curl -k -L -o jq https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64; - chmod +x jq; - export PATH=$PATH:.; - else - echo "jq and curl was installed using apt-get." - fi; - {{- end }} - - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_CLIENT_TOKEN}" \ - ${VAULT_ADDR}/v1/${vault_secret_key}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - mkdir certcheck - command={{ $.Values.node.command }} - echo "Generate genesis" - GENESIS=$($command build-spec --disable-default-bootnode --chain local) - GENESIS=$(echo $GENESIS | jq '.name |= {{ .Values.chain | quote }}') - GENESIS=$(echo $GENESIS | jq '.id |= {{ .Values.chain | replace "-" "_" | quote }}') - GENESIS=$(echo $GENESIS | jq '.chainType |= "Live"') - - echo "Editing genesis config" - GENESIS=$(echo $GENESIS | jq '.genesis.runtime.aura.authorities |= []') - GENESIS=$(echo $GENESIS | jq '.genesis.runtime.grandpa.authorities |= []') - GENESIS=$(echo $GENESIS | jq '.genesis.runtime.balances.balances |= []') - {{- if eq $.Values.node.image "ghcr.io/inteli-poc/dscp-node" }} - GENESIS=$(echo $GENESIS | jq '.genesis.runtime.nodeAuthorization.nodes |= []') - GENESIS=$(echo $GENESIS | jq '.genesis.runtime.membership.members |= []') - {{- end }} - - echo "Generating sudo key with scheme Sr25519..." - $command key generate --scheme Sr25519 --output-type json >> certcheck/sudoKeygenOutput.json - SUDO_SEED=$(jq -r '.secretPhrase' certcheck/sudoKeygenOutput.json) - SUDO_ADDR=$(jq -r '.ss58Address' certcheck/sudoKeygenOutput.json) - - echo "Adding sudo account key and balance into genesis" - GENESIS=$(echo $GENESIS | jq --arg sudo $SUDO_ADDR --arg balance 1152921504606846976 '.genesis.runtime.balances.balances += [[$sudo, ($balance | tonumber)]]') - GENESIS=$(echo $GENESIS | jq --arg sudo $SUDO_ADDR '.genesis.runtime.sudo.key |= $sudo') - - echo "Inserting keys into genesis" - - echo "Inserting aura keys into genesis" - {{- range .Values.aura_keys }} - echo {{.}} - GENESIS=$(echo "$GENESIS" | jq --arg aura {{.}} '.genesis.runtime.aura.authorities += [$aura]') - {{- end }} - - echo "Inserting grandpa keys into genesis" - {{- range .Values.grandpa_keys }} - echo {{.}} - GENESIS=$(echo "$GENESIS" | jq --arg grandpa {{.}} '.genesis.runtime.grandpa.authorities += [[$grandpa, 1]]') - {{- end }} - - {{- if eq $.Values.node.image "ghcr.io/inteli-poc/dscp-node" }} - echo "Adding member accounts and their balances to genesis" - {{- range $idx, $member := .Values.members }} - GENESIS=$(echo $GENESIS | jq --arg account_id {{ $member.account_id }} --arg balance {{ $member.balance }} '.genesis.runtime.balances.balances += [[$account_id, ($balance | tonumber)]]') - GENESIS=$(echo $GENESIS | jq --arg account_id {{ $member.account_id }} '.genesis.runtime.membership.members += [$account_id]') - - {{- range $member.nodes }} - this_node_id={{ . 
}} - arr_node_id=($(echo $this_node_id | fold -w2)) - GENESIS=$(echo $GENESIS | jq --arg owner {{ $member.account_id }} '.genesis.runtime.nodeAuthorization.nodes += [[[], $owner]]') - for byte in "${arr_node_id[@]}" - do - GENESIS=$(echo $GENESIS | jq --arg byte $(echo "obase=10; ibase=16; $byte" | bc) '.genesis.runtime.nodeAuthorization.nodes[-1][0] += [($byte | tonumber)]') - done - {{- end }} - {{- end }} - {{- end }} - - echo "$GENESIS" > certcheck/genesis.json - echo "********* check genesis *********" - cat certcheck/genesis.json - echo "Converting genesis to raw format" - GENESIS=$($command build-spec --disable-default-bootnode --raw --chain certcheck/genesis.json) - echo "$GENESIS" > certcheck/genesis_raw.json - cat certcheck/genesis_raw.json | base64 -w0 > certcheck/genesis_base64 - - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - echo "Getting secrets from Vault Server: ${VAULT_ADDR}" - - # Login to Vault and so I can get an approle token - export VAULT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login \ - -H "Content-Type: application/json" \ - -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | \ - jq -r 'if .errors then . else .auth.client_token end') - - # the vault cli is required for this job as the genesis file is too large to be passed in via a vault api call - echo "installing vault cli" - curl -O -L https://releases.hashicorp.com/vault/1.7.1/vault_1.7.1_linux_amd64.zip - unzip vault_1.7.1_linux_amd64.zip - {{- if eq $.Values.node.image "docker.io/paritytech/substrate-playground-template-node-template" }} - export PATH=$PATH:~/workspace - {{- else }} - mv vault /bin - {{- end }} - vault --version - - validateVaultResponse 'vault login token' "${VAULT_CLIENT_TOKEN}" - vault_secret_key="${CERTS_SECRET_PREFIX}/genesis" - # Save the generated keys to VAULT - vault kv put $vault_secret_key genesis="@${MOUNT_PATH}/genesis_base64" sudo_details="@${MOUNT_PATH}/sudoKeygenOutput.json" - volumes: - - name: certcheck - emptyDir: - medium: Memory - - name: package-manager - configMap: - name: package-manager diff --git a/platforms/substrate/charts/substrate-genesis/values.yaml b/platforms/substrate/charts/substrate-genesis/values.yaml index 821c80ff910..4f4e5efaa1f 100644 --- a/platforms/substrate/charts/substrate-genesis/values.yaml +++ b/platforms/substrate/charts/substrate-genesis/values.yaml @@ -11,14 +11,42 @@ # This is a YAML-formatted file. # Declare variables to be passed into your templates. -metadata: - #Provide the namespace for organization's peer - #Eg. namespace: carrier-subs - namespace: default +# The following are for overriding global values +global: + # Provide the service account name which will be created. + serviceAccountName: vault-auth + cluster: + provider: azure # choose from: minikube | aws + cloudNativeServices: false # 'false' is implemented + #Provide the kubernetes host url + #Eg. kubernetesUrl: https://10.3.8.5:6443 + kubernetesUrl: + vault: + # Provide the type of vault + type: kubernetes # hashicorp | kubernetes + # Provide the vault role used. + role: vault-role + # Provide the network type + network: substrate + # Provide the vault server address + address: # "http://vault_url" + # Provide the vault authPath configured to be used. + authPath: supplychain + # Provide the secret engine. 
+ secretEngine: secretsv2 + # Provide the vault path where the secrets will be stored + secretPrefix: "data/supplychain" + +removeGenesisOnDelete: + enabled: true + image: + repository: ghcr.io/hyperledger/bevel-k8s-hooks + tag: qgt-0.2.12 + pullPolicy: IfNotPresent - #Provide the name for substrate-key-mgmt job release - #Eg. name: carrier-keys-job - name: substrate-genesis-job +# Provide custom chain name +# Eg. chain: inteli-gcp +chain: inteli node: # Pull substrate Docker image @@ -31,53 +59,8 @@ node: # Command to be invoked to perform operations on the node # Eg. command: substrate command: ./dscp-node - -############################################################# -# HashiCorp Vault Configuration # -############################################################# -# NOTE: Make sure that the vault is already unsealed, intialized and configured to -# use Kubernetes service account token based authentication. -# For more info, see https://www.vaultproject.io/docs/auth/kubernetes - -vault: - # Provide the vault address - # Eg. address: http://vault.example.com:8200 - address: "" - # Provide the vault role used. - # Eg. role: vault-role - role: vault-role - # Provide the authpath configured to be used. - authpath: "" - # Provide the service account name autheticated to vault. - # NOTE: Make sure that the service account is already created and autheticated to use the vault. - # Eg. serviceaccountname: vault-auth - serviceAccountName: vault-auth - # Provide the vault path where the certificates are stored - # Eg. certsecretprefix: secret/cenm-org-name - certSecretPrefix: "" - -# Provide custom chain name -# Eg. chain: inteli-gcp -chain: inteli -# Provide the aura keys in a list format -# e.g. -# aura_keys: -# - 5DyCUqDTSgTXcL1B7i7KMBcVBvGdtxXLXZ6uEi5Ktekj5tQF -# - 5GBbtj2twDjJfncE6RtLibzjezH8xghRoRD1dDbZFxsKQjuk -aura_keys: [] -# Provide the grandpa keys in a list format -# e.g. -# grandpa_keys: -# - 5EtJgUviLmr1RCNhb7jttY6bX5VUHneL6Uyno6rLyGtawGzA -# - 5FwRY6PZ1fkyJUcKgVN5Pv6hzzPZZ31A49UuSXjmciL36LH1 -grandpa_keys: [] - -# Provide array of member details -# e.g. 
-# members: -# - account_id: 5GHW6ZUNk8Hoh4ZEtRnYcx7hvbQdrqqvi7NUBo5YaGSkdxrS -# balance: 1152921504606846976 -# nodes: -# - 0024080112200B290101F0A19F007C6C70EE4CA8430FC349DF6E2C8EED770B69F09AFBD48A19 -# - 00240801122023F888BC544900B3876ED8F9C7BE6A92C4BB1A2A5030396DD70EE0E02EA534FA -members: [] + validator: + count: 4 + member: + count: 1 + balance: 1152921504606846976 diff --git a/platforms/substrate/charts/substrate-node/Chart.yaml b/platforms/substrate/charts/substrate-node/Chart.yaml index 16c122ed066..9a1d7f3ff95 100644 --- a/platforms/substrate/charts/substrate-node/Chart.yaml +++ b/platforms/substrate/charts/substrate-node/Chart.yaml @@ -2,9 +2,26 @@ # This Chart is a fork from https://github.com/paritytech/helm-charts # Please update if needed ############################################################################################## -apiVersion: v2 + +--- +apiVersion: v2 name: substrate-node description: A Helm chart to deploy Substrate/Polkadot nodes type: application -version: 1.2.0 -appVersion: "0.0.1" +version: 1.0.0 +appVersion: latest +keywords: + - bevel + - ethereum + - substrate + - hyperledger + - enterprise + - blockchain + - deployment + - accenture +home: https://hyperledger-bevel.readthedocs.io/en/latest/ +sources: + - https://github.com/hyperledger/bevel +maintainers: + - name: Hyperledger Bevel maintainers + email: bevel@lists.hyperledger.org diff --git a/platforms/substrate/charts/substrate-node/README.md b/platforms/substrate/charts/substrate-node/README.md index 8db5538ef10..b5528d69360 100644 --- a/platforms/substrate/charts/substrate-node/README.md +++ b/platforms/substrate/charts/substrate-node/README.md @@ -1,29 +1,70 @@ -# Substrate/Polkadot node helm chart +[//]: # (##############################################################################################) +[//]: # (Copyright Accenture. All Rights Reserved.) +[//]: # (SPDX-License-Identifier: Apache-2.0) +[//]: # (##############################################################################################) -## Installing the chart +# substrate-node +This Helm chart deploys the nodes to initiate the Substrate network. + +## TL;DR ```console -helm repo add parity https://paritytech.github.io/helm-charts/ -helm install polkadot-node parity/node +$ helm repo add bevel https://hyperledger.github.io/bevel +$ helm install Validator-1 bevel/substrate-node ``` -This will deploy a single Polkadot node with the default configuration. +### Prerequisites -### Deploying a node with data synced from a snapshot archive (eg. [Polkashot](https://polkashots.io/)) +- Kubernetes 1.19+ +- Helm 3.2.0+ -Polkadot: -```console -helm install polkadot-node parity/node --set node.chainDataSnapshotUrl=https://dot-rocksdb.polkashots.io/snapshot --set node.chainDataSnapshotFormat=7z +If HashiCorp Vault is utilized, ensure: +- HashiCorp Vault Server 1.13.1+ + +> **Note**: Verify the dependent charts for additional prerequisites. + +### Installation + +To install the chart with the release name `genesis`, execute: + +```bash +helm repo add bevel https://hyperledger.github.io/bevel +helm install Validator-1 bevel/substrate-node ``` -Kusama: -```console -helm install kusama-node parity/node --set node.chainDataSnapshotUrl=https://ksm-rocksdb.polkashots.io/snapshot --set node.chainDataSnapshotFormat=7z --set node.chainPath=ksmcc3 +This command deploys the chart onto the Kubernetes cluster using default configurations. Refer to the [Parameters](#parameters) section for customizable options. 
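Values can be overridden at install time in the usual Helm way; the release name, namespace and overrides below are illustrative:

```bash
# Override selected values at install time (illustrative values).
helm install validator-1 bevel/substrate-node \
  --namespace supplychain \
  --set global.vault.type=kubernetes \
  --set node.command=./dscp-node

# Or keep overrides in a values file and pass it with -f:
# helm install validator-1 bevel/substrate-node -f my-values.yaml
```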
+ +> **Tip**: Utilize `helm list` to list all releases. + +### Uninstallation + +To remove the `Validator-1` deployment, use: + +```bash +helm uninstall Validator-1 ``` -⚠️ For some chains where the local directory name is different from the chain ID, `node.chainPath` needs to be set to a custom value. + +This command eliminates all Kubernetes components associated with the chart and deletes the release. ## Parameters +#### Global Parameters +These parameters remain consistent across parent or child charts. + +| Name | Description | Default Value | +|--------|---------|-------------| +| `global.serviceAccountName` | Name of the service account for Vault Auth and Kubernetes Secret management | `vault-auth` | +| `global.cluster.provider` | Kubernetes cluster provider (e.g., AWS EKS, minikube). Currently tested with `aws` and `minikube`. | `aws` | +| `global.cluster.cloudNativeServices` | Future implementation for utilizing Cloud Native Services (`true` for SecretsManager and IAM for AWS; KeyVault & Managed Identities for Azure). | `false` | +| `global.cluster.kubernetesUrl` | URL of the Kubernetes Cluster | `""` | +| `global.vault.type` | Vault type support for other providers. Currently supports `hashicorp` and `kubernetes`. | `hashicorp` | +| `global.vault.role` | Role used for authentication with Vault | `vault-role` | +| `global.vault.network` | Deployed network type | `substrate` | +| `global.vault.address`| URL of the Vault server. | `""` | +| `global.vault.authPath` | Authentication path for Vault | `supplychain` | +| `global.vault.secretEngine` | Vault secret engine name | `secretsv2` | +| `global.vault.secretPrefix` | Vault secret prefix; must start with `data/` | `data/supplychain` | + ### Common parameters | Parameter | Description | Default | @@ -39,48 +80,52 @@ helm install kusama-node parity/node --set node.chainDataSnapshotUrl=https://ksm ### Node parameters -| Parameter | Description | Default | -|--------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------| -| `node.chain` | Network to connect the node to (ie `--chain`) | `polkadot` | -| `node.command` | Command to be invoked to launch the node binary | `polkadot` | -| `node.flags` | Node flags other than `--name` (set from the helm release name), `--base-path` and `--chain` (both set with `node.chain`) | `--prometheus-external --rpc-external --ws-external --rpc-cors all` | -| `node.keys` | The list of keys to inject on the node before startup (object{ type, scheme, seed }) | `{}` | -| `node.persistGeneratedNodeKey` | Persist the auto-generated node key inside the data volume (at /data/node-key) | `false` | -| `node.customNodeKey` | Use a custom node-key, if `node.persistGeneratedNodeKey` is true then this will not be used. 
(Must be 64 byte hex key) | `nil` | -| `node.enableStartupProbe` | If true, enable the startup probe check | `true` | -| `node.enableReadinessProbe` | If true, enable the readiness probe check | `true` | -| `node.dataVolumeSize` | The size of the chain data PersistentVolume | `100Gi` | -| `node.replica` | Number of replica in the node StatefulSet | `1` | -| `node.role` | Set the role of the node: `full`, `authority`/`validator`, `collator` or `light` | `full` | -| `node.chainDataSnapshotUrl` | Download and load chain data from a snapshot archive http URL | `` | -| `node.chainDataSnapshotFormat` | The snapshot archive format (`tar` or `7z`) | `tar` | -| `node.chainDataGcsBucketUrl` | Sync chain data files from a GCS bucket (eg. `gs://bucket-name/folder-name`) | `` | -| `node.chainPath` | Path at which the chain database files are located (`/data/chains/${CHAIN_PATH}`) | `nil` (if undefined, fallbacks to the value in `node.chain`) | -| `node.chainDataKubernetesVolumeSnapshot` | Initialize the chain data volume from a Kubernetes VolumeSnapshot | `` | -| `node.customChainspecUrl` | Download and use a custom chainspec file from a URL | `nil` | -| `node.collator.isParachain` | If true, configure the node as a parachain (set the relay-chain flags after `--`) | `nil` | -| `node.collator.relayChainCustomChainspecUrl` | Download and use a custom relay-chain chainspec file from a URL | `nil` | -| `node.collator.relayChainDataSnapshotUrl` | Download and load relay-chain data from a snapshot archive http URL | `nil` | -| `node.collator.relayChainDataSnapshotFormat` | The relay-chain snapshot archive format (`tar` or `7z`) | `nil` | -| `node.collator.relayChainPath` | Path at which the chain database files are located (`/data/polkadot/chains/${RELAY_CHAIN_PATH}`) | `nil` | -| `node.collator.relayChainDataKubernetesVolumeSnapshot` | Initialize the relay-chain data volume from a Kubernetes VolumeSnapshot | `nil` | -| `node.collator.relayChainDataGcsBucketUrl` | Sync relay-chain data files from a GCS bucket (eg. 
`gs://bucket-name/folder-name`) | `nil` | -| `node.collator.relayChainFlags` | Relay-chain node flags other than `--name` (set from the helm release name), `--base-path` and `--chain` | `nil` | -| `node.resources.limits` | The resources limits (cpu/memory) for nodes | `{}` | -| `node.podManagementPolicy` | The pod management policy to apply to the StatefulSet, set it to `Parallel` to launch or terminate all Pods in parallel, and not to wait for pods to become Running and Ready or completely terminated prior to launching or terminating another pod | `{}` | -| `node.resources.requests` | The resources requests (cpu/memory) for nodes | `{}` | -| `node.serviceMonitor.enabled` | If true, creates a Prometheus Operator ServiceMonitor | `false` | -| `node.serviceMonitor.namespace` | Prometheus namespace | `nil` | -| `node.serviceMonitor.internal` | Prometheus scrape interval | `nil` | -| `node.serviceMonitor.scrapeTimeout` | Prometheus scrape timeout | `nil` | -| `node.tracing.enabled` | If true, creates a jaeger agent sidecar | `false` | -| `node.subtrateApiSiecar.enabled` | If true, creates a substrate api sidecar | `false` | -| `node.perNodeServices.createApiService` | If true creates a clusterIP API service | `true` | -| `node.perNodeServices.createP2pService` | If true creates a p2p NodePort service, if `node.collator.isParachain` also true, creates a NodePort p2p service for the parachain | `false` | -| `node.perNodeServices.relayServiceAnnotations` | Annotations to be inserted into the relay chain service | `{}` | -| `node.perNodeServices.paraServiceAnnotations` | Annotations to be inserted into the para chain service | `{}` | -| `node.perNodeServices.setPublicAddressToExternal.enabled` | If true sets the `--public-addr` flag to be the NodePort p2p services external address | `false` | -| `node.perNodeServices.setPublicAddressToExternal.ipRetrievalServiceUrl` | The external service to return the NodePort IP | `https://ifconfig.io` | +| Parameter | Description | Default | +| - | - | - | +| `node.chain` | Network to connect the node to (ie `--chain`) | `polkadot` | +| `node.command` | Command to be invoked to launch the node binary | `polkadot` | +| `node.isBootnode` | Set to false to start the first node as a Bootnode. Set to true for subsequent nodes to connect to an existing Bootnode | `false` | +| `node.bootnodeName` | Name of the Bootnode deployed | `` | +| `node.bootnodeAddr` | Address to access the Bootnode | `` | +| `node.bootnodePort` | Port for communication with other peers/nodes | `30333` | +| `node.flags` | Node flags other than `--name` (set from the helm release name), `--base-path` and `--chain` (both set with `node.chain`) | `--prometheus-external --rpc-external --ws-external --rpc-cors all` | +| `node.keys` | The list of keys to inject on the node before startup (object{ type, scheme, seed }) | `{}` | +| `node.persistGeneratedNodeKey` | Persist the auto-generated node key inside the data volume (at /data/node-key) | `false` | +| `node.customNodeKey` | Use a custom node-key, if `node.persistGeneratedNodeKey` is true then this will not be used. 
(Must be 64 byte hex key) | `nil` | +| `node.enableStartupProbe` | If true, enable the startup probe check | `true` | +| `node.enableReadinessProbe` | If true, enable the readiness probe check | `true` | +| `node.dataVolumeSize` | The size of the chain data PersistentVolume | `100Gi` | +| `node.replica` | Number of replica in the node StatefulSet | `1` | +| `node.role` | Set the role of the node: `full`, `authority`/`validator`, `collator` or `light` | `full` | +| `node.chainDataSnapshotUrl` | Download and load chain data from a snapshot archive http URL | `` | +| `node.chainDataSnapshotFormat` | The snapshot archive format (`tar` or `7z`) | `tar` | +| `node.chainDataGcsBucketUrl` | Sync chain data files from a GCS bucket (eg. `gs://bucket-name/folder-name`) | `` | +| `node.chainPath` | Path at which the chain database files are located (`/data/chains/${CHAIN_PATH}`) | `nil` (if undefined, fallbacks to the value in `node.chain`) | +| `node.chainDataKubernetesVolumeSnapshot` | Initialize the chain data volume from a Kubernetes VolumeSnapshot | `` | +| `node.customChainspecUrl` | Download and use a custom chainspec file from a URL | `nil` | +| `node.collator.isParachain` | If true, configure the node as a parachain (set the relay-chain flags after `--`) | `nil` | +| `node.collator.relayChainCustomChainspecUrl` | Download and use a custom relay-chain chainspec file from a URL | `nil` | +| `node.collator.relayChainDataSnapshotUrl` | Download and load relay-chain data from a snapshot archive http URL | `nil` | +| `node.collator.relayChainDataSnapshotFormat` | The relay-chain snapshot archive format (`tar` or `7z`) | `nil` | +| `node.collator.relayChainPath` | Path at which the chain database files are located (`/data/polkadot/chains/${RELAY_CHAIN_PATH}`) | `nil` | +| `node.collator.relayChainDataKubernetesVolumeSnapshot` | Initialize the relay-chain data volume from a Kubernetes VolumeSnapshot | `nil` | +| `node.collator.relayChainDataGcsBucketUrl` | Sync relay-chain data files from a GCS bucket (eg. 
`gs://bucket-name/folder-name`) | `nil` | +| `node.collator.relayChainFlags` | Relay-chain node flags other than `--name` (set from the helm release name), `--base-path` and `--chain` | `nil` | +| `node.resources.limits` | The resources limits (cpu/memory) for nodes | `{}` | +| `node.podManagementPolicy` | The pod management policy to apply to the StatefulSet, set it to `Parallel` to launch or terminate all Pods in parallel, and not to wait for pods to become Running and Ready or completely terminated prior to launching or terminating another pod | `{}` | +| `node.resources.requests` | The resources requests (cpu/memory) for nodes | `{}` | +| `node.serviceMonitor.enabled` | If true, creates a Prometheus Operator ServiceMonitor | `false` | +| `node.serviceMonitor.namespace` | Prometheus namespace | `nil` | +| `node.serviceMonitor.internal` | Prometheus scrape interval | `nil` | +| `node.serviceMonitor.scrapeTimeout` | Prometheus scrape timeout | `nil` | +| `node.tracing.enabled` | If true, creates a jaeger agent sidecar | `false` | +| `node.subtrateApiSiecar.enabled` | If true, creates a substrate api sidecar | `false` | +| `node.perNodeServices.createApiService` | If true creates a clusterIP API service | `true` | +| `node.perNodeServices.createP2pService` | If true creates a p2p NodePort service, if `node.collator.isParachain` also true, creates a NodePort p2p service for the parachain | `false` | +| `node.perNodeServices.relayServiceAnnotations` | Annotations to be inserted into the relay chain service | `{}` | +| `node.perNodeServices.paraServiceAnnotations` | Annotations to be inserted into the para chain service | `{}` | +| `node.perNodeServices.setPublicAddressToExternal.enabled` | If true sets the `--public-addr` flag to be the NodePort p2p services external address | `false` | +| `node.perNodeServices.setPublicAddressToExternal.ipRetrievalServiceUrl` | The external service to return the NodePort IP | `https://ifconfig.io` | ### Other parameters @@ -107,3 +152,27 @@ helm install kusama-node parity/node --set node.chainDataSnapshotUrl=https://ksm | `jaegerAgent.collector.url` | The URL which jaeger agent sends data | `nil` | | `jaegerAgent.collector.port ` | The port which jaeger agent sends data | `14250` | | `extraContainers ` | Sidecar containers to add to the node | `[]` | + +## License + +This chart is licensed under the Apache v2.0 license. + +Copyright © 2023 Accenture + +### Attribution + +This chart is adapted from the [charts](https://hyperledger.github.io/bevel/) which is licensed under the Apache v2.0 License which is reproduced here: + +``` +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+``` diff --git a/platforms/substrate/charts/substrate-node/requirements.yaml b/platforms/substrate/charts/substrate-node/requirements.yaml new file mode 100644 index 00000000000..1aef2dc7526 --- /dev/null +++ b/platforms/substrate/charts/substrate-node/requirements.yaml @@ -0,0 +1,7 @@ +dependencies: + - name: bevel-storageclass + alias: substrate-storage + repository: "file://../../../shared/charts/bevel-storageclass" + tags: + - storage + version: ~1.0.0 diff --git a/platforms/substrate/charts/substrate-node/templates/_helpers.tpl b/platforms/substrate/charts/substrate-node/templates/_helpers.tpl index e0625cd4215..dc38de6c0d3 100644 --- a/platforms/substrate/charts/substrate-node/templates/_helpers.tpl +++ b/platforms/substrate/charts/substrate-node/templates/_helpers.tpl @@ -50,6 +50,7 @@ app.kubernetes.io/managed-by: {{ .Release.Service }} Selector labels */}} {{- define "node.selectorLabels" -}} +name: {{ include "node.name" . }} app.kubernetes.io/name: {{ include "node.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} diff --git a/platforms/substrate/charts/substrate-node/templates/service.yaml b/platforms/substrate/charts/substrate-node/templates/service.yaml index 3bf88639813..94ff0e1d672 100644 --- a/platforms/substrate/charts/substrate-node/templates/service.yaml +++ b/platforms/substrate/charts/substrate-node/templates/service.yaml @@ -107,7 +107,7 @@ apiVersion: getambassador.io/v3alpha1 kind: Listener metadata: name: "{{ $fullname }}-listener" - namespace: {{ $.Values.namespace }} + namespace: {{ $.Release.Namespace }} spec: port: {{ $.Values.proxy.p2p }} protocol: TCP @@ -121,9 +121,9 @@ apiVersion: getambassador.io/v3alpha1 kind: TCPMapping metadata: name: "{{ $fullname }}-tcpmapping" - namespace: {{ $.Values.namespace }} + namespace: {{ $.Release.Namespace }} spec: port: {{ $.Values.proxy.p2p }} - service: "{{ $fullname }}-{{ $i }}-rc-p2p.{{ $.Values.namespace }}:{{ $.Values.node.ports.p2p }}" + service: "{{ $fullname }}-{{ $i }}-rc-p2p.{{ $.Release.Namespace }}:{{ $.Values.node.ports.p2p }}" {{- end }} {{- end }} diff --git a/platforms/substrate/charts/substrate-node/templates/statefulset.yaml b/platforms/substrate/charts/substrate-node/templates/statefulset.yaml index 25ea5b4783c..76d3d99ac68 100644 --- a/platforms/substrate/charts/substrate-node/templates/statefulset.yaml +++ b/platforms/substrate/charts/substrate-node/templates/statefulset.yaml @@ -167,149 +167,14 @@ spec: readOnly: true {{- end }} {{- end }} - - name: node-secrets - image: {{ .Values.vault.image }} - imagePullPolicy: IfNotPresent - env: - - name: VAULT_ADDR - value: {{ .Values.vault.address }} - - name: VAULT_SECRET_PREFIX - value: {{ .Values.vault.secretPrefix }} - - name: KUBERNETES_AUTH_PATH - value: {{ .Values.vault.authPath }} - - name: VAULT_APP_ROLE - value: {{ .Values.vault.appRole }} - - name: PEER_NAME - value: {{ .Values.node.name }} - command: ["/bin/sh", "-c"] - args: - - |- - #!/bin/sh - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_CLIENT_TOKEN}" \ - ${VAULT_ADDR}/v1/${vault_secret_key}) - curl_response=$? 
- if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - echo "Getting secrets from Vault Server: ${VAULT_ADDR}" - - ## Login to Vault to get an app role token ## - VAULT_CLIENT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login \ - -H "Content-Type: application/json" \ - -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | \ - jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_CLIENT_TOKEN}" - echo "logged in" - - vault_secret_key="${VAULT_SECRET_PREFIX}/${PEER_NAME}/substrate" - - echo "Getting node-key, aura and grandpa secret seeds from $vault_secret_key" - - LOOKUP_SECRET_RESPONSE=$(curl -sS \ - --header "X-Vault-Token:${VAULT_CLIENT_TOKEN}" \ - ${VAULT_ADDR}/v1/${vault_secret_key} | \ - jq -r 'if .errors then . else . end') - validateVaultResponse "secret (${vault_secret_key})" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - - {{- range $keys := .Values.node.keys }} - secretSeed=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["{{ .seed }}"]') - echo "${secretSeed}" > /secrets/{{ .seed }} - {{- end }} - - node_key=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["node_key"]') - echo "${node_key}" > /secrets/node_key - volumeMounts: - - name: keystore - mountPath: /secrets - readOnly: false - - name: retrieve-chain-spec - image: {{ .Values.vault.image }} - imagePullPolicy: IfNotPresent - env: - - name: VAULT_ADDR - value: {{ .Values.vault.address }} - - name: VAULT_SECRET_PREFIX - value: {{ .Values.vault.secretPrefix }} - - name: KUBERNETES_AUTH_PATH - value: {{ .Values.vault.authPath }} - - name: VAULT_APP_ROLE - value: {{ .Values.vault.appRole }} - command: ["/bin/sh", "-c"] - args: - - |- - #!/bin/sh - - validateVaultResponse () { - if echo ${2} | grep "errors"; then - echo "ERROR: unable to retrieve ${1}: ${2}" - exit 1 - fi - if [ "$3" == "LOOKUPSECRETRESPONSE" ] - then - http_code=$(curl -sS -o /dev/null -w "%{http_code}" \ - --header "X-Vault-Token: ${VAULT_CLIENT_TOKEN}" \ - ${VAULT_ADDR}/v1/${vault_secret_key}) - curl_response=$? - if test "$http_code" != "200" ; then - echo "Http response code from Vault - $http_code" - if test "$curl_response" != "0"; then - echo "Error: curl command failed with error code - $curl_response" - exit 1 - fi - fi - fi - } - - KUBE_SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - echo "Getting secrets from Vault Server: ${VAULT_ADDR}" - - ## Login to Vault to get an app role token ## - VAULT_CLIENT_TOKEN=$(curl -sS --request POST ${VAULT_ADDR}/v1/auth/${KUBERNETES_AUTH_PATH}/login \ - -H "Content-Type: application/json" \ - -d '{"role":"'"${VAULT_APP_ROLE}"'","jwt":"'"${KUBE_SA_TOKEN}"'"}' | \ - jq -r 'if .errors then . else .auth.client_token end') - validateVaultResponse 'vault login token' "${VAULT_CLIENT_TOKEN}" - echo "logged in" - - vault_secret_key="${VAULT_SECRET_PREFIX}/genesis" - - echo "Getting the chain spec from $vault_secret_key" - - LOOKUP_SECRET_RESPONSE=$(curl -sS \ - --header "X-Vault-Token:${VAULT_CLIENT_TOKEN}" \ - ${VAULT_ADDR}/v1/${vault_secret_key} | \ - jq -r 'if .errors then . else . 
end') - validateVaultResponse "secret (${vault_secret_key})" "${LOOKUP_SECRET_RESPONSE}" "LOOKUPSECRETRESPONSE" - - chain_spec=$(echo ${LOOKUP_SECRET_RESPONSE} | jq -r '.data.data["genesis"]') - echo "${chain_spec}" | base64 -d > {{ .Values.node.customChainspecPath }} - volumeMounts: - - name: chain-data - mountPath: /data - {{- if or .Values.node.customChainspecUrl .Values.node.collator.relayChainCustomChainspecUrl }} + {{- if and .Values.node.customChainspecUrl .Values.node.collator.relayChainCustomChainspecUrl }} - name: download-chainspec image: {{ .Values.initContainer.image.repository }}:{{ .Values.initContainer.image.tag }} command: [ "/bin/sh" ] args: - -c - | - {{- if .Values.node.customChainspecUrl }} + {{- if and (.Values.node.customChainspecUrl) (.Values.node.customChainspecPath) }} if [ ! -f {{ .Values.node.customChainspecPath }} ]; then wget -O {{ .Values.node.customChainspecPath }} {{ .Values.node.customChainspecUrl }} fi @@ -323,171 +188,111 @@ spec: - name: chain-data mountPath: /data {{- end }} - {{- if .Values.node.keys }} - - name: inject-keys + containers: + - name: {{ .Values.node.chain }} image: {{ .Values.image.repository }}:{{ .Values.image.tag }} imagePullPolicy: {{ .Values.image.pullPolicy }} command: ["/bin/sh", "-c"] args: - |- - {{- range $keys := .Values.node.keys }} - {{ $.Values.node.command }} key insert --base-path /data \ - {{- if $.Values.vault.secretPrefix }} - --chain {{ $.Values.node.customChainspecPath }} \ - --key-type {{ .type }} \ - --scheme {{ .scheme }} \ - --suri /secrets/{{ .seed }} \ - {{- else }} - --chain ${CHAIN} \ - --key-type $(cat /var/run/secrets/{{ .type }}/type) \ - --scheme $(cat /var/run/secrets/{{ .type }}/scheme) \ - --suri /var/run/secrets/{{ .type }}/seed \ + + #!/bin/sh + + echo "Step 1: Install necessary packages using custom package manager script" + . 
/scripts/package-manager.sh + packages_to_install="jq curl" + install_packages "$packages_to_install" + + echo "STEP-2: Download and set up kubectl for Kubernetes management" + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + chmod +x kubectl + mv kubectl /usr/local/bin/ + kubectl version --client + + echo "Step 3: Extract chain specification (genesis) from Kubernetes ConfigMap and store it" + chain_spec=$(kubectl get configmap "substrate-genesis" --namespace {{ .Release.Namespace }} -o json | jq -r '.data["genesis"]') + echo "${chain_spec}" | base64 -d > {{ .Values.node.customChainspecPath }} + + echo "Step 4: Retrieve secret keys from Kubernetes Secrets if available" + secretName="substrate-node-{{ .Release.Name }}-keys" + if kubectl get secret "${secretName}" --namespace {{ .Release.Namespace }} >/dev/null 2>&1; then + # Extract AURA secret phrase from Kubernetes Secret + AURA_SECRETPHRASE=$(kubectl get secret "${secretName}" --namespace {{ .Release.Namespace }} -o json | jq -r '.data["substrate-node-keys"]' | base64 -d | jq -r '.data.aura_file_b64' | base64 -d | jq -r '.secretPhrase') + # Extract GRANDPA secret phrase from Kubernetes Secret + GRAN_SECRETPHRASE=$(kubectl get secret "${secretName}" --namespace {{ .Release.Namespace }} -o json | jq -r '.data["substrate-node-keys"]' | base64 -d | jq -r '.data.grandpa_file_b64' | base64 -d | jq -r '.secretPhrase') + # Extract NODE_KEY from Kubernetes Secret + NODE_KEY=$(kubectl get secret "${secretName}" --namespace {{ .Release.Namespace }} -o json | jq -r '.data["substrate-node-keys"]' | base64 -d | jq -r '.data.node_key') + fi + # Check if bootnode is enabled + {{- if .Values.node.isBootnode.enabled }} + # Retrieve BOOTNODE_ID from Kubernetes Secret + BOOTNODE_ID=$(kubectl get secret "substrate-node-{{ .Values.node.isBootnode.bootnodeName }}-keys" --namespace {{ .Release.Namespace }} -o json | jq -r '.data["substrate-node-keys"]' | base64 -d | jq -r '.data.node_id') {{- end }} - && echo "Inserted key {{ .type }} into Keystore" \ - || echo "Failed to insert key {{ .type}} into Keystore." - {{- end }} - env: - - name: CHAIN - value: {{ .Values.node.chain }} - volumeMounts: - - mountPath: /secrets - name: keystore - - mountPath: /data - name: chain-data - {{- range $keys := .Values.node.keys }} - - mountPath: /var/run/secrets/{{ .type }} - name: {{ .type }} - {{- end }} - {{- end }} - {{- if .Values.node.perNodeServices.createP2pService }} - - name: query-services - image: {{ .Values.kubectl.image.repository }}:{{ .Values.kubectl.image.tag }} - command: [ "/bin/sh" ] - args: - - -c - - | + + echo "Step 5: Insert keys into Keystore using dscp-node command-line tool" + # Insert AURA key into Keystore + ./{{ .Values.node.command }} key insert --base-path=/data --chain=/data/chainspec.json --key-type=aura --scheme=Sr25519 --suri="${AURA_SECRETPHRASE}" && echo "Inserted key aura into Keystore" || echo "Failed to insert key aura into Keystore." + # Insert GRANPA key into Keystore + ./{{ .Values.node.command }} key insert --base-path=/data --chain=/data/chainspec.json --key-type=gran --scheme=Ed25519 --suri="${GRAN_SECRETPHRASE}" && echo "Inserted key gran into Keystore" || echo "Failed to insert key gran into Keystore." 
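For reference, the rewritten container script above drops the Vault init containers and reads everything from native Kubernetes objects instead. A minimal sketch of that flow, runnable from any pod that has kubectl and jq, assuming the object names used by this chart (the substrate-genesis ConfigMap and the substrate-node-<release>-keys Secret) and the dscp-node binary configured via node.command; namespace and release name are placeholders:

    #!/bin/sh
    NS=supplychain-subs      # placeholder namespace
    RELEASE=validator-1      # placeholder Helm release name

    # 1. Recover the chainspec written by the genesis job.
    kubectl get configmap substrate-genesis -n "$NS" -o json \
      | jq -r '.data["genesis"]' | base64 -d > /data/chainspec.json

    # 2. The node-keys Secret wraps a JSON document whose aura/grandpa entries
    #    are themselves base64-encoded key-generation output.
    KEYS=$(kubectl get secret "substrate-node-${RELEASE}-keys" -n "$NS" -o json \
      | jq -r '.data["substrate-node-keys"]' | base64 -d)
    AURA=$(printf '%s' "$KEYS" | jq -r '.data.aura_file_b64'    | base64 -d | jq -r '.secretPhrase')
    GRAN=$(printf '%s' "$KEYS" | jq -r '.data.grandpa_file_b64' | base64 -d | jq -r '.secretPhrase')
    NODE_KEY=$(printf '%s' "$KEYS" | jq -r '.data.node_key')

    # 3. Insert both session keys into the keystore before the node starts.
    ./dscp-node key insert --base-path=/data --chain=/data/chainspec.json \
      --key-type=aura --scheme=Sr25519 --suri="$AURA"
    ./dscp-node key insert --base-path=/data --chain=/data/chainspec.json \
      --key-type=gran --scheme=Ed25519 --suri="$GRAN"

The template then continues (Steps 6-7) by deriving the public P2P address from the per-node Service or the Ambassador URL and exec-ing the node with --node-key=${NODE_KEY}, --public-addr and --listen-addr, so the keystore and chainspec prepared here are picked up on first start.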
+ + echo "Step 6: Determine various ports and external addresses for P2P communication" POD_INDEX="${HOSTNAME##*-}" - {{- if eq .Values.node.perNodeServices.p2pServiceType "NodePort" }} - RELAY_CHAIN_P2P_PORT="$(kubectl --namespace {{ .Release.Namespace }} get service {{ $fullname }}-${POD_INDEX}-rc-p2p -o jsonpath='{.spec.ports[*].nodePort}')" - echo -n "${RELAY_CHAIN_P2P_PORT}" > /data/relay_chain_p2p_port - echo "Retrieved Kubernetes service node port from {{ $fullname }}-${POD_INDEX}-rc-p2p, saved ${RELAY_CHAIN_P2P_PORT} to /data/relay_chain_p2p_port" + {{- if and (.Values.node.perNodeServices.createP2pService) (eq .Values.node.perNodeServices.p2pServiceType "NodePort") }} + RELAY_CHAIN_P2P_PORT=$(kubectl --namespace {{ .Release.Namespace }} get service {{ $fullname }}-${POD_INDEX}-rc-p2p -o jsonpath='{.spec.ports[*].nodePort}') {{- else if or (eq .Values.node.perNodeServices.p2pServiceType "LoadBalancer") (eq .Values.node.perNodeServices.p2pServiceType "ClusterIP") }} - RELAY_CHAIN_P2P_PORT=30333 - echo -n "${RELAY_CHAIN_P2P_PORT}" > /data/relay_chain_p2p_port - echo "Kubernetes service {{ $fullname }}-${POD_INDEX}-rc-p2p is ${RELAY_CHAIN_P2P_PORT}" + RELAY_CHAIN_P2P_PORT=8080 # Default port {{- end }} - {{- if and .Values.node.collator.isParachain (eq .Values.node.perNodeServices.p2pServiceType "Nodeport") }} - PARA_CHAIN_P2P_PORT="$(kubectl --namespace {{ .Release.Namespace }} get service {{ $fullname }}-${POD_INDEX}-pc-p2p -o jsonpath='{.spec.ports[*].nodePort}')" - echo -n "${PARA_CHAIN_P2P_PORT}" > /data/para_chain_p2p_port - echo "Retrieved Kubernetes service node port from {{ $fullname }}-${POD_INDEX}-pc-p2p, saved ${PARA_CHAIN_P2P_PORT} to /data/para_chain_p2p_port" - {{- else if and .Values.node.collator.isParachain (or (eq .Values.node.perNodeServices.p2pServiceType "LoadBalancer") (eq .Values.node.perNodeServices.p2pServiceType "ClusterIP")) }} - PARA_CHAIN_P2P_PORT=30334 - echo -n "${PARA_CHAIN_P2P_PORT}" > /data/para_chain_p2p_port - echo "Kubernetes service {{ $fullname }}-${POD_INDEX}-pc-p2p is ${PARA_CHAIN_P2P_PORT}" + + {{- if and (.Values.node.perNodeServices.createP2pService) (.Values.node.collator.isParachain) (eq .Values.node.perNodeServices.p2pServiceType "Nodeport") }} + PARA_CHAIN_P2P_PORT=$(kubectl --namespace {{ .Release.Namespace }} get service {{ $fullname }}-${POD_INDEX}-pc-p2p -o jsonpath='{.spec.ports[*].nodePort}') + {{- else if and (.Values.node.collator.isParachain) (or (eq .Values.node.perNodeServices.p2pServiceType "LoadBalancer") (eq .Values.node.perNodeServices.p2pServiceType "ClusterIP")) }} + PARA_CHAIN_P2P_PORT=30334 # Default port {{- end }} - {{- if and .Values.node.perNodeServices.setPublicAddressToExternal.enabled (eq .Values.node.perNodeServices.p2pServiceType "NodePort") }} - EXTERNAL_ADDRESS=$(curl -sS {{ .Values.node.perNodeServices.setPublicAddressToExternal.ipRetrievalServiceUrl }}) - echo -n "${EXTERNAL_ADDRESS}" > /data/node_external_address - echo "Retrieved external IP from {{ .Values.node.perNodeServices.setPublicAddressToExternal.ipRetrievalServiceUrl }}, saved ${EXTERNAL_ADDRESS} to /data/node_external_address" + + {{- if and (.Values.node.perNodeServices.createP2pService) (.Values.node.perNodeServices.setPublicAddressToExternal.enabled) (eq .Values.node.perNodeServices.p2pServiceType "NodePort") }} + EXTERNAL_ADDRESS=$(curl -sS {{ .Values.node.perNodeServices.setPublicAddressToExternal.ipRetrievalServiceUrl }}) {{- else if and .Values.node.perNodeServices.setPublicAddressToExternal.enabled (eq 
.Values.node.perNodeServices.p2pServiceType "LoadBalancer") }} - EXTERNAL_ADDRESS=$(kubectl --namespace {{ .Release.Namespace }} get service {{ $fullname }}-${POD_INDEX}-rc-p2p -o jsonpath='{.status.loadBalancer.ingress[0].hostname}') - echo -n "${EXTERNAL_ADDRESS}" > /data/node_external_address - echo "External hostname is ${EXTERNAL_ADDRESS}, saved to /data/node_external_address" + EXTERNAL_ADDRESS=$(kubectl --namespace {{ .Release.Namespace }} get service {{ $fullname }}-${POD_INDEX}-rc-p2p -o jsonpath='{.status.loadBalancer.ingress[0].hostname}') + {{- else if eq .Values.proxy.provider "ambassador" }} + EXTERNAL_ADDRESS="{{ $fullname }}-${POD_INDEX}.{{ .Values.proxy.external_url }}" + EXTERNAL_P2P_PORT="{{ .Values.proxy.p2p }}" {{- else if eq .Values.node.perNodeServices.p2pServiceType "ClusterIP" }} - EXTERNAL_ADDRESS={{ $fullname }}-${POD_INDEX}-rc-p2p.{{ .Release.Namespace }}.svc.cluster.local - echo -n "${EXTERNAL_ADDRESS}" > /data/node_external_address - echo "External hostname is ${EXTERNAL_ADDRESS}, saved to /data/node_external_address" + EXTERNAL_ADDRESS={{ $fullname }}-${POD_INDEX}-rc-p2p.{{ .Release.Namespace }}.svc.cluster.local + EXTERNAL_P2P_PORT="{{ .Values.node.ports.p2p }}" {{- end }} - volumeMounts: - - mountPath: /data - name: chain-data - {{- end }} - containers: - - name: {{ .Values.node.chain }} - image: {{ .Values.image.repository }}:{{ .Values.image.tag }} - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: ["/bin/sh", "-c"] - args: - - |- - #!/bin/sh - {{- if .Values.node.perNodeServices.createP2pService }} - if [ ! -s /data/node_external_address ]; then echo "EXTERNAL_ADDRESS is empty" && exit 1 ; fi - EXTERNAL_ADDRESS="$(cat /data/node_external_address)" - echo "EXTERNAL_ADDRESS=${EXTERNAL_ADDRESS}" - RELAY_CHAIN_P2P_PORT="$(cat /data/relay_chain_p2p_port)" - echo "RELAY_CHAIN_P2P_PORT=${RELAY_CHAIN_P2P_PORT}" - {{- if eq .Values.proxy.provider "ambassador" }} - EXTERNAL_ADDRESS="{{ .Values.proxy.external_url }}" - echo "EXTERNAL_ADDRESS=${EXTERNAL_ADDRESS}" - EXTERNAL_P2P_PORT="{{ .Values.proxy.p2p }}" - echo "EXTERNAL_P2P_PORT=${EXTERNAL_P2P_PORT}" + + echo "Step 7: Start the node" + exec {{ .Values.node.command }} \ + --name=${POD_NAME} \ + --base-path=/data/ \ + --node-key=${NODE_KEY} \ + --chain={{ if .Values.node.customChainspecUrl }}{{ .Values.node.customChainspecPath }}{{ else }}${CHAIN}{{ end }} \ + {{- if or (eq .Values.node.role "authority") (eq .Values.node.role "validator") }} + --validator \ + {{- else if eq .Values.node.role "collator" }} + --collator \ + {{- else if or (eq .Values.node.role "light") (eq .Values.node.role "member") }} + --light \ {{- end }} - {{- if .Values.node.collator.isParachain }} - PARA_CHAIN_P2P_PORT="$(cat /data/para_chain_p2p_port)" - echo "PARA_CHAIN_P2P_PORT=${PARA_CHAIN_P2P_PORT}" + {{- if .Values.node.isBootnode.enabled }} + --bootnodes "/dns4/{{ .Values.node.isBootnode.bootnodeAddr }}/tcp/{{ .Values.node.isBootnode.bootnodePort }}/p2p/${BOOTNODE_ID}" \ {{- end }} + {{- if (.Values.node.collator.isParachain) }} + --base-path=/data/relay/ \ + --public-addr=/dns4/${EXTERNAL_ADDRESS}/tcp/${PARA_CHAIN_P2P_PORT} \ + --listen-addr=/ip4/0.0.0.0/tcp/${PARA_CHAIN_P2P_PORT} \ + {{- else if (.Values.node.perNodeServices.createP2pService) }} + --public-addr=/dns4/${EXTERNAL_ADDRESS}/tcp/${EXTERNAL_P2P_PORT} \ + --listen-addr=/ip4/0.0.0.0/tcp/${RELAY_CHAIN_P2P_PORT} \ {{- end }} - exec {{ .Values.node.command }} \ - --name=${POD_NAME} \ - --base-path=/data/ \ - --chain={{ if .Values.node.customChainspecUrl }}{{ 
.Values.node.customChainspecPath }}{{ else }}${CHAIN}{{ end }} \ - {{- if or (eq .Values.node.role "authority") (eq .Values.node.role "validator") }} - --validator \ - {{- end }} - {{- if eq .Values.node.role "collator" }} - --collator \ - {{- end }} - {{- if or (eq .Values.node.role "light") (eq .Values.node.role "member") }} - --light \ - {{- end }} - {{- if .Values.node.collator.isParachain }} - {{- if .Values.node.perNodeServices.createP2pService }} - {{- if .Values.node.perNodeServices.setPublicAddressToExternal.enabled }} - {{- if eq .Values.node.perNodeServices.p2pServiceType "NodePort" }} - --public-addr=/ip4/${EXTERNAL_ADDRESS}/tcp/${PARA_CHAIN_P2P_PORT} \ - {{- else if eq .Values.node.perNodeServices.p2pServiceType "LoadBalancer" }} - --public-addr=/dns4/${EXTERNAL_ADDRESS}/tcp/${PARA_CHAIN_P2P_PORT} \ - {{- end }} - {{- else if and (not .Values.node.perNodeServices.setPublicAddressToExternal.enabled) (eq .Values.node.perNodeServices.p2pServiceType "ClusterIP") }} - --public-addr=/dns4/${EXTERNAL_ADDRESS}/tcp/${PARA_CHAIN_P2P_PORT} \ - {{- end }} - --listen-addr=/ip4/0.0.0.0/tcp/${PARA_CHAIN_P2P_PORT} \ - {{- end }} - --listen-addr=/ip4/0.0.0.0/tcp/30334 \ - {{- end }} - {{- if .Values.node.persistGeneratedNodeKey }} - --node-key-file /data/node-key \ - {{- else if .Values.node.customNodeKey }} - --node-key $(cat /tmp/custom-node-key) \ - {{- else if .Values.vault.secretPrefix }} - --node-key $(cat /secrets/node_key) \ - {{- end }} - {{- if .Values.node.tracing.enabled }} - --jaeger-agent=127.0.0.1:{{ .Values.jaegerAgent.ports.compactPort }} \ - {{- end }} - {{- join " " .Values.node.flags | nindent 16 }} \ - {{- if .Values.node.collator.isParachain }} - -- \ - --base-path=/data/relay/ \ - {{- end }} - {{- if .Values.node.collator.relayChainCustomChainspecUrl }} - --chain={{ .Values.node.relayChainCustomChainspecPath }} \ - {{- end }} - {{- if .Values.node.perNodeServices.createP2pService }} - {{- if .Values.node.perNodeServices.setPublicAddressToExternal.enabled }} - {{- if eq .Values.node.perNodeServices.p2pServiceType "NodePort" }} - --public-addr=/ip4/${EXTERNAL_ADDRESS}/tcp/${RELAY_CHAIN_P2P_PORT} \ - {{- else if eq .Values.node.perNodeServices.p2pServiceType "LoadBalancer" }} - --public-addr=/dns4/${EXTERNAL_ADDRESS}/tcp/${RELAY_CHAIN_P2P_PORT} \ - {{- end }} - {{- else if and (not .Values.node.perNodeServices.setPublicAddressToExternal.enabled) (eq .Values.node.perNodeServices.p2pServiceType "ClusterIP") }} - --public-addr=/dns4/${EXTERNAL_ADDRESS}/tcp/${EXTERNAL_P2P_PORT} \ - {{- end }} - --listen-addr=/ip4/0.0.0.0/tcp/${RELAY_CHAIN_P2P_PORT} \ - {{- else }} - --listen-addr=/ip4/0.0.0.0/tcp/30333 \ - {{- end }} - {{- join " " .Values.node.collator.relayChainFlags | nindent 16 }} + {{- if .Values.node.tracing.enabled }} + --jaeger-agent=127.0.0.1:{{ .Values.jaegerAgent.ports.compactPort }} \ + {{- end }} + {{- if .Values.node.collator.relayChainCustomChainspecUrl }} + --chain={{ .Values.node.relayChainCustomChainspecPath }} \ + {{- end }} + {{- join " " .Values.node.flags | nindent 16 }} \ + {{- join " " .Values.node.collator.relayChainFlags | nindent 16 }} env: - name: CHAIN value: {{ .Values.node.chain }} @@ -551,6 +356,9 @@ spec: name: custom-node-key readOnly: true {{- end }} + - name: package-manager + mountPath: /scripts/package-manager.sh + subPath: package-manager.sh {{- if .Values.node.substrateApiSidecar.enabled }} - name: substrate-api-sidecar image: {{ .Values.substrateApiSidecar.image.repository }}:{{ .Values.substrateApiSidecar.image.tag }} @@ -604,8 +412,6 
@@ spec: {{- toYaml . | nindent 8 }} {{- end}} serviceAccountName: {{ $serviceAccountName }} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} {{- with .Values.nodeSelector }} nodeSelector: @@ -640,6 +446,9 @@ spec: - name: keystore emptyDir: medium: Memory + - name: package-manager + configMap: + name: package-manager volumeClaimTemplates: - apiVersion: v1 kind: PersistentVolumeClaim @@ -653,7 +462,7 @@ spec: kind: VolumeSnapshot apiGroup: snapshot.storage.k8s.io {{- end }} - storageClassName: {{ .Values.storageClass }} + storageClassName: substrate-storage-{{ .Release.Name }} resources: requests: - storage: {{ .Values.node.dataVolumeSize }} + storage: {{ .Values.storage.size }} diff --git a/platforms/substrate/charts/substrate-node/values.yaml b/platforms/substrate/charts/substrate-node/values.yaml index 53fd45b2fe5..d0f969bb6c9 100644 --- a/platforms/substrate/charts/substrate-node/values.yaml +++ b/platforms/substrate/charts/substrate-node/values.yaml @@ -1,12 +1,47 @@ + +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + # This is a YAML-formatted file. # Declare variables to be passed into your templates. +--- +global: + #Provide the service account name which will be created. + serviceAccountName: vault-auth + cluster: + provider: azure # choose from: minikube | aws + cloudNativeServices: false # 'false' is implemented + #Provide the kubernetes host url + #Eg. kubernetesUrl: https://10.3.8.5:6443 + kubernetesUrl: + vault: + # Provide the type of vault + type: "" # hashicorp + # Provide the vault role used. + role: vault-role + # Provide the network type + network: substrate + # Provide the vault server address + address: "" + # Provide the vault authPath configured to be used. + authPath: supplychain + # Provide the secret engine. + secretEngine: secretsv2 + # Provide the vault path where the secrets will be stored + secretPrefix: "data/supplychain" + # substrate node runtime image # Eg. repository: parity/substrate image: - repository: substrate - tag: 3.0.0 + repository: ghcr.io/inteli-poc/dscp-node + tag: v4.3.1 pullPolicy: IfNotPresent +# imagePullSecrets: +# - name: "regcred" # image for downloading chain snapshot - 7-Zip Docker image based on Alpine Linux. # Eg. repository: crazymax/7zip @@ -26,11 +61,9 @@ googleCloudSdk: image: repository: google/cloud-sdk tag: slim # more lightweight than the full image and still contains gsutil - #serviceAccountKey: "" + # serviceAccountKey: "" imagePullSecrets: [] -nameOverride: "" -fullnameOverride: "" serviceAccount: # Specifies whether a service account should be created @@ -42,7 +75,7 @@ serviceAccount: # Provide the service account name authenticated to vault. # NOTE: Make sure that the service account is already created and authenticated to use the vault. # Eg. name: vault-auth - name: "" + name: vault-auth podSecurityContext: runAsUser: 1000 @@ -64,101 +97,100 @@ ingress: # hosts: # - chart-example.local -#extraLabels: +# extraLabels: # type: rpc # This section contains the Substrate node details. node: - # Provide the name of the node. - # Eg. name: example - name: + chainDataSnapshotUrl: false + chainDataGcsBucketUrl: false # Specifies the chain specification. 
It can be one of the predefined ones (dev, local, or staging) # or it can be a path to a file with the chainspec (such as one exported by the `build-spec` subcommand). # Eg. chain: local or chain: - chain: + chain: inteli # Substrate CLI executable # Eg. command: substrate - command: + command: ./dscp-node + isBootnode: + enabled: true # false | true + bootnodeName: validator-1 + bootnodeAddr: validator-1-substrate-node-0-rc-p2p.supplychain-subs #if using proxy set it to this validator-1. + bootnodePort: 30333 # Provide the size of the volume # Eg. dataVolumeSize: 1Gi - dataVolumeSize: 100Gi + dataVolumeSize: 10Gi # Provide the number of replicas for your pods. # replicas: 1 replicas: 1 # role of the node # Eg. role: full - role: - #customChainspecUrl: - ## Node may require custom name for chainspec file. Example: moonbeam https://github.com/PureStake/moonbeam/issues/1104#issuecomment-996787548 - ## Note: path should start with /data/ since this folder mount in init container download-chainspec. - #Eg. customChainspecPath: "/data/chainspec.json" - customChainspecPath: "" - #chainDataSnapshotUrl: "https://dot-rocksdb.polkashots.io/snapshot" - #chainDataSnapshotFormat: 7z + role: validator # full | validator + customChainspecUrl: true + # Node may require custom name for chainspec file. Example: moonbeam https://github.com/PureStake/moonbeam/issues/1104#issuecomment-996787548 + # Note: path should start with /data/ since this folder mount in init container download-chainspec. + # Eg. customChainspecPath: "/data/chainspec.json" + customChainspecPath: "/data/chainspec.json" + # chainDataSnapshotUrl: "https://dot-rocksdb.polkashots.io/snapshot" + # chainDataSnapshotFormat: 7z # Specifies the directory for storing all of the data related to this chain. # Eg. 
chainPath: /tmp/alice chainPath: "" - #chainDataKubernetesVolumeSnapshot: "" - #chainDataGcsBucketUrl: "" + # chainDataKubernetesVolumeSnapshot: "" + # chainDataGcsBucketUrl: "" collator: isParachain: false - relayChain: polkadot - # relayChainCustomChainspecUrl: "" - # relayChainCustomChainspecPath: "/data/relay_chain_chainspec.json" - # relayChainDataSnapshotUrl: "https://dot-rocksdb.polkashots.io/snapshot" - # relayChainDataSnapshotFormat: 7z - # relayChainPath: "" - # relayChainDataKubernetesVolumeSnapshot: "" - # relayChainDataGcsBucketUrl: "" - # relayChainFlags: - enableStartupProbe: - enableReadinessProbe: + # relayChainCustomChainspecUrl: "" + # relayChainCustomChainspecPath: "/data/relay_chain_chainspec.json" + # relayChainDataSnapshotUrl: "https://dot-rocksdb.polkashots.io/snapshot" + # relayChainDataSnapshotFormat: 7z + # relayChainPath: "" + # relayChainDataKubernetesVolumeSnapshot: "" + # relayChainDataGcsBucketUrl: "" + # relayChainFlags: + + enableStartupProbe: false + enableReadinessProbe: false flags: - # - "--rpc-external" - # - "--ws-external" - # - "--rpc-methods=safe" - # - "--rpc-cors=all" - # - "--prometheus-external" - keys: {} - # - type: "gran" - # scheme: "ed25519" - # seed: "//Alice//gran" - # - type: "babe" - # scheme: "sr25519" - # seed: "//Alice//babe" + - "--rpc-external" + - "--ws-external" + - "--rpc-methods=Unsafe" + - "--rpc-cors=all" + - "--unsafe-ws-external" + - "--unsafe-rpc-external" + persistGeneratedNodeKey: false - #customNodeKey: "" + # customNodeKey: "" resources: {} serviceMonitor: enabled: false - #namespace: monitoring - #interval: 10s + # namespace: monitoring + # interval: 10s # scrapeTimeout: 10s + perNodeServices: createApiService: true createP2pService: true - p2pServiceType: ClusterIP # Must be type ClusterIP, NodePort or LoadBalancer, If using type NodePort or LoadBalancer then you must set NodeSelecter accordingly. - relayServiceAnnotations: {} - paraServiceAnnotations: {} + p2pServiceType: ClusterIP # Must be type ClusterIP, NodePort or LoadBalancer, If using type NodePort or LoadBalancer then you must set NodeSelecter accordingly. setPublicAddressToExternal: enabled: false - ipRetrievalServiceUrl: https://ifconfig.io/ip - #podManagementPolicy: Parallel + ipRetrievalServiceUrl: https://ifconfig.io + podManagementPolicy: Parallel + ports: - # Specifies the port to listen on for peer-to-peer (p2p) traffic - # Eg. p2p: 30333 - p2p: - # Specifies the port to listen on for incoming WebSocket traffic - # Eg. ws: 9944 - ws: - # Specifies the port to listen on for incoming RPC traffic - # Eg. 9933 - rpc: + # Specifies the port to listen on for peer-to-peer (p2p) traffic + # Eg. p2p: 30333 + p2p: 30333 #30333 + # Specifies the port to listen on for incoming WebSocket traffic + # Eg. ws: 9944 + ws: 9944 + # Specifies the port to listen on for incoming RPC traffic + # Eg. 9933 + rpc: 9933 # Enables Jaeger Agent as a sidecar tracing: enabled: false - + # Enables Sustrate API as a sidecar substrateApiSidecar: enabled: false @@ -167,14 +199,14 @@ node: proxy: # Mention the proxy provider. Currently ambassador is supported # eg. provider: ambassador - provider: ambassador + provider: none # none | ambassador # url that will be added in DNS recordset # eg. external_url: test.substrate.example.com - external_url: + external_url: # Mention the p2p port configured on proxy. # NOTE: Make sure that the port is enabled and not binded on the proxy. # Eg. 
p2p: 15010 - p2p: + p2p: # Provide the secret name which contains the certificate certSecret: @@ -215,29 +247,12 @@ tolerations: [] affinity: {} -# Provide the name of the storageclass. -# NOTE: Make sure that the storageclass exist prior to this deployment as -# this chart doesn't create the storageclass. -# Eg. storageClass: gcpstorageclass -storageClass: - extraContainers: [] - -# This section contains the vault related information. -# NOTE: Make sure that the vault is already unsealed, intialized and configured to -# use the Kubernetes service account token based authentication. -vault: - # Provide the vault address - # Eg. address: http://vault.example.com:8200 - address: - # Provide the vault path where the secrets are stored - # Eg. secretPrefix: secretsv2/sub-org-name - secretPrefix: - # Provide the auth path configured to be used. Default is /kubernetes - authPath: - # Provide the vault role used. - # Eg. appRole: vault-role - appRole: - # NOTE: The alpine image used is the base alpine image with CURL installed. - # Eg. image: ghcr.io/hyperledger/bevel-alpine:latest - image: +# Override necessary Subchart values +storage: + size: "10Gi" + # NOTE: when you set this to Retain, the volume WILL persist after the chart is delete and you need to manually delete it + reclaimPolicy: "Delete" # choose from: Delete | Retain + volumeBindingMode: Immediate # choose from: Immediate | WaitForFirstConsumer + allowedTopologies: + enabled: false diff --git a/platforms/substrate/charts/values/noproxy-and-novault/genesis.yaml b/platforms/substrate/charts/values/noproxy-and-novault/genesis.yaml new file mode 100644 index 00000000000..b1d32f2c6c1 --- /dev/null +++ b/platforms/substrate/charts/values/noproxy-and-novault/genesis.yaml @@ -0,0 +1,25 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
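Because the chart now ships opinionated defaults (dscp-node image, validator role, 10Gi storage, kubernetes vault type), day-to-day tuning is a small override file rather than edits to values.yaml. An illustrative sketch that only touches keys defined above; the tag and size are examples, not recommendations:

    # Hypothetical per-node override layered on top of the shipped values.
    cat > my-node-overrides.yaml <<'EOF'
    image:
      tag: v4.3.1              # pin the node runtime image
    node:
      role: validator
      isBootnode:
        enabled: false         # the first bootnode has no peer to dial
    storage:
      size: "20Gi"             # PVC size used by the volumeClaimTemplate
    EOF

    helm upgrade --install validator-1 substrate-node \
      -f values/noproxy-and-novault/node.yaml \
      -f my-node-overrides.yaml \
      -n supplychain-subs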
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +--- +# helm install genesis -f values/noproxy-and-novault/genesis.yaml -n supplychain-subs substrate-genesis + +# The following are for overriding global values +global: + serviceAccountName: vault-auth + cluster: + provider: azure + cloudNativeServices: false + vault: + # Provide the type of vault + type: kubernetes # hashicorp | kubernetes + +node: + validator: + count: 4 + member: + count: 1 + balance: 1152921504606846976 diff --git a/platforms/quorum/configuration/roles/create/crypto/ibft/meta/main.yaml b/platforms/substrate/charts/values/noproxy-and-novault/ipfs.yaml similarity index 58% rename from platforms/quorum/configuration/roles/create/crypto/ibft/meta/main.yaml rename to platforms/substrate/charts/values/noproxy-and-novault/ipfs.yaml index 589d39ae941..34b4e70a34d 100644 --- a/platforms/quorum/configuration/roles/create/crypto/ibft/meta/main.yaml +++ b/platforms/substrate/charts/values/noproxy-and-novault/ipfs.yaml @@ -3,7 +3,14 @@ # # SPDX-License-Identifier: Apache-2.0 ############################################################################################## - --- -dependencies: - - role: "setup/geth-bootnode" +global: + serviceAccountName: vault-auth + cluster: + provider: azure + cloudNativeServices: false + vault: + type: kubernetes # kubernetes | hashicorp + +proxy: + provider: none # none | ambassador diff --git a/platforms/substrate/charts/values/noproxy-and-novault/node.yaml b/platforms/substrate/charts/values/noproxy-and-novault/node.yaml new file mode 100644 index 00000000000..69850f7b562 --- /dev/null +++ b/platforms/substrate/charts/values/noproxy-and-novault/node.yaml @@ -0,0 +1,17 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +--- +# helm install validator-1 -f values/noproxy-and-novault/node.yaml -n supplychain-subs substrate-node +# helm upgrade validator-1 -f values/noproxy-and-novault/node.yaml -n supplychain-subs substrate-node + +global: + serviceAccountName: vault-auth + cluster: + provider: azure + cloudNativeServices: false + vault: + type: kubernetes # hashicorp | kubernetes diff --git a/platforms/substrate/charts/values/proxy-and-vault/genesis.yaml b/platforms/substrate/charts/values/proxy-and-vault/genesis.yaml new file mode 100644 index 00000000000..d686ab93d4b --- /dev/null +++ b/platforms/substrate/charts/values/proxy-and-vault/genesis.yaml @@ -0,0 +1,29 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
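The three noproxy-and-novault files above are meant to be installed in sequence against the same namespace; a hedged bring-up sketch using the release names from the file comments (the IPFS release name is illustrative):

    # Hypothetical manual bring-up with the noproxy-and-novault values.
    kubectl create namespace supplychain-subs

    # 1. Genesis job: creates keys for 4 validators and 1 member plus the chainspec.
    helm install genesis -f values/noproxy-and-novault/genesis.yaml -n supplychain-subs substrate-genesis

    # 2. Validator nodes: validator-1 doubles as the bootnode the others dial.
    helm install validator-1 -f values/noproxy-and-novault/node.yaml -n supplychain-subs substrate-node

    # 3. Optional IPFS node for member organisations.
    helm install member-1-ipfs -f values/noproxy-and-novault/ipfs.yaml -n supplychain-subs dscp-ipfs-node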
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## +--- +global: + serviceAccountName: vault-auth + vault: + type: hashicorp + network: substrate + address: http://vault.demo.com:8200 + authPath: supplychain + secretEngine: secretsv2 + certPrefix: "subs" + secretPrefix: "data/supplychain" + role: vault-role + cluster: + provider: azure + cloudNativeServices: false + kubernetesUrl: https://kubernetes.url + +node: + validator: + count: 4 + member: + count: 1 + balance: 1152921504606846976 + diff --git a/platforms/hyperledger-fabric/configuration/roles/create/channel_artifacts/vars/main.yaml b/platforms/substrate/charts/values/proxy-and-vault/ipfs.yaml similarity index 50% rename from platforms/hyperledger-fabric/configuration/roles/create/channel_artifacts/vars/main.yaml rename to platforms/substrate/charts/values/proxy-and-vault/ipfs.yaml index 22da8c2d586..10eb2b1f9f3 100644 --- a/platforms/hyperledger-fabric/configuration/roles/create/channel_artifacts/vars/main.yaml +++ b/platforms/substrate/charts/values/proxy-and-vault/ipfs.yaml @@ -3,9 +3,16 @@ # # SPDX-License-Identifier: Apache-2.0 ############################################################################################## - --- -tmp_directory: "{{ lookup('env', 'TMPDIR') | default('/tmp',true) }}" -fabric: - os: "linux" # use "darwin" for MacOS X, "windows" for Windows - arch: "amd64" # other possible values: "386","arm64","arm","ppc64le","s390x" +global: + serviceAccountName: vault-auth + vault: + type: hashicorp + network: substrate + address: http://vault.demo.com:8200 + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + role: vault-role +proxy: + provider: ambassador diff --git a/platforms/substrate/charts/values/proxy-and-vault/node.yaml b/platforms/substrate/charts/values/proxy-and-vault/node.yaml new file mode 100644 index 00000000000..60ec4297f91 --- /dev/null +++ b/platforms/substrate/charts/values/proxy-and-vault/node.yaml @@ -0,0 +1,27 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +--- +# helm install validator-1 -f values/noproxy-and-novault/node.yaml -n supplychain-subs substrate-node +# helm upgrade validator-1 -f values/noproxy-and-novault/node.yaml -n supplychain-subs substrate-node + +global: + serviceAccountName: vault-auth + cluster: + provider: azure + cloudNativeServices: false + vault: + type: hashicorp + network: substrate + address: http://vault.demo.com:8200 + authPath: supplychain + secretEngine: secretsv2 + secretPrefix: "data/supplychain" + role: vault-role + +proxy: + provider: ambassador # none | ambassador + external_url: test.yourdomain.com #specify the endpoint for the ambassador url diff --git a/platforms/substrate/configuration/deploy-network.yaml b/platforms/substrate/configuration/deploy-network.yaml index ca9c60ba15e..8bda8dace35 100644 --- a/platforms/substrate/configuration/deploy-network.yaml +++ b/platforms/substrate/configuration/deploy-network.yaml @@ -1,21 +1,22 @@ ############################################################################################## -# Copyright Accenture. All Rights Reserved. +# Copyright Accenture. All Rights Reserved. 
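For the proxy-and-vault flavour just above, the install command would presumably reference this directory rather than the noproxy path quoted in the file's header comment; a hedged example, assuming the Vault address and Ambassador domain placeholders in the sample are replaced with real ones:

    # Hypothetical install of a validator behind Ambassador with HashiCorp Vault.
    # Requires: Vault reachable at global.vault.address with the supplychain auth
    # path and vault-role configured, and DNS for *.test.yourdomain.com pointing
    # at the Ambassador load balancer.
    helm install validator-1 -f values/proxy-and-vault/node.yaml \
      -n supplychain-subs substrate-node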
# -# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: Apache-2.0 ############################################################################################## ############################################################################################## -# This playbook deploys a DLT network on existing Kubernetes clusters -# The Kubernetes clusters should already be created and the infomation to connect to the -# clusters be updated in the network.yaml file that is used as an input to this playbook +# This Ansible playbook deploys a Distributed Ledger Technology (DLT) network on existing +# Kubernetes clusters. The Kubernetes clusters must already be provisioned, and the necessary +# connection information should be updated in the `network.yaml` file, which serves as input +# for this playbook. ############################################################################################## ############################################################################################## -# To Run this playbook from this directory, use the following command (network.yaml also in this directory) -# ansible-playbook deploy-network.yaml -e "@./network.yaml" +# Usage: +# To execute this playbook from the current directory, use the following command: +# ansible-playbook deploy-network.yaml -e "@./network.yaml" ############################################################################################## - # Please ensure that the ../../shared/configuration playbooks have been run using the same network.yaml - hosts: ansible_provisioners gather_facts: no @@ -27,7 +28,7 @@ path: "./build" state: absent - # Create namespace + # Create Kubernetes namespaces for each organization - name: "Create namespace" include_role: name: create/namespace @@ -38,46 +39,19 @@ gitops: "{{ item.gitops }}" loop: "{{ network['organizations'] }}" - # Create Storageclass - - name: Create Storage Class + # Create necessary Kubernetes secrets for each organization + - name: "Create k8s secrets" include_role: - name: "{{ playbook_dir }}/../../../platforms/shared/configuration/roles/setup/storageclass" - vars: - sc_name: "{{ org.name | lower }}-bevel-storageclass" - region: "{{ org.k8s.region | default('eu-west-1') }}" - loop: "{{ network['organizations'] }}" - loop_control: - loop_var: org - - # Setup script for Vault and OS Package Manager - - name: "Setup script for Vault and OS Package Manager" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/setup/scripts" - vars: - namespace: "{{ org.name | lower }}-subs" - kubernetes: "{{ org.k8s }}" - loop: "{{ network['organizations'] }}" - loop_control: - loop_var: org - - # Setup Vault-Kubernetes accesses and Regcred for docker registry - - name: "Setup vault" - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/setup/vault_kubernetes" + name: create/secrets vars: - name: "{{ org.name | lower }}" component_ns: "{{ org.name | lower }}-subs" - component_name: "{{ org.name | lower }}-vaultk8s-job" - component_auth: "{{ network.env.type }}{{ name }}" - component_type: "organization" kubernetes: "{{ org.k8s }}" vault: "{{ org.vault }}" - gitops: "{{ org.gitops }}" loop: "{{ network['organizations'] }}" loop_control: loop_var: org - # Generate Ambassador certificate for nodes + # Generate Ambassador certificates for nodes (if using Ambassador as proxy) - name: "Create ambassador certificates for Nodes" include_role: name: create/certificates/ambassador @@ -93,51 +67,13 @@ loop: "{{ network['organizations']}}" when: 
network.env.proxy == "ambassador" - # Generate the key materials and stores them in vault - - name: "Generate key materials for Nodes" - include_role: - name: create/keys - vars: - name: "{{ item.name | lower }}" - component_name: "{{ item.name | lower }}" - component_ns: "{{ item.name | lower }}-subs" - vault: "{{ item.vault }}" - peers: "{{ item.services.peers }}" - charts_dir: "{{ item.gitops.chart_source }}" - values_dir: "{{playbook_dir}}/../../../{{item.gitops.release_dir}}/{{ item.name | lower }}" - org: "{{ item }}" - gitops: "{{ item.gitops }}" - kubernetes: "{{ item.k8s }}" - loop: "{{ network['organizations'] }}" - - # Generate the genesis.json for all orgs of the network - - name: "Generate genesis for the network" - include_role: - name: create/genesis - vars: - build_path: "./build" - sudo_org_query: "organizations[?type=='superuser']" - org: "{{ network | json_query(sudo_org_query) | first }}" - - # Deploy Substrate bootnodes - - name: "Deploy Bootnodes" + # Generate Genesis file that will contain the record of all nodes + - name: "Generate Genesis" include_role: - name: create/bootnode - vars: - build_path: "./build" - kubernetes: "{{ item.k8s }}" - component_ns: "{{ item.name | lower }}-subs" - name: "{{ item.name | lower }}" - peers: "{{ item.services.peers }}" - loop: "{{ network['organizations'] }}" - - # Generate the bootnode list file - - name: "Generate bootnode file list for the network" - include_role: - name: create/bootnodefile + name: create/generate_genesis - # Deploy Substrate validator nodes - - name: "Deploy Substrate validators" + # Deploy Bootnodes or Validator nodes + - name: "Deploy Bootnode or Validator nodes" include_role: name: create/validator_node vars: @@ -148,32 +84,8 @@ peers: "{{ item.services.peers }}" loop: "{{ network['organizations'] }}" - # Deploy ipfs bootnodes - - name: "Deploy ipfs bootnodes" - include_role: - name: create/ipfs_bootnode - vars: - build_path: "./build" - charts_dir: "{{ item.gitops.chart_source }}" - values_dir: "{{playbook_dir}}/../../../{{item.gitops.release_dir}}/{{ item.name | lower }}" - org: "{{ item }}" - vault: "{{ item.vault }}" - peers: "{{ item.services.peers }}" - gitops: "{{ item.gitops }}" - component_name: "{{ item.name | lower }}" - component_ns: "{{ item.name | lower }}-subs" - name: "{{ item.name | lower }}" - loop: "{{ network['organizations'] }}" - when: network.config.node_image == "inteli-poc/dscp-node" - - # Generate the ipfsbootnode list file - - name: "Generate ipfsbootnode file list for the network" - include_role: - name: create/ipfsbootnodefile - when: network.config.node_image == "inteli-poc/dscp-node" - - # Deploy Substrate member nodes - - name: "Deploy Substrate members" + # Deploy Member nodes, including IPFS node if enabled + - name: "Deploy Member nodes, including IPFS node if enabled" include_role: name: create/member_node vars: @@ -184,7 +96,7 @@ name: "{{ item.name | lower }}" peers: "{{ item.services.peers }}" loop: "{{ network['organizations'] }}" - + # These variables can be overriden from the command line vars: install_os: "linux" # Default to linux OS diff --git a/platforms/substrate/configuration/roles/create/certificates/ambassador/tasks/main.yaml b/platforms/substrate/configuration/roles/create/certificates/ambassador/tasks/main.yaml index 25ab4c2c1fa..9b76752cfaa 100644 --- a/platforms/substrate/configuration/roles/create/certificates/ambassador/tasks/main.yaml +++ b/platforms/substrate/configuration/roles/create/certificates/ambassador/tasks/main.yaml @@ -11,12 +11,13 @@ 
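Operationally, the trimmed playbook above is still driven exactly as its header describes; a short usage sketch (the shared configuration playbook is a prerequisite and is only referenced in a comment here):

    # Run from platforms/substrate/configuration with network.yaml in the same directory.
    # Prerequisite: the ../../shared/configuration playbooks have been run with the same network.yaml.
    ansible-playbook deploy-network.yaml -e "@./network.yaml"

    # Re-run from a specific step, e.g. only regenerate the genesis job:
    ansible-playbook deploy-network.yaml -e "@./network.yaml" --start-at-task "Generate Genesis"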
############################################################################################## --- -# check if ambassadortls dir is there +# Check if Ambassador TLS directory exists - name: "check if dir exists or not" stat: path: "{{ ambassadortls }}" register: ambassadortlsdir_check +# Ensure Ambassador TLS directory exists - name: Ensure ambassador tls dir exists include_role: name: "{{ playbook_dir }}/../../shared/configuration/roles/check/directory" @@ -24,47 +25,25 @@ path: "{{ ambassadortls }}" when: not ambassadortlsdir_check.stat.exists -# Check ambassador tls certs already created -- name: Check if ambassador tls already created - shell: | - vault kv get -field=tlscacerts {{ vault.secret_path | default('secretsv2') }}/{{ organisation }}/tlscerts - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: ambassador_tls_certs - ignore_errors: yes - tags: - - notest - -# Gets the existing ambassador tls certs -- name: Get ambassador and tls certs from Vault - shell: | - vault kv get -format=yaml {{ vault.secret_path | default('secretsv2') }}/{{ organisation }}/tlscerts - environment: - VAULT_ADDR: "{{ vault.url }}" - VAULT_TOKEN: "{{ vault.root_token }}" - register: ambassador_tls_certs_yaml - when: not ambassador_tls_certs.failed - -# Get ambassador tls certs -- name: Get ambassador tls certs - include_role: - name: "setup/get_crypto" - vars: - vault_output: "{{ ambassador_tls_certs_yaml.stdout | from_yaml }}" - type: "ambassador" - cert_path: "{{ ambassadortls }}" - when: ambassador_tls_certs.failed == False - +# Check if Ambassador credentials secret already exists +- name: Check Ambassador cred exists + k8s_info: + kind: Secret + namespace: "{{ component_ns }}" + name: "{{ component_name }}-ambassador-certs" + kubeconfig: "{{ kubernetes.config_file }}" + context: "{{ kubernetes.context }}" + register: get_ambassador_secret -# check if ambassadortls dir is there +# Check OpenSSL configuration file exists for Ambassador TLS if Ambassador secret does not exist - name: "check if openssl conf file exists or not" stat: path: "./build/openssl{{ component_name }}.conf" register: openssl_conf_check + when: get_ambassador_secret.resources|length == 0 -# Generates the openssl file for domain -- name: Generate openssl conf file +# Generate OpenSSL configuration file for domain if Ambassador secret does not exist +- name: Generate OpenSSL conf file shell: | cd ./build cat <openssl{{ component_name }}.conf @@ -86,44 +65,32 @@ domain_name: "{{ component_name }}.{{ item.external_url_suffix }}" domain_name_api: "{{ component_name }}api.{{ item.external_url_suffix }}" domain_name_web: "{{ component_name }}web.{{ item.external_url_suffix }}" - when: ambassador_tls_certs.failed == True and (not openssl_conf_check.stat.exists) + when: get_ambassador_secret.resources|length == 0 -# Generates the ambassador tls certificates if already not generated -- name: Generate ambassador tls certs +# Generate Ambassador TLS certificates if Ambassador secret does not exist +- name: Generate Ambassador TLS certs shell: | openssl req -x509 -out {{ ambassadortls }}/ambassador.crt -keyout {{ ambassadortls }}/ambassador.key -newkey rsa:2048 -nodes -sha256 -subj "/CN={{ domain_name }}" -extensions EXT -config "{{playbook_dir}}/build/openssl{{ component_name }}.conf" vars: domain_name: "{{ component_name }}.{{ item.external_url_suffix }}" - when: ambassador_tls_certs.failed == True and (not openssl_conf_check.stat.exists) + when: get_ambassador_secret.resources|length == 0 -# 
Stores the genreated ambassador tls certificates to vault -- name: Putting tls certs to vault +# Store the generated Ambassador TLS certificates in Vault if Ambassador secret does not exist +- name: Putting TLS certs to vault shell: | - vault kv put {{ vault.secret_path | default('secretsv2') }}/{{ organisation }}/tlscerts tlscacerts="$(cat {{ ambassadortls }}/ambassador.crt | base64)" tlskey="$(cat {{ ambassadortls }}/ambassador.key | base64)" + vault kv put {{ vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ organisation }}/tlscerts tlscacerts="$(cat {{ ambassadortls }}/ambassador.crt | base64)" tlskey="$(cat {{ ambassadortls }}/ambassador.key | base64)" environment: VAULT_ADDR: "{{ vault.url }}" VAULT_TOKEN: "{{ vault.root_token }}" - when: ambassador_tls_certs.failed == True - tags: - - notest - -# Check if Ambassador credentials exist already -- name: Check Ambassador cred exists - k8s_info: - kind: Secret - namespace: "{{ component_ns }}" - name: "{{ component_name }}-ambassador-certs" - kubeconfig: "{{ kubernetes.config_file }}" - context: "{{ kubernetes.context }}" - register: get_ambassador_secret + when: get_ambassador_secret.resources|length == 0 -# Create the Ambassador TLS credentials for ambassador +# Create the Ambassador credentials secret if it does not exist - name: Create the Ambassador credentials shell: | KUBECONFIG={{ kubernetes.config_file }} kubectl create secret tls {{ component_name }}-ambassador-certs --cert={{ ambassadortls }}/ambassador.crt --key={{ ambassadortls }}/ambassador.key -n {{ component_ns }} when: get_ambassador_secret.resources|length == 0 -# Copy generated crt to build location for doorman and networkmap +# Copy generated Ambassador TLS certificate to specified build location if defined - name: Copy generated ambassador tls certs to given build location copy: src: "{{ ambassadortls }}/ambassador.crt" diff --git a/platforms/substrate/configuration/roles/create/generate_genesis/tasks/main.yaml b/platforms/substrate/configuration/roles/create/generate_genesis/tasks/main.yaml new file mode 100644 index 00000000000..1ede0c2613a --- /dev/null +++ b/platforms/substrate/configuration/roles/create/generate_genesis/tasks/main.yaml @@ -0,0 +1,68 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
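The condition threaded through all of the rewritten tasks above is simply whether the <org>-ambassador-certs secret already exists; everything else is plain openssl, vault and kubectl. A simplified manual equivalent (it drops the generated SAN config file and uses hypothetical org, namespace and env-type names):

    # Hypothetical manual equivalent of the Ambassador certificate tasks.
    ORG=carrier; NS=${ORG}-subs; DOMAIN=${ORG}.test.yourdomain.com; ENV_TYPE=dev

    if ! kubectl get secret "${ORG}-ambassador-certs" -n "$NS" >/dev/null 2>&1; then
      # Self-signed certificate for the organisation's external domain.
      openssl req -x509 -newkey rsa:2048 -nodes -sha256 \
        -subj "/CN=${DOMAIN}" -keyout ambassador.key -out ambassador.crt

      # Keep a copy in Vault under the new env-type-prefixed path.
      vault kv put "secretsv2/${ENV_TYPE}${ORG}/tlscerts" \
        tlscacerts="$(base64 < ambassador.crt)" tlskey="$(base64 < ambassador.key)"

      # TLS secret referenced by the charts' proxy.certSecret value.
      kubectl create secret tls "${ORG}-ambassador-certs" \
        --cert=ambassador.crt --key=ambassador.key -n "$NS"
    fi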
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Set organization variable based on the first organization in the network +- name: Set organization variable + set_fact: + organization: "{{ network.organizations[0] }}" + +# Initialize counts for validator and member nodes +- name: Initialize counts + set_fact: + validator_count: 0 + member_count: 0 + +# Loop through each organization to count nodes +- name: Count nodes + include_tasks: nodes.yaml + vars: + peers: "{{ item.services.peers }}" + loop: "{{ network['organizations'] }}" + + +# Retrieve kubernetes server url +- name: Get the kubernetes server url + shell: | + KUBECONFIG={{ organization.k8s.config_file }} kubectl config view --minify | grep server | cut -f 2- -d ":" | tr -d " " + register: kubernetes_server_url + +# Generate keys and genesis file +- name: Generate keys and genesis file + include_role: + name: create/helm_component + vars: + name: "{{ organization.name }}" + org_name: "{{ organization.name }}" + type: "genesis_job" + component_name: "genesis" + component_ns: "{{ organization.name }}-subs" + vault: "{{ organization.vault }}" + kubernetes_url: "{{ kubernetes_server_url.stdout }}" + charts_dir: "{{ organization.gitops.chart_source }}" + values_dir: "{{playbook_dir}}/../../../{{ organization.gitops.release_dir }}" + +# Push the created deployment files to the Git repository +- name: "Push the created deployment files to the Git repository" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" + vars: + component_ns: "{{ organization.name }}" + GIT_DIR: "{{ playbook_dir }}/../../../" + msg: "[ci skip] Pushing key management job files for {{ component_ns }}" + gitops: "{{ organization.gitops }}" + tags: notest + +# Check if the Genesis job is completed +- name: Check if the Genesis job is completed + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" + vars: + component_name: "genesis" + component_type: Job + namespace: "{{ organization.name }}-subs" + kubernetes: "{{ organization.k8s }}" + tags: + - notest diff --git a/platforms/substrate/configuration/roles/create/generate_genesis/tasks/nodes.yaml b/platforms/substrate/configuration/roles/create/generate_genesis/tasks/nodes.yaml new file mode 100644 index 00000000000..271ac3927be --- /dev/null +++ b/platforms/substrate/configuration/roles/create/generate_genesis/tasks/nodes.yaml @@ -0,0 +1,23 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
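Besides the counts computed in nodes.yaml below, the only cluster-derived input to the genesis HelmRelease is the API server URL; the task's shell pipeline can be checked standalone (kubeconfig path hypothetical):

    # Hypothetical standalone check of the value injected as kubernetes_url.
    KUBECONFIG=/path/to/org-kubeconfig.yaml kubectl config view --minify \
      | grep server | cut -f 2- -d ":" | tr -d " "
    # -> e.g. https://10.3.8.5:6443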
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Increment the validator_count for each peer that is a 'bootnode' or 'validator' +- name: Count validator nodes + set_fact: + validator_count: "{{ validator_count | int + 1 }}" + loop: "{{ peers }}" + loop_control: + loop_var: peer + when: peer.type == 'bootnode' or peer.type == 'validator' + +# Increment the member_count for each peer that is a 'member' +- name: Count member nodes + set_fact: + member_count: "{{ member_count | int + 1 }}" + loop: "{{ peers }}" + loop_control: + loop_var: peer + when: peer.type == 'member' diff --git a/platforms/substrate/configuration/roles/create/helm_component/templates/dscp_ipfs_node.tpl b/platforms/substrate/configuration/roles/create/helm_component/templates/dscp_ipfs_node.tpl index a5e678e26ef..aa4a1de96b4 100644 --- a/platforms/substrate/configuration/roles/create/helm_component/templates/dscp_ipfs_node.tpl +++ b/platforms/substrate/configuration/roles/create/helm_component/templates/dscp_ipfs_node.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} @@ -17,6 +17,16 @@ spec: namespace: flux-{{ network.env.type }} chart: {{ charts_dir }}/dscp-ipfs-node values: + global: + serviceAccountName: vault-auth + vault: + type: hashicorp + role: vault-role + network: substrate + address: {{ vault.url }} + authPath: {{ network.env.type }} + secretEngine: {{ vault.secret_path | default("secretsv2") }} + secretPrefix: "data/{{ network.env.type }}{{ vault_key }}" fullnameOverride: {{ peer.name }}-ipfs namespace: {{ component_ns }} config: @@ -66,9 +76,3 @@ spec: external_url: {{ peer.name }}-ipfs-swarm.{{ external_url }} port: {{ peer.ipfs.ambassador }} certSecret: {{ org.name | lower }}-ambassador-certs - vault: - address: {{ vault.url }} - role: vault-role - authpath: {{ network.env.type }}{{ name }} - serviceaccountname: vault-auth - certsecretprefix: {{ vault.secret_path | default('secretsv2') }}/data/{{ name }}/{{ peer.name }} diff --git a/platforms/substrate/configuration/roles/create/helm_component/templates/genesis_job.tpl b/platforms/substrate/configuration/roles/create/helm_component/templates/genesis_job.tpl index 382d0411345..dc0d087a01e 100644 --- a/platforms/substrate/configuration/roles/create/helm_component/templates/genesis_job.tpl +++ b/platforms/substrate/configuration/roles/create/helm_component/templates/genesis_job.tpl @@ -1,13 +1,13 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: - name: {{ component_name }} + name: {{ component_name | replace('_','-') }} namespace: {{ component_ns }} annotations: fluxcd.io/automated: "false" spec: interval: 1m - releaseName: {{ component_name }} + releaseName: {{ component_name | replace('_','-') }} chart: spec: interval: 1m @@ -17,23 +17,36 @@ spec: namespace: flux-{{ network.env.type }} chart: {{ charts_dir }}/substrate-genesis values: + global: + serviceAccountName: vault-auth + cluster: + provider: azure + cloudNativeServices: false + kubernetesUrl: {{ kubernetes_url }} + vault: + type: hashicorp + role: vault-role + network: substrate + address: {{ vault.url }} + authPath: {{ network.env.type }} + secretEngine: {{ vault.secret_path | default("secretsv2") }} + certPrefix: {{ network.env.type }}{{ org_name }} + secretPrefix: "data/{{ network.env.type }}{{ org_name }}" + removeGenesisOnDelete: + 
enabled: true + image: + repository: ghcr.io/hyperledger/bevel-k8s-hooks + tag: qgt-0.2.12 + pullPolicy: IfNotPresent + chain: {{ network.config.chain }} node: name: {{ component_name }} image: {{ network.docker.url }}/{{ network.config.node_image }} imageTag: {{ network.version }} pullPolicy: IfNotPresent command: {{ network.config.command }} - metadata: - name: {{ component_name }} - namespace: {{ component_ns }} - vault: - address: {{ vault.url }} - role: vault-role - authpath: {{ network.env.type }}{{ name }} - serviceaccountname: vault-auth - certsecretprefix: {{ vault.secret_path | default('secretsv2') }}/{{ name }} - chain: {{ network.config.chain }} - aura_keys: {{ aura_key_list }} - grandpa_keys: {{ grandpa_key_list }} - members: - {{ member_list | to_nice_yaml | indent(width=6) }} + validator: + count: {{ validator_count }} + member: + count: {{ member_count }} + balance: 1152921504606846976 diff --git a/platforms/substrate/configuration/roles/create/helm_component/templates/node_substrate.tpl b/platforms/substrate/configuration/roles/create/helm_component/templates/node_substrate.tpl index bff9ddb145b..f1df9a27d34 100644 --- a/platforms/substrate/configuration/roles/create/helm_component/templates/node_substrate.tpl +++ b/platforms/substrate/configuration/roles/create/helm_component/templates/node_substrate.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} @@ -16,7 +16,6 @@ spec: name: flux-{{ network.env.type }} namespace: flux-{{ network.env.type }} chart: {{ charts_dir }}/substrate-node - values: image: repository: {{ network.docker.url }}/{{ network.config.node_image }} @@ -44,7 +43,12 @@ spec: node: name: {{ peer.name }} chain: {{ network.config.chain }} - command: {{ command }} + command: {{ command }} + isBootnode: + enabled: {{ isBootnode }} + bootnodeName: validator-1 + bootnodeAddr: validator-1.{{ external_url }} + bootnodePort: 15051 dataVolumeSize: 10Gi replicas: 1 role: {{ role }} @@ -62,19 +66,9 @@ spec: - "--rpc-cors=all" - "--unsafe-ws-external" - "--unsafe-rpc-external" -{% if bootnode_data is defined %} - - "--bootnodes '{{ bootnode_data[1:] | join(',') }}'" -{% endif %} {% if peer.type == 'member' %} - "--pruning=archive" {% endif %} - keys: - - type: "gran" - scheme: "ed25519" - seed: "grandpa_seed" - - type: "aura" - scheme: "sr25519" - seed: "aura_seed" persistGeneratedNodeKey: false resources: {} @@ -102,15 +96,22 @@ spec: proxy: provider: ambassador - external_url: {{ peer.name }}.{{ external_url }} + external_url: {{ external_url }} p2p: {{ peer.p2p.ambassador }} certSecret: {{ org.name | lower }}-ambassador-certs storageClass: {{ storageclass_name }} + storage: + size: "10Gi" + reclaimPolicy: "Delete" + volumeBindingMode: Immediate + allowedTopologies: + enabled: false + vault: address: {{ vault.url }} secretPrefix: {{ vault.secret_path | default('secretsv2') }}/data/{{ name }} authPath: {{ network.env.type }}{{ name }} appRole: vault-role - image: ghcr.io/hyperledger/alpine-utils:1.0 + image: ghcr.io/hyperledger/alpine-utils:1.0 diff --git a/platforms/substrate/configuration/roles/create/helm_component/templates/substrate_keys_job.tpl b/platforms/substrate/configuration/roles/create/helm_component/templates/substrate_keys_job.tpl index eabfd0a0843..01e6f61c57f 100644 --- a/platforms/substrate/configuration/roles/create/helm_component/templates/substrate_keys_job.tpl +++ 
b/platforms/substrate/configuration/roles/create/helm_component/templates/substrate_keys_job.tpl @@ -1,4 +1,4 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: {{ component_name }} diff --git a/platforms/substrate/configuration/roles/create/ipfs_bootnode/tasks/main.yaml b/platforms/substrate/configuration/roles/create/ipfs_bootnode/tasks/main.yaml index 8d250caeadc..4f47620241d 100644 --- a/platforms/substrate/configuration/roles/create/ipfs_bootnode/tasks/main.yaml +++ b/platforms/substrate/configuration/roles/create/ipfs_bootnode/tasks/main.yaml @@ -11,7 +11,7 @@ vars: component_name: "{{ peer.name }}-ipfs-node" type: "dscp_ipfs_node" - storageclass_name: "{{ name }}-bevel-storageclass" + storageclass_name: "{{ item.name | lower }}-bevel-storageclass" external_url: "{{ item.external_url_suffix }}" git_url: "{{ item.gitops.git_url }}" git_branch: "{{ item.gitops.branch }}" diff --git a/platforms/substrate/configuration/roles/create/ipfsbootnodefile/tasks/get_bootnode_data.yaml b/platforms/substrate/configuration/roles/create/ipfsbootnodefile/tasks/get_bootnode_data.yaml index e939b3b903e..ceb1e69e1f9 100644 --- a/platforms/substrate/configuration/roles/create/ipfsbootnodefile/tasks/get_bootnode_data.yaml +++ b/platforms/substrate/configuration/roles/create/ipfsbootnodefile/tasks/get_bootnode_data.yaml @@ -19,7 +19,7 @@ # Fetch the bootnode's peer id from vault - name: Fetch bootnode peer id from vault shell: | - vault kv get -field=peer_id {{ vault.secret_path | default('secretsv2') }}/{{ org.name }}/{{ peer.name }}/ipfs + vault kv get -field=peer_id "{{ vault.secret_path | default('secretsv2') }}/{{ network.env.type }}/{{ peer.name }}-ipfs" environment: VAULT_ADDR: "{{ vault.url }}" VAULT_TOKEN: "{{ vault.root_token }}" diff --git a/platforms/substrate/configuration/roles/create/member_node/tasks/ipfs_node.yaml b/platforms/substrate/configuration/roles/create/member_node/tasks/ipfs_node.yaml new file mode 100644 index 00000000000..d3e5a9c65ac --- /dev/null +++ b/platforms/substrate/configuration/roles/create/member_node/tasks/ipfs_node.yaml @@ -0,0 +1,68 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
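A hedged manual lookup matching the new peer-id path above (environment type and peer name are placeholders; the old per-org path is kept in a comment for contrast):

    # Hypothetical fetch of an IPFS bootnode peer id after the path change.
    export VAULT_ADDR=http://vault.demo.com:8200
    export VAULT_TOKEN=...        # token with read access to the secrets engine

    # old: secretsv2/<org>/<peer>/ipfs              (field: peer_id)
    # new: secretsv2/<network.env.type>/<peer>-ipfs (field: peer_id)
    vault kv get -field=peer_id "secretsv2/dev/member-1-ipfs"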
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Set node_list to empty +- name: Set node_list to [] + set_fact: + node_list: [] + +# Fetch the bootnode's node ID from Vault +- name: Fetch node_id of {{ peer.name }} node + shell: | + KUBECONFIG={{ kubernetes.config_file }} kubectl get secret "substrate-node-{{ peer.name | lower }}-keys" --namespace {{ component_ns }} -o jsonpath="{.data['substrate-node-keys']}" | base64 -d | jq -r '.data.node_id' + register: node_id + ignore_errors: true + +# Append bootnode information to node_list +- name: Collect Bootnode data + set_fact: + node_list={{ node_list|default([]) + [ {'bootnode_id':node_id.stdout, 'external_url':internal_url, 'p2p_port':peer.ipfs.swarmPort} ] }} + vars: + internal_url: "{{ peer.name }}-ipfs-swarm.{{ component_ns }}.svc.cluster.local" + +# Create a file to store bootnode information only if not already available +- name: Create bootnode file + template: + src: "ipfsbootnode.tpl" + dest: "{{ network.config.ipfsbootnodes }}" + +# Generate IPFS node HelmRelease file +- name: "Create ipfs node release file" + include_role: + name: create/helm_component + vars: + component_name: "{{ peer.name }}-ipfs-node" + type: "dscp_ipfs_node" + storageclass_name: "{{ item.name | lower }}-bevel-storageclass" + external_url: "{{ item.external_url_suffix }}" + vault_key: "{{ name }}/substrate-node-{{ peer.name | lower }}" + git_url: "{{ item.gitops.git_url }}" + git_branch: "{{ item.gitops.branch }}" + org: "{{ item }}" + docker_url: "{{ network.docker.url }}" + charts_dir: "{{ item.gitops.chart_source }}" + values_dir: "{{playbook_dir}}/../../../{{item.gitops.release_dir}}/{{ item.name | lower }}" + ipfs_bootnode: "{{ lookup('file', '{{ network.config.ipfsbootnodes }}').splitlines() }}" + +# Git Push : Push the above generated files to git directory +- name: Git Push + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" + vars: + GIT_DIR: "{{ playbook_dir }}/../../../" + gitops: "{{ item.gitops }}" + msg: "[ci skip] Pushing Peer files" + +# Check if IPFS-bootnode is running +- name: Check if {{ peer.name }} is running + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component" + vars: + component_name: "{{ peer.name }}-ipfs" + component_type: Pod + label_selectors: + - name = {{ component_name }} + namespace: "{{ component_ns }}" diff --git a/platforms/substrate/configuration/roles/create/member_node/tasks/main.yaml b/platforms/substrate/configuration/roles/create/member_node/tasks/main.yaml index be034000cbf..2bcdfce8eaa 100644 --- a/platforms/substrate/configuration/roles/create/member_node/tasks/main.yaml +++ b/platforms/substrate/configuration/roles/create/member_node/tasks/main.yaml @@ -4,56 +4,18 @@ # SPDX-License-Identifier: Apache-2.0 ############################################################################################## -# Generate member node helmrelease file -- name: Create value file for member nodes - include_role: - name: create/helm_component - vars: - component_name: "{{ name }}{{ peer.name }}membernode" - type: "node_substrate" - storageclass_name: "{{ name }}-bevel-storageclass" - external_url: "{{ item.external_url_suffix }}" - vault: "{{ item.vault }}" - git_url: "{{ item.gitops.git_url }}" - git_branch: "{{ item.gitops.branch }}" - org: "{{ item }}" - docker_url: "{{ network.docker.url }}" - charts_dir: "{{ item.gitops.chart_source }}" - values_dir: 
"{{playbook_dir}}/../../../{{item.gitops.release_dir}}/{{ item.name | lower }}" - bootnode_data: "{{ lookup('file', '{{ network.config.bootnodes }}').splitlines() }}" - command: "{{ network.config.command }}" - role: "full" +# Generate member node HelmRelease files +- name: "Create value file for member nodes" + include_tasks: member_node.yaml loop: "{{ peers }}" loop_control: loop_var: peer when: peer.type == "member" -# Generate ipfs node helmrelease file -- name: "Create ipfs node release file" - include_role: - name: create/helm_component - vars: - component_name: "{{ peer.name }}-ipfs-node" - type: "dscp_ipfs_node" - storageclass_name: "{{ name }}-bevel-storageclass" - external_url: "{{ item.external_url_suffix }}" - git_url: "{{ item.gitops.git_url }}" - git_branch: "{{ item.gitops.branch }}" - org: "{{ item }}" - docker_url: "{{ network.docker.url }}" - charts_dir: "{{ item.gitops.chart_source }}" - values_dir: "{{playbook_dir}}/../../../{{item.gitops.release_dir}}/{{ item.name | lower }}" - ipfs_bootnode: "{{ lookup('file', '{{ network.config.ipfsbootnodes }}').splitlines() }}" +# Generate IPFS node HelmRelease files +- name: "Create value file for ipfs nodes" + include_tasks: ipfs_node.yaml loop: "{{ peers }}" loop_control: loop_var: peer - when: peer.type == "member" and peer.ipfs is defined - -# Git Push : Push the above generated files to git directory -- name: Git Push - include_role: - name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push" - vars: - GIT_DIR: "{{ playbook_dir }}/../../../" - gitops: "{{ item.gitops }}" - msg: "[ci skip] Pushing Peer files" + when: peer.type == "member" and peer.ipfs.enabled diff --git a/platforms/substrate/configuration/roles/create/member_node/tasks/member_node.yaml b/platforms/substrate/configuration/roles/create/member_node/tasks/member_node.yaml new file mode 100644 index 00000000000..da19b9a7e73 --- /dev/null +++ b/platforms/substrate/configuration/roles/create/member_node/tasks/member_node.yaml @@ -0,0 +1,47 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. 
+#
+# SPDX-License-Identifier: Apache-2.0
+##############################################################################################
+
+# Generate member node helmrelease file
+- name: Create value file for member nodes
+  include_role:
+    name: create/helm_component
+  vars:
+    component_name: "{{ peer.name }}"
+    type: "node_substrate"
+    storageclass_name: "{{ item.name | lower }}-bevel-storageclass"
+    external_url: "{{ item.external_url_suffix }}"
+    vault: "{{ item.vault }}"
+    git_url: "{{ item.gitops.git_url }}"
+    git_branch: "{{ item.gitops.branch }}"
+    org: "{{ item }}"
+    docker_url: "{{ network.docker.url }}"
+    charts_dir: "{{ item.gitops.chart_source }}"
+    values_dir: "{{playbook_dir}}/../../../{{item.gitops.release_dir}}/{{ item.name | lower }}"
+    bootnode_data: "{{ lookup('file', '{{ network.config.bootnodes }}').splitlines() }}"
+    command: "{{ network.config.command }}"
+    role: "full"
+    isBootnode: true
+
+# Git Push : Push the above generated files to git directory
+- name: Git Push
+  include_role:
+    name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push"
+  vars:
+    GIT_DIR: "{{ playbook_dir }}/../../../"
+    gitops: "{{ item.gitops }}"
+    msg: "[ci skip] Pushing Peer files"
+
+# Check if the member node is running
+- name: Check if {{ peer.name }} is running
+  include_role:
+    name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component"
+  vars:
+    component_name: "{{ peer.name }}"
+    component_type: Pod
+    label_selectors:
+      - name = {{ component_name }}
+    namespace: "{{ component_ns }}"
+  tags: notest
diff --git a/platforms/substrate/configuration/roles/create/member_node/templates/ipfsbootnode.tpl b/platforms/substrate/configuration/roles/create/member_node/templates/ipfsbootnode.tpl
new file mode 100644
index 00000000000..a4403f718b1
--- /dev/null
+++ b/platforms/substrate/configuration/roles/create/member_node/templates/ipfsbootnode.tpl
@@ -0,0 +1,4 @@
+ipfsbootnodes:
+{%- for enode in node_list %}
+/dns4/{{ enode.external_url }}/tcp/{{ enode.p2p_port }}/p2p/{{ enode.bootnode_id }}
+{%- endfor %}
diff --git a/platforms/substrate/configuration/roles/create/secrets/tasks/main.yaml b/platforms/substrate/configuration/roles/create/secrets/tasks/main.yaml
new file mode 100644
index 00000000000..cc31dd73c32
--- /dev/null
+++ b/platforms/substrate/configuration/roles/create/secrets/tasks/main.yaml
@@ -0,0 +1,32 @@
+##############################################################################################
+# Copyright Accenture. All Rights Reserved.
+# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +# Wait for namespace to be created by flux +- name: "Wait for the namespace {{ component_ns }} to be created" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/check/k8_component" + vars: + component_type: "Namespace" + component_name: "{{ component_ns }}" + type: "retry" + +# Create the vault roottoken secret +- name: "Create vault token secret" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/shared_k8s_secrets" + vars: + namespace: "{{ component_ns }}" + check: "token_secret" + +# Create the docker pull credentials for image registry +- name: "Create docker credentials secret" + include_role: + name: "{{ playbook_dir }}/../../shared/configuration/roles/create/shared_k8s_secrets" + vars: + namespace: "{{ component_ns }}" + check: "docker_credentials" + when: + - network.docker.username is defined diff --git a/platforms/substrate/configuration/roles/create/validator_node/tasks/main.yaml b/platforms/substrate/configuration/roles/create/validator_node/tasks/main.yaml index be6ee749b46..fd2ad4d7fab 100644 --- a/platforms/substrate/configuration/roles/create/validator_node/tasks/main.yaml +++ b/platforms/substrate/configuration/roles/create/validator_node/tasks/main.yaml @@ -9,9 +9,9 @@ include_role: name: create/helm_component vars: - component_name: "{{ name }}{{ peer.name }}validatornode" + component_name: "{{ peer.name }}" type: "node_substrate" - storageclass_name: "{{ name }}-bevel-storageclass" + storageclass_name: "{{ item.cloud_provider }}storageclass" external_url: "{{ item.external_url_suffix }}" vault: "{{ item.vault }}" git_url: "{{ item.gitops.git_url }}" @@ -20,13 +20,13 @@ docker_url: "{{ network.docker.url }}" charts_dir: "{{ item.gitops.chart_source }}" values_dir: "{{playbook_dir}}/../../../{{item.gitops.release_dir}}/{{ item.name | lower }}" - bootnode_data: "{{ lookup('file', '{{ network.config.bootnodes }}').splitlines() }}" command: "{{ network.config.command }}" role: "validator" + isBootnode: "{{ false if peer.type == 'bootnode' else true }}" loop: "{{ peers }}" loop_control: loop_var: peer - when: peer.type == "validator" + when: peer.type in ['validator', 'bootnode'] # Git Push : Push the above generated files to git directory - name: Git Push diff --git a/platforms/substrate/configuration/roles/delete/vault_secrets/tasks/main.yaml b/platforms/substrate/configuration/roles/delete/vault_secrets/tasks/main.yaml index 8166cb3bfa5..2d911260193 100644 --- a/platforms/substrate/configuration/roles/delete/vault_secrets/tasks/main.yaml +++ b/platforms/substrate/configuration/roles/delete/vault_secrets/tasks/main.yaml @@ -8,8 +8,6 @@ # This role deletes the Vault configurations ############################################################################################# -############################################################################################# - # Delete the Docker credentials - name: Delete docker creds k8s: @@ -35,11 +33,15 @@ loop_var: peer ignore_errors: yes -# Delete Peer Crypto material -- name: Delete Peer Crypto material +# Delete Peer Crypto material (including IPFS keys if applicable) +- name: Delete Peer Crypto material shell: | - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ org_name }}/{{ peer.name }}/substrate - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ org_name }}/{{ peer.name }}/ipfs + 
{% if peer.type == "member" %} + {% if peer.ipfs is defined %} + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/substrate-node-{{ peer.name }}-ipfs-keys + {% endif %} + {% endif %} + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/substrate-node-{{ peer.name }}-keys environment: VAULT_ADDR: "{{ item.vault.url }}" VAULT_TOKEN: "{{ item.vault.root_token }}" @@ -48,24 +50,12 @@ loop_var: peer ignore_errors: yes -# Delete Organization's Crypto material -- name: Delete Org Crypto material +# Delete Genesis and Ambassador crypto material +- name: Delete genesis & ambassador crypto material shell: | - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name }}/genesis - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name }}/tlscerts + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/tlscerts + vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ network.env.type }}{{ org_name }}/genesis environment: VAULT_ADDR: "{{ item.vault.url }}" VAULT_TOKEN: "{{ item.vault.root_token }}" ignore_errors: yes - - # Delete application crypto material -- name: Delete Application Crypto material - shell: | - vault kv delete {{ item.vault.secret_path | default('secretsv2') }}/{{ item.name }}/application - loop: "{{ services.peers }}" - environment: - VAULT_ADDR: "{{ item.vault.url }}" - VAULT_TOKEN: "{{ item.vault.root_token }}" - loop_control: - loop_var: peer - ignore_errors: yes diff --git a/platforms/substrate/configuration/samples/network-sample.yaml b/platforms/substrate/configuration/samples/network-sample.yaml index 38c72cc51c4..808da375998 100644 --- a/platforms/substrate/configuration/samples/network-sample.yaml +++ b/platforms/substrate/configuration/samples/network-sample.yaml @@ -18,15 +18,13 @@ network: env: type: "dscpdev" # tag for the environment. Important to run multiple flux on single cluster proxy: ambassador # value has to be 'ambassador' as 'haproxy' has not been implemented for Substrate + proxy_namespace: "ambassador" # These ports are enabled per cluster, so if you have multiple clusters you do not need so many ports # This sample uses a single cluster, so we have to open 4 ports for each Node. These ports are again specified for each organization below ambassadorPorts: # Any additional Ambassador ports can be given here, this is valid only if proxy='ambassador' - # Specify a list of individual ports to use - ports: [15010, 15023, 15024, 15025, 15033, 15034, 15035, 15043, 15044, 15045] - # Alternatively, specify a range of ports to use all ports within the specified range - # portRange: - # from: 15010 # Starting port of the range - # to: 15045 # Ending port of the range + portRange: # For a range of ports + from: 15010 + to: 15043 # ports: 15020,15021 # For specific ports retry_count: 20 # Retry count for the checks on Kubernetes cluster external_dns: enabled # Should be enabled if using external-dns for automatic route configuration @@ -35,9 +33,9 @@ network: # Please ensure all required images are built and stored in this registry. # Do not check-in docker_password. 
docker: - url: "docker.io" - username: "docker_username" - password: "docker_password" + url: "ghcr.io" + #username: "docker_username" + #password: "docker_password" # Following are the configurations for the common Substrate network config: diff --git a/platforms/substrate/configuration/samples/network-substrate.yaml b/platforms/substrate/configuration/samples/network-substrate.yaml index bd4132064cb..8ccc99bf324 100644 --- a/platforms/substrate/configuration/samples/network-substrate.yaml +++ b/platforms/substrate/configuration/samples/network-substrate.yaml @@ -17,17 +17,14 @@ network: #Environment section for Kubernetes setup env: type: "substratedev" # tag for the environment. Important to run multiple flux on single cluster - proxy: ambassador # value has to be 'ambassador' as 'haproxy' has not been implemented for Substrate - proxy_namespace: "ambassador" # Namespace for the proxy + proxy: ambassador # value has to be 'ambassador' as 'haproxy' has not been implemented for Substrate + proxy_namespace: "ambassador" # These ports are enabled per cluster, so if you have multiple clusters you do not need so many ports # This sample uses a single cluster, so we have to open 4 ports for each Node. These ports are again specified for each organization below ambassadorPorts: # Any additional Ambassador ports can be given here, this is valid only if proxy='ambassador' - # Specify a list of individual ports to use - ports: [15010, 15023, 15024, 15025, 15033, 15034, 15035, 15043, 15044, 15045] - # Alternatively, specify a range of ports to use all ports within the specified range - # portRange: - # from: 15010 # Starting port of the range - # to: 15045 # Ending port of the range + portRange: # For a range of ports + from: 15010 + to: 15043 # ports: 15020,15021 # For specific ports retry_count: 20 # Retry count for the checks on Kubernetes cluster external_dns: enabled # Should be enabled if using external-dns for automatic route configuration @@ -63,7 +60,7 @@ network: name: carrier type: superuser external_url_suffix: subs.example.com # This is the url suffix that will be added in DNS recordset. Must be different for different clusters - cloud_provider: aws # Options: aws, azure, gcp + cloud_provider: gcp # Options: aws, azure, gcp aws: access_key: "AWS_ACCESS_KEY" # AWS Access key, only used when cloud_provider=aws secret_key: "AWS_SECRET_KEY" # AWS Secret key, only used when cloud_provider=aws diff --git a/platforms/substrate/configuration/samples/substrate-network-config.yaml b/platforms/substrate/configuration/samples/substrate-network-config.yaml new file mode 100644 index 00000000000..ef28eebd73f --- /dev/null +++ b/platforms/substrate/configuration/samples/substrate-network-config.yaml @@ -0,0 +1,173 @@ +############################################################################################## +# Copyright Accenture. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +############################################################################################## + +--- +# yaml-language-server: $schema=../../../../platforms/network-schema.json +# This is a sample configuration file for Substrate based Inteli network which has 3 organizations. +# All text values are case-sensitive +network: + # Network level configuration specifies the attributes required for each organization + # to join an existing network. + type: substrate + version: v4.3.1 #this is the version of Substrate docker image that will be deployed. 
+ + #Environment section for Kubernetes setup + env: + type: "substrate" # tag for the environment. Important to run multiple flux on single cluster + proxy: ambassador # value has to be 'ambassador' as 'haproxy' has not been implemented for Substrate + proxy_namespace: "ambassador" + # These ports are enabled per cluster, so if you have multiple clusters you do not need so many ports + # This sample uses a single cluster, so we have to open 4 ports for each Node. These ports are again specified for each organization below + ambassadorPorts: # Any additional Ambassador ports can be given here, this is valid only if proxy='ambassador' + portRange: # For a range of ports + from: 15010 + to: 15043 + # ports: 15020,15021 # For specific ports + retry_count: 20 # Retry count for the checks on Kubernetes cluster + external_dns: enabled # Should be enabled if using external-dns for automatic route configuration + + # Docker registry details where images are stored. This will be used to create k8s secrets + # Please ensure all required images are built and stored in this registry. + # Do not check-in docker_password. + docker: + url: "ghcr.io" + # username: "docker_username" + # password: "docker_password" + + # Following are the configurations for the common Substrate network + config: + ## Certificate subject for the root CA of the network. + # This is for development usage only where we create self-signed certificates and the truststores are generated automatically. + # Production systems should generate proper certificates and configure truststores accordingly. + subject: "CN=DLT Root CA,OU=DLT,O=DLT,L=London,C=GB" + # Provide the docker image which will be used for the Substrate Nodes in this network. (will be downloaded from docker.url) + node_image: "inteli-poc/dscp-node" + # Provide the command which is used to start the node + command: "./dscp-node" # Please ensure the command corresponds to the node_image above + # provide a chain name for Substrate nodes + chain: "inteli" + # NOTE for the below paths, the directories should exist + genesis: "/BUILD_DIR/substrate_genesis" # Location where information is read or saved if empty + bootnodes: "/BUILD_DIR/substrate_bootnodes" # Location where bootnodes information is read or saved if empty + ipfsbootnodes: "/BUILD_DIR/ipfs_bootnodes" # Location where IPFS bootnodes information is read or saved if empty + + # Allows specification of one or many organizations that will be connecting to a network. + organizations: + # Specification for the 1st organization. Each organization should map to a VPC and a separate k8s cluster for production deployments + - organization: + name: supplychain + type: superuser + persona: buyer + external_url_suffix: test.subs.blockchaincloudpoc.com # This is the url suffix that will be added in DNS recordset. Must be different for different clusters + cloud_provider: aws # Options: aws, azure, gcp + aws: + access_key: "AWS_ACCESS_KEY" # AWS Access key, only used when cloud_provider=aws + secret_key: "AWS_SECRET_KEY" # AWS Secret key, only used when cloud_provider=aws + + # Kubernetes cluster deployment variables. The config file path and name has to be provided in case + # the cluster has already been created. + k8s: + context: "CLUSTER_CONTEXT" + config_file: "/BUILD_DIR/config" + # Hashicorp Vault server address and root-token. Vault should be unsealed. 
+ # Do not check-in root_token + vault: + url: "http://VAULT_URL:VAULT_PORT" + root_token: "VAULT_TOKEN" + secret_path: "secretsv2" + # these are the Auth0 API values that will be used for authentication to substrate api endpoints. Eg. tokenUrl: https://example.eu.auth0.com/oauth/token + auth: + type: NONE + # Git Repo details which will be used by GitOps/Flux. + # Do not check-in git_access_token + gitops: + git_protocol: "https" # Option for git over https or ssh + git_url: "https://github.com//bevel.git" # Gitops https or ssh url for flux value files + branch: "substrate" # Git branch where release is being made + release_dir: "platforms/substrate/releases/dev" # Relative Path in the Git repo for flux sync per environment. + chart_source: "platforms/substrate/charts" # Relative Path where the Helm charts are stored in Git repo + git_repo: "github.com//bevel.git" # Gitops git repository URL for git push + username: "GIT_USERNAME" # Git Service user who has rights to check-in in all branches + password: "GIT_TOKEN" # Git Server user password/token (Optional for ssh; Required for https) + email: "git@email.com" # Email to use in git config + private_key: "/BUILD_DIR/gitops" # Path to private key file which has write-access to the git repo (Optional for https; Required for ssh) + # The participating nodes are named as peers + services: + peers: + - peer: + name: validator-1 + subject: "O=validator-1,OU=validator-1,L=51.50/-0.13/London,C=GB" # This is the node subject. L=lat/long is mandatory for supplychain sample app + type: bootnode # value can be validator or bootnode ( or ipfs, for vitalAM) + p2p: + port: 30333 + ambassador: 15010 # Port exposed on ambassador service (use one port per org if using single cluster) + rpc: + port: 9933 + ws: + port: 9944 + - peer: + name: validator-2 + subject: "O=validator-2,OU=validator-2,L=51.50/-0.13/London,C=GB" # This is the node subject. L=lat/long is mandatory for supplychain sample app + type: validator # value can be validator or bootnode ( or ipfs, for vitalAM) + p2p: + port: 30333 + ambassador: 15011 # Port exposed on ambassador service (use one port per org if using single cluster) + rpc: + port: 9933 + ws: + port: 9944 + - peer: + name: validator-3 + subject: "O=validator-3,OU=validator-3,L=51.50/-0.13/London,C=GB" # This is the node subject. L=lat/long is mandatory for supplychain sample app + type: validator # value can be validator or bootnode ( or ipfs, for vitalAM) + p2p: + port: 30333 + ambassador: 15012 # Port exposed on ambassador service (use one port per org if using single cluster) + rpc: + port: 9933 + ws: + port: 9944 + - peer: + name: validator-4 + subject: "O=validator-4,OU=validator-4,L=51.50/-0.13/London,C=GB" # This is the node subject. L=lat/long is mandatory for supplychain sample app + type: validator # value can be validator or bootnode ( or ipfs, for vitalAM) + p2p: + port: 30333 + ambassador: 15013 #Port exposed on ambassador service (use one port per org if using single cluster) + rpc: + port: 9933 + ws: + port: 9944 + - peer: + name: member-1 + subject: "O=member-1,OU=member-1,London,C=GB" # This is the node subject. 
+ type: member # value can be validator or bootnode ( or ipfs, for vitalAM) + nodeHost: oem # peer name of substrate node for IPFS API-WS connection + p2p: + port: 30333 + ambassador: 15014 # Port exposed on ambassador service (use one port per org if using single cluster) + rpc: + port: 9933 + ws: + port: 9944 + ipfs: + enabled: true # Set to true if an IPFS node is required, otherwise set to false + swarmPort: 4001 + ambassador: 15016 # Port exposed on ambassador service (use one port per org if using single cluster) + apiPort: 5001 + api: + port: 80 + postgresql: + port: 5432 + user: postgres + password: "postgres123" + id_service: + db_name: "id-service" + port: 3001 + inteli_api: + db_name: "inteli-api" + port: 3000 + ambassador: 443
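
For reference, the new create/member_node/templates/ipfsbootnode.tpl renders one IPFS multiaddr per entry collected in node_list. A minimal sketch of the file it would produce for the member-1 peer above (swarmPort 4001); the namespace and peer id shown are illustrative placeholders, not values from this patch:

    ipfsbootnodes:
    /dns4/member-1-ipfs-swarm.<component_ns>.svc.cluster.local/tcp/4001/p2p/<node_id from the substrate-node-member-1-keys secret>

The generated file is read back line by line through the ipfs_bootnode lookup in create/member_node/tasks/ipfs_node.yaml and passed to the dscp_ipfs_node HelmRelease values.

Similarly, the Vault path now used by roles/create/ipfsbootnodefile/tasks/get_bootnode_data.yaml can be inspected manually. Assuming the default secret_path, the env type "substrate" and the member-1 peer from this sample (placeholders from the sample network.yaml, adjust to your own values):

    export VAULT_ADDR=http://VAULT_URL:VAULT_PORT
    export VAULT_TOKEN=VAULT_TOKEN
    vault kv get -field=peer_id "secretsv2/substrate/member-1-ipfs"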