diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 2d71d8d..ea83ff3 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,6 +4,43 @@ community.sap_infrastructure Release Notes .. contents:: Topics +v1.1.0 +====== + +Release Summary +--------------- + +| Release Date: 2024-12-01 +| sap_hypervisor_node_preconfigure: OCPv improve auth and add namespace targets +| sap_hypervisor_node_preconfigure: OCPv update default vars and var prefixes +| sap_hypervisor_node_preconfigure: OCPv add waits for resource readiness +| sap_hypervisor_node_preconfigure: OCPv improve SR-IOV handling +| sap_vm_provision: update platform guidance document +| sap_vm_provision: remove AWS CLI and GCloud CLI dependency +| sap_vm_provision: add spread placement strategy for AWS, GCP, IBM Cloud, MS Azure, IBM PowerVM +| sap_vm_provision: add AWS Route53 record overwrite +| sap_vm_provision: add IBM Cloud Private DNS Custom Resolver for IBM Power VS +| sap_vm_provision: add var for Virtual IP handling across multiple roles +| sap_vm_provision: fix /etc/hosts for Virtual IPs +| sap_vm_provision: add var for Load Balancer naming on GCP, IBM Cloud, MS Azure +| sap_vm_provision: update OS Images for AWS, GCP, IBM Cloud, MS Azure +| sap_vm_provision: add vars for Kubevirt VM +| sap_vm_provision: fix OS Subscription registration logic and BYOL/BYOS +| sap_vm_provision: improve Web Proxy logic +| sap_vm_provision: fix handling of nested variables within host_specifications_dictionary +| sap_vm_provision: fix handling of custom IOPS on AWS, GCP, IBM Cloud +| sap_vm_provision: fix handling of AWS IAM Policy for HA +| sap_vm_provision: fix handling of MS Azure IAM Role for HA +| sap_vm_provision: add google-guest-agent service for load balancer config +| sap_vm_provision: add readiness for AnyDB HA (e.g. IBM Db2 HADR) +| sap_vm_provision: update IBM Power VS locations lookup list +| sap_vm_provision: update logic for IBM Power VS Workspace with latest backend routing (PER) +| sap_vm_provision: update logic for IBM Cloud Virtual Network Interfaces (VNI) +| sap_vm_provision: fix Ansible to Terraform copy to working directory logic and note +| sap_vm_provision: update embedded Terraform Template with updated var names for imported Terraform Modules +| sap_vm_temp_vip: overhaul replace all shell logic with Ansible Modules and use special vars to determine OS network devices reliably +| sap_vm_temp_vip: overhaul documentation + v1.0.1 ====== diff --git a/galaxy.yml b/galaxy.yml index afa47b1..88a3088 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -10,7 +10,7 @@ namespace: community name: sap_infrastructure # The version of the collection. Must be compatible with semantic versioning -version: 1.0.1 +version: 1.1.0 # The path to the Markdown (.md) readme file. This path is relative to the root of the collection readme: README.md @@ -20,6 +20,7 @@ authors: - Sean Freeman - Janine Fuchs - Nils Koenig + - Marcel Mamula ### OPTIONAL but strongly recommended # A short summary description of the collection @@ -40,6 +41,10 @@ tags: - database - application - sap + - infrastructure + - provision + - cloud + - hypervisor # Collections that this collection requires to be installed for it to be usable. The key of the dict is the # collection label 'namespace.name'. 
The value is a version range diff --git a/playbooks/sample-sap-hypervisor-redhat-ocp-virt-preconfigure.yml b/playbooks/sample-sap-hypervisor-redhat-ocp-virt-preconfigure.yml index f60d69b..5f1d0bb 100644 --- a/playbooks/sample-sap-hypervisor-redhat-ocp-virt-preconfigure.yml +++ b/playbooks/sample-sap-hypervisor-redhat-ocp-virt-preconfigure.yml @@ -1,19 +1,105 @@ --- - -- name: Ansible Play to run sap_hypervisor_node_preconfigure Ansible Role +- name: Ansible Play to run sap_hypervisor_node_preconfigure Ansible Role for Red Hat OpenShift hosts: all - gather_facts: true - serial: 1 - + gather_facts: false vars: - sap_hypervisor_node_platform: redhat_ocp_virt - sap_hypervisor_node_kubeconfig: "{{ lookup( 'ansible.builtin.env', 'KUBECONFIG') }}" + sap_hypervisor_node_preconfigure_platform: redhat_ocp_virt + tasks: - environment: - KUBECONFIG: "{{ sap_hypervisor_node_kubeconfig }}" - K8S_AUTH_KUBECONFIG: "{{ sap_hypervisor_node_kubeconfig }}" + - name: Use kubeconfig file specified in environment variable K8S_AUTH_KUBECONFIG if sap_hypervisor_node_preconfigure_ocp_kubeconfig_path is not defined + when: > + sap_hypervisor_node_preconfigure_ocp_kubeconfig_path is not defined or + sap_hypervisor_node_preconfigure_ocp_kubeconfig_path == None or + sap_hypervisor_node_preconfigure_ocp_kubeconfig_path == '' + ansible.builtin.set_fact: + sap_hypervisor_node_preconfigure_ocp_kubeconfig_path: "{{ lookup('env', 'K8S_AUTH_KUBECONFIG') | default(None) }}" - tasks: - - name: Include sap_hypervisor_node_preconfigure Ansible Role - ansible.builtin.include_role: - name: sap_hypervisor_node_preconfigure + - name: Create Tempdir on jumphost + ansible.builtin.tempfile: + state: directory + suffix: "_sap_hypervisor_node_preconfigure" + register: __sap_hypervisor_node_preconfigure_register_tmpdir_jumphost + + - name: Set kubeconfig file variable + ansible.builtin.set_fact: + __sap_hypervisor_node_preconfigure_register_kubeconfig: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir_jumphost.path }}/kubeconfig" + + - name: Read content of kubeconfig file + ansible.builtin.set_fact: + sap_hypervisor_node_preconfigure_ocp_kubeconfig_data: + "{{ lookup('file', sap_hypervisor_node_preconfigure_ocp_kubeconfig_path) | from_yaml }}" + + - name: Read cluster endpoint and CA certificate from kubeconfig if either is not defined + when: sap_hypervisor_node_preconfigure_ocp_extract_kubeconfig + block: + + - name: Set sap_hypervisor_node_preconfigure_ocp_endpoint from kubeconfig + ansible.builtin.set_fact: + __sap_hypervisor_node_preconfigure_register_ocp_endpoint: + "{{ sap_hypervisor_node_preconfigure_ocp_kubeconfig_data['clusters'][0]['cluster']['server'] }}" + + - name: Write the certificate-authority-data to temp dir on jumphost + ansible.builtin.copy: + content: "{{ sap_hypervisor_node_preconfigure_ocp_kubeconfig_data['clusters'][0]['cluster']['certificate-authority-data'] | b64decode }}" + dest: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir_jumphost.path }}/cluster-ca-cert.pem" + mode: "0666" + + - name: Set CA file variable + ansible.builtin.set_fact: + __sap_hypervisor_node_preconfigure_register_ca_cert: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir_jumphost.path }}/cluster-ca-cert.pem" + + + - name: Use predefined CA cert and API endpoint + when: not sap_hypervisor_node_preconfigure_ocp_extract_kubeconfig + + block: + - name: Set predefined OCP API Endpoint + ansible.builtin.set_fact: + __sap_hypervisor_node_preconfigure_register_ocp_endpoint: "{{ sap_hypervisor_node_preconfigure_ocp_endpoint }}" + + 
- name: Set predefined CA file + ansible.builtin.set_fact: + __sap_hypervisor_node_preconfigure_register_ca_cert: "{{ sap_hypervisor_node_preconfigure_ocp_ca_cert }}" + + + - name: Log into Red Hat OpenShift cluster (obtain access token) + community.okd.openshift_auth: + host: "{{ __sap_hypervisor_node_preconfigure_register_ocp_endpoint }}" + username: "{{ sap_hypervisor_node_preconfigure_ocp_admin_username }}" + password: "{{ sap_hypervisor_node_preconfigure_ocp_admin_password }}" + ca_cert: "{{ __sap_hypervisor_node_preconfigure_register_ca_cert }}" + register: __sap_vm_provision_register_ocp_auth_results + + - name: Set token in kubeconfig + ansible.builtin.set_fact: + sap_hypervisor_node_preconfigure_ocp_kubeconfig_data: >- + {{ + sap_hypervisor_node_preconfigure_ocp_kubeconfig_data | combine({ + 'users': sap_hypervisor_node_preconfigure_ocp_kubeconfig_data.users | map('combine', [{'user': {'token': __sap_vm_provision_register_ocp_auth_results.openshift_auth.api_key }}] ) + }, recursive=True) + }} + + - name: Write the updated kubeconfig + ansible.builtin.copy: + content: "{{ sap_hypervisor_node_preconfigure_ocp_kubeconfig_data | to_nice_yaml }}" + dest: "{{ __sap_hypervisor_node_preconfigure_register_kubeconfig }}" + mode: "0600" + + - name: Invoke role with credentials set as environment variables + delegate_to: "{{ inventory_hostname }}" + delegate_facts: true + environment: + KUBECONFIG: "{{ __sap_hypervisor_node_preconfigure_register_kubeconfig }}" + K8S_AUTH_KUBECONFIG: "{{ __sap_hypervisor_node_preconfigure_register_kubeconfig }}" + block: + + - name: Include sap_hypervisor_node_preconfigure Ansible Role + ansible.builtin.include_role: + name: community.sap_infrastructure.sap_hypervisor_node_preconfigure + + always: + + - name: Remove temporary directory + ansible.builtin.file: + state: absent + path: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir_jumphost.path }}" diff --git a/playbooks/sample-sap-vm-provision-redhat-ocpv.yml b/playbooks/sample-sap-vm-provision-redhat-ocpv.yml new file mode 100644 index 0000000..279bc20 --- /dev/null +++ b/playbooks/sample-sap-vm-provision-redhat-ocpv.yml @@ -0,0 +1,128 @@ +--- +- name: Preparation Ansible Play for SAP VM provisioning on Red Hat OpenShift Virtualization + hosts: all + gather_facts: false + serial: 1 + vars: + sap_vm_provision_iac_type: ansible + sap_vm_provision_iac_platform: kubevirt_vm + pre_tasks: + # Alternative to executing ansible-playbook with -e for Ansible Extravars file +# - name: Include sample variables for Red Hat Openshift Virtualization +# ansible.builtin.include_vars: ./vars/sample-variables-sap-vm-provision-redhat-ocpv.yml + tasks: + + - name: Save inventory_host as execution_host + ansible.builtin.set_fact: + sap_vm_provision_execution_host: "{{ inventory_hostname }}" + + - name: Save ansible_user as execution_host user + ansible.builtin.set_fact: + __sap_vm_provision_kubevirt_vm_register_execution_host_user: "{{ ansible_user }}" + + - name: Use kubeconfig file specified in environment variable K8S_AUTH_KUBECONFIG if sap_vm_provision_kubevirt_vm_kubeconfig_path is not defined + when: > + sap_vm_provision_kubevirt_vm_kubeconfig_path is not defined or + sap_vm_provision_kubevirt_vm_kubeconfig_path == None or + sap_vm_provision_kubevirt_vm_kubeconfig_path == '' + ansible.builtin.set_fact: + sap_vm_provision_kubevirt_vm_kubeconfig_path: "{{ lookup('env', 'K8S_AUTH_KUBECONFIG') | default(None) }}" + + - name: Create Tempdir + ansible.builtin.tempfile: + state: directory + suffix: 
"_sap_vm_provision_kubevirt_vm" + register: __sap_vm_provision_kubevirt_vm_register_tmpdir + + - name: Set kubeconfig file variable + ansible.builtin.set_fact: + __sap_vm_provision_kubevirt_vm_register_kubeconfig: "{{ __sap_vm_provision_kubevirt_vm_register_tmpdir.path }}/kubeconfig" + + - name: Read content of kubeconfig file + ansible.builtin.set_fact: + __sap_vm_provision_kubevirt_vm_register_kubeconfig_data: + "{{ lookup('file', sap_vm_provision_kubevirt_vm_kubeconfig_path) | from_yaml }}" + + - name: Read cluster endpoint and CA certificate from kubeconfig if either is not defined + when: sap_vm_provision_kubevirt_vm_extract_kubeconfig + block: + + - name: Set sap_vm_provision_kubevirt_vm_api_endpoint from kubeconfig + ansible.builtin.set_fact: + __sap_vm_provision_kubevirt_vm_register_api_endpoint: + "{{ __sap_vm_provision_kubevirt_vm_register_kubeconfig_data['clusters'][0]['cluster']['server'] }}" + + - name: Write the certificate-authority-data to temp dir + ansible.builtin.copy: + content: "{{ __sap_vm_provision_kubevirt_vm_register_kubeconfig_data['clusters'][0]['cluster']['certificate-authority-data'] | b64decode }}" + dest: "{{ __sap_vm_provision_kubevirt_vm_register_tmpdir.path }}/cluster-ca-cert.pem" + mode: "0600" + + - name: Set CA file variable + ansible.builtin.set_fact: + __sap_vm_provision_kubevirt_vm_register_ca_cert: "{{ __sap_vm_provision_kubevirt_vm_register_tmpdir.path }}/cluster-ca-cert.pem" + + - name: Use predefined CA cert and API endpoint + when: not sap_vm_provision_kubevirt_vm_extract_kubeconfig + block: + - name: Set predefined OCP API Endpoint + ansible.builtin.set_fact: + __sap_vm_provision_kubevirt_vm_register_api_endpoint: "{{ sap_vm_provision_kubevirt_vm_api_endpoint }}" + + - name: Set predefined CA file + ansible.builtin.set_fact: + __sap_vm_provision_kubevirt_vm_register_ca_cert: "{{ sap_vm_provision_kubevirt_vm_ca_cert }}" + + - name: Log into Red Hat OpenShift cluster (obtain access token) + community.okd.openshift_auth: + host: "{{ __sap_vm_provision_kubevirt_vm_register_api_endpoint }}" + username: "{{ sap_vm_provision_kubevirt_vm_admin_username }}" + password: "{{ sap_vm_provision_kubevirt_vm_admin_password }}" + ca_cert: "{{ __sap_vm_provision_kubevirt_vm_register_ca_cert }}" + register: __sap_vm_provision_kubevirt_vm_register_kubevirt_vm_auth_results + + - name: Set token in kubeconfig + ansible.builtin.set_fact: + __sap_vm_provision_kubevirt_vm_register_kubeconfig_data: >- + {{ + __sap_vm_provision_kubevirt_vm_register_kubeconfig_data | combine({ + 'users': __sap_vm_provision_kubevirt_vm_register_kubeconfig_data.users | map('combine', [{'user': {'token': __sap_vm_provision_kubevirt_vm_register_kubevirt_vm_auth_results.openshift_auth.api_key }}] ) + }, recursive=True) + }} + + - name: Write the updated kubeconfig + ansible.builtin.copy: + content: "{{ __sap_vm_provision_kubevirt_vm_register_kubeconfig_data | to_nice_yaml }}" + dest: "{{ __sap_vm_provision_kubevirt_vm_register_kubeconfig }}" + mode: "0600" + + - name: Create dynamic inventory group for Ansible Role sap_vm_provision and provide execution_host and api token + ansible.builtin.add_host: + name: "{{ item }}" + group: sap_vm_provision_target_inventory_group + sap_vm_provision_execution_host: "{{ sap_vm_provision_execution_host }}" + __sap_vm_provision_kubevirt_vm_register_execution_host_user: "{{ __sap_vm_provision_kubevirt_vm_register_execution_host_user }}" + __sap_vm_provision_kubevirt_vm_register_tmpdir: "{{ __sap_vm_provision_kubevirt_vm_register_tmpdir }}" + 
__sap_vm_provision_kubevirt_vm_register_kubeconfig: "{{ __sap_vm_provision_kubevirt_vm_register_kubeconfig }}" + loop: "{{ sap_vm_provision_kubevirt_vm_host_specifications_dictionary[sap_vm_provision_host_specification_plan].keys() }}" + +- name: Ansible Play to provision VMs for SAP + hosts: sap_vm_provision_target_inventory_group # Ansible Play target hosts pattern, use Inventory Group created by previous Ansible Task (add_host) + gather_facts: false + environment: + K8S_AUTH_KUBECONFIG: "{{ __sap_vm_provision_kubevirt_vm_register_kubeconfig }}" + tasks: + + - name: Execute Ansible Role sap_vm_provision + when: sap_vm_provision_iac_type == "ansible" or sap_vm_provision_iac_type == "ansible_to_terraform" + block: + - name: Include sap_vm_provision Ansible Role + ansible.builtin.include_role: + name: community.sap_infrastructure.sap_vm_provision + + always: + - name: Remove temporary directory on execution_host + delegate_to: "{{ sap_vm_provision_execution_host }}" + ansible.builtin.file: + state: absent + path: "{{ __sap_vm_provision_kubevirt_vm_register_tmpdir.path }}" diff --git a/playbooks/vars/sample-variables-sap-hypervisor-redhat-ocp-virt-preconfigure.yml b/playbooks/vars/sample-variables-sap-hypervisor-redhat-ocp-virt-preconfigure.yml index 51b8445..5799575 100644 --- a/playbooks/vars/sample-variables-sap-hypervisor-redhat-ocp-virt-preconfigure.yml +++ b/playbooks/vars/sample-variables-sap-hypervisor-redhat-ocp-virt-preconfigure.yml @@ -1,40 +1,87 @@ --- +# vars file for redhat_ocp_virt + +########################################################### +# Red Hat OpenShift cluster connection details +########################################################### + +# Admin username for Red Hat OpenShift cluster connection +sap_hypervisor_node_preconfigure_ocp_admin_username: + +# Admin password for Red Hat OpenShift cluster connection +sap_hypervisor_node_preconfigure_ocp_admin_password: + +# Path to kubeconfig file Red Hat OpenShift cluster connection +sap_hypervisor_node_preconfigure_ocp_kubeconfig_path: + +# If this is set to true, the API endpoint and the +# CA Certificate are extracted from the kubeconfig file. +# If set to false, sap_hypervisor_node_preconfigure_ocp_endpoint and +# sap_hypervisor_node_preconfigure_ocp_ca_cert have to be specified. +sap_hypervisor_node_preconfigure_ocp_extract_kubeconfig: true + +# URL to the API endpoint of Red Hat OpenShift cluster +#sap_hypervisor_node_preconfigure_ocp_endpoint: + +# CA Certificate for Red Hat OpenShift cluster connection +# To extract the CA Cert from the kubeconfig, you can use +# grep certificate-authority-data ${KUBECONFIG} | awk '{ print $2 }' | base64 --decode > client-cert.pem +#sap_hypervisor_node_preconfigure_ocp_ca_cert: + + +########################################################### +# Configuration of what should be preconfigured +########################################################### + +# Install and configure the host path provisioner (hpp) for a local storage disk +sap_hypervisor_node_preconfigure_install_hpp: false + +# Install the trident NFS storage provider +sap_hypervisor_node_preconfigure_install_trident: false + +# Should the operators be installed sap_hypervisor_node_preconfigure_install_operators: true + +# Configure the workers? sap_hypervisor_node_preconfigure_setup_worker_nodes: true -# Install the trident NFS storage provider. If yes, expects configuration details under -# sap_hypervisor_node_preconfigure_cluster_config.trident, see example config. 
-sap_hypervisor_node_preconfigure_install_trident: false # true, false + +########################################################### +# Configuration details +########################################################### + # URL of the trident installer package to use -sap_hypervisor_node_preconfigure_install_trident_url: https://github.com/NetApp/trident/releases/download/v23.01.0/trident-installer-23.01.0.tar.gz +sap_hypervisor_node_preconfigure_install_trident_url: https://github.com/NetApp/trident/releases/download/v24.06.0/trident-installer-24.06.0.tar.gz + +# Allow unsupported NICs to be used for SR-IOV? +sap_hypervisor_node_preconfigure_sriov_enable_unsupported_nics: false + +# Amount of memory [GiB] to be reserved for the hypervisor on hosts >= 512GiB +sap_hypervisor_node_preconfigure_hypervisor_reserved_ram_host_ge_512: 64 # GiB + +# Amount of memory [GiB] to be reserved for the hypervisor on hosts < 512GiB +sap_hypervisor_node_preconfigure_hypervisor_reserved_ram_host_lt_512: 32 # GiB -# should SRIOV be enabled for unsupported NICs -sap_hypervisor_node_preconfigure_sriov_enable_unsupported_nics: true # true, false +# Should the check for the minimal amount of memory be ignored? Minimal amount is 96 GiB +sap_hypervisor_node_preconfigure_ignore_minimal_memory_check: false -# Amount of memory [GB] to be reserved for the hypervisor on hosts >= 512GB -sap_hypervisor_node_preconfigure_hypervisor_reserved_ram_host_ge_512: 64 #GB -# Amount of memory [GB] to be reserved for the hypervisor on hosts < 512GB -sap_hypervisor_node_preconfigure_hypervisor_reserved_ram_host_lt_512: 32 #GB +# Namespace to be used for the Red Hat OpenShift Virtualization Operator +sap_hypervisor_node_preconfigure_ocpv_namespace: openshift-cnv -# Should the check for the minimal amount of memory be ignored? Minimal amount is 96 GB -# If ignored, the amount of $hostmemory - $reserved is allocated with a lower bound of 0 in case $reserved > $hostmemory -sap_hypervisor_node_preconfigure_ignore_minimal_memory_check: true # true, false +# Channel to be used for the Red Hat OpenShift Virtualization Operator +sap_hypervisor_node_preconfigure_ocpv_subscription_channel: stable -# Define if the host path provisioner should be installed in order to use a local disk as storage device.
-# Uses the following variable to be set to the storage device to be used, e.g.: -# sap_hypervisor_node_preconfigure_cluster_config.worker_localstorage_device: /dev/sdb -sap_hypervisor_node_preconfigure_install_hpp: true # true, false +########################################################### +# Red Hat OpenShift cluster configuration details +########################################################### # Example configuration for redhat_ocp_virt sap_hypervisor_node_preconfigure_cluster_config: - # URL under which the OCP cluster is reachable - cluster_url: ocpcluster.domain.org - # namespace under which the VMs are created, note this has to be - # openshift-sriov-network-operator in case of using SRIOV network + # openshift-sriov-network-operator in case of using SR-IOV network # devices vm_namespace: sap @@ -77,10 +124,18 @@ sap_hypervisor_node_preconfigure_cluster_config: port: - name: ens1f0 # network IF name - - name: storage # an SRIOV device + - name: storage # an SR-IOV device interface: ens2f0 # network IF name type: sriov + - name: ens2f0 # Set elevated MTU of 9000 + type: ethernet # on parent interface of + state: up # storagebridge + ipv4: + dhcp: false + enabled: false + mtu: 9000 + - name: storagebridge # using a bridge bridge: # another bridge options: @@ -92,18 +147,18 @@ sap_hypervisor_node_preconfigure_cluster_config: mtu: 9000 ipv4: address: - - ip: 192.168.1.51 # IP config + - ip: 192.168.1.10 # IP config prefix-length: 24 auto-dns: false auto-gateway: false - enabled: true state: up type: linux-bridge - - name: multi # another SRIOV device + - name: multi # another SR-IOV device interface: ens2f1 # network IF name type: sriov + - name: worker-1 # second worker configuration networks: # Example network config @@ -122,6 +177,36 @@ sap_hypervisor_node_preconfigure_cluster_config: port: - name: ens1f0 # network IF name - - name: storage # an SRIOV device + - name: storage # an SR-IOV device interface: ens2f0 # network IF name type: sriov + + - name: ens2f0 # Set elevated MTU of 9000 + type: ethernet # on parent interface of + state: up # storagebridge + ipv4: + dhcp: false + enabled: false + mtu: 9000 + + - name: storagebridge # create storage bridge + bridge: + options: + stp: + enabled: false + port: + - name: ens2f0 # network IF name + description: storage + mtu: 9000 + ipv4: + address: + - ip: 192.168.1.11 # IP config + prefix-length: 24 + auto-dns: false + auto-gateway: false + state: up + type: linux-bridge + + - name: multi # another SR-IOV device + interface: ens2f1 # network IF name + type: sriov diff --git a/playbooks/vars/sample-variables-sap-vm-provision-redhat-ocpv.yml b/playbooks/vars/sample-variables-sap-vm-provision-redhat-ocpv.yml new file mode 100644 index 0000000..0dd8a39 --- /dev/null +++ b/playbooks/vars/sample-variables-sap-vm-provision-redhat-ocpv.yml @@ -0,0 +1,104 @@ +--- +############################################ +# Red Hat OpenShift Virtualization # +############################################ + +# Namespace where the VM should be created in +sap_vm_provision_kubevirt_vm_target_namespace: sap + +# Username to be created on guest +sap_vm_provision_kubevirt_vm_os_user: cloud-user + +# Password for the above user +sap_vm_provision_kubevirt_vm_os_user_password: "" + +# how to authenticate to the guest vm [password|private_key|private_key_data] +# password: uses provided password in sap_vm_provision_kubevirt_vm_os_user_password, make sure your ssh config allows password authentication +# private_key: use the private ssh key at the location 
defined by sap_vm_provision_ssh_host_private_key_file_path +# private_key_data: use the private ssh key provided in sap_vm_provision_ssh_host_private_key_data and write it to the location defined in sap_vm_provision_ssh_host_private_key_file_path +sap_vm_provision_kubevirt_vm_guest_ssh_auth_mechanism: private-key + +# Private SSH key file, must be accessible on the ansible controller +# sap_vm_provision_ssh_host_private_key_file_path: + +# private ssh key, make sure the indentation is correct, here it's two spaces at the beginning of every line +# sap_vm_provision_ssh_host_private_key_data: | +# < your key data> + +# Should the CA cert and the API endpoint be extracted from the kubeconfig file? +sap_vm_provision_kubevirt_vm_extract_kubeconfig: true + +# Should an existing VM be overwritten? +sap_vm_provision_kubevirt_vm_overwrite_vm: false + +# Kubeconfig file for cluster where VMs should be created +sap_vm_provision_kubevirt_vm_kubeconfig_path: /path/to/clusterconfigs/kubeconfig + +# In order to use secured communication, provide the CA cert bundle for the cluster. +# This can be extracted from the kubeconfig file with the following command from the +# kubeconfig file: +# grep certificate-authority-data ${KUBECONFIG} | awk '{ print $2 }' | base64 --decode > cluster-ca-cert.pem +# This variable will not be used if sap_vm_provision_kubevirt_vm_extract_kubeconfig = true +# sap_vm_provision_kubevirt_vm_ca_cert: /path/to/clusterconfigs/cluster-ca-cert.pem + +# API endpoint of the cluster +# This variable will not be used if sap_vm_provision_kubevirt_vm_extract_kubeconfig = true +# sap_vm_provision_kubevirt_vm_api_endpoint: https://api.cluster.domain.tld:6443 + +# Admin username for the cluster communication +sap_vm_provision_kubevirt_vm_admin_username: kubeadmin + +# Password for the above admin user +sap_vm_provision_kubevirt_vm_admin_password: AAAAA-BBBBB-CCCCC-DDDDD + +# RAM Overhead [GiB] for virt-launcher container, this can be small for VMs < 1 TB and without SRIOV but should be increased to 16 or more for VMs > 1TB +sap_vm_provision_kubevirt_vm_container_memory_overhead: 1 + +# hostname of the ansible controller +sap_vm_provision_kubevirt_vm_ansible_controller: localhost # on AAP, this is localhost + +sap_vm_provision_kubevirt_vm_host_specifications_dictionary: + example_host_specification_plan: + host1: # Hostname, must be 13 characters or less + # SMT-2 (i.e. 
2 CPU Threads per CPU Core) is default for Intel CPU Hyper-Threading, optionally can be altered to SMT-1 + kubevirt_vm_cpu_smt: 2 + kubevirt_vm_cpu_cores: 2 + kubevirt_vm_memory_gib: 24 + sap_system_type: project_dev # project_dev, project_tst, project_prd + sap_host_type: hana_primary # hana_primary, hana_secondary, anydb_primary, anydb_secondary, nwas_ascs, nwas_ers, nwas_pas, nwas_aas + # Provide either an existing PVC or a URL for an OS image + os_image: # either url or source_pvc_name have to be provided + # URL for an image to be used + url: "docker://registry.redhat.io/rhel8/rhel-guest-image:8.8.0" + # Name for a PVC to be cloned + # source_pvc_name: "rhel-8.8" + namespace: openshift-virtualization-os-images + size: "50Gi" + network_definition: + - name: sapbridge + type: bridge + networkName: sapbridge-network-definition + model: virtio + storage_definition: + - name: hana + mountpoint: /hana + disk_count: 1 # default: 1 + disk_size: 2048 # size in GB, integer + disk_type: nas # KubeVirt Storage Class + cloudinit: + userData: |- + #cloud-config + timezone: Europe/Berlin + hostname: "{{ scaleout_origin_host_spec }}" + user: {{ sap_vm_provision_kubevirt_vm_os_user if sap_vm_provision_kubevirt_vm_os_user is defined }} + password: {{ sap_vm_provision_kubevirt_vm_os_user_password if sap_vm_provision_kubevirt_vm_os_user_password is defined }} + chpasswd: + expire: false + ssh_authorized_keys: + - "{{ lookup('ansible.builtin.file', sap_vm_provision_ssh_host_public_key_file_path ) }}" + networkData: |- + network: + version: 2 + ethernets: + eth0: + dhcp4: true diff --git a/requirements.yml b/requirements.yml index 03131ca..6669d66 100644 --- a/requirements.yml +++ b/requirements.yml @@ -1,37 +1,41 @@ --- collections: - - name: cloud.terraform - type: galaxy - version: 1.1.0 - name: amazon.aws type: galaxy - version: 7.2.0 + version: 9.0.0 - name: community.aws type: galaxy - version: 7.1.0 + version: 9.0.0 - name: azure.azcollection type: galaxy - version: 2.2.0 + version: 3.0.1 - name: google.cloud type: galaxy version: 1.1.3 # Replace with ibm.cloud in future, legacy Ansible Collection uses hidden on-the-fly Terraform files in /var/tmp/ansible/ibmcloud) - name: ibm.cloudcollection type: galaxy - version: 1.51.0 + version: 1.71.2 - name: ovirt.ovirt type: galaxy version: 3.1.2 - name: openstack.cloud type: galaxy version: 2.1.0 - - name: kubevirt.core - type: galaxy - version: 1.1.0 - name: vmware.vmware_rest type: galaxy version: 3.0.0 - name: cloud.common + type: galaxy + version: 4.0.0 + - name: cloud.terraform type: galaxy version: 3.0.0 + - name: kubevirt.core + type: galaxy + version: 1.5.0 + # For Red Hat OpenShift + - name: community.okd + type: galaxy + version: 3.0.1 diff --git a/roles/sap_hypervisor_node_preconfigure/README.md b/roles/sap_hypervisor_node_preconfigure/README.md index 2d335d9..4091eb4 100644 --- a/roles/sap_hypervisor_node_preconfigure/README.md +++ b/roles/sap_hypervisor_node_preconfigure/README.md @@ -4,10 +4,9 @@ Ansible Role for configuration of Hypervisor Nodes and Control Plane for hosting Virtual Machines with SAP Systems. -This Ansible Role will configure the following hypervisors in order to run SAP workloads: -- Red Hat OpenShift Virtualization (OCPV), i.e. KubeVirt -- Red Hat Enterprise Virtualization (RHV), i.e. OVirt KVM - +This Ansible Role can configure the following hypervisors in order to run SAP workloads: +- Red Hat OpenShift Virtualization (OCPV). The corresponding upstream project KubeVirt is not tested with this role. 
While this might work, there is no guarantee. +- Red Hat Enterprise Virtualization (RHV). The corresponding upstream project OVirt KVM is not tested with this role. While this might work, there is no guarantee. ## Functionality @@ -16,92 +15,223 @@ The hypervisor nodes for Virtual Machines hosting SAP Software are amended by th ## Scope -All hosts for SAP Software on a target Hypervisor. - - -## Requirements +All hosts for SAP Software running one of the following hypervisors. -### Target hypervisor nodes - -**Hypervisor Versions:** -- Red Hat OpenShift Virtualization (OCPV) version XYZ+ +**Hypervisor Versions** +- Red Hat OpenShift Virtualization (OCPV) version 4.14+ - Red Hat Virtualization (RHV) version 4.4+ (Extended Support until 1H-2026) - Contains 'Red Hat Virtualization Manager (RHV-M)' and the 'Red Hat Virtualization Host (RHV-H)' hypervisor nodes that this Ansible Role preconfigures - _Formerly called Red Hat Enterprise Virtualization (RHEV) prior to version 4.4_ - _Not to be confused with standalone RHEL KVM (RHEL-KVM) hypervisor nodes, which this Ansible Role is not compatible with_ -**Prerequisites:** -- Hypervisor Administrator credentials +## Execution + +### Sample execution + +For further information, see the [sample Ansible Playbooks in `/playbooks`](../playbooks/). For example: + +```shell +ansible-playbook --connection=local -i "localhost," \ +./playbooks/sample-sap-hypervisor-redhat-ocp-virt-preconfigure.yml \ +-e @./playbooks/vars/sample-variables-sap-hypervisor-redhat-ocp-virt-preconfigure.yml +``` + +### Suggested execution sequence + +Prior to execution of this Ansible Role, there are no Ansible Roles suggested to be executed first. + +### Summary of execution flow + +- Execute with specified Hypervisor platform using variable `sap_hypervisor_node_preconfigure_platform` +- Import default variables from `/vars` for specified Hypervisor platform +- Re-configure specified Hypervisor platform +- Append performance configuration for specified Hypervisor platform + +### Tags to control execution + +There are no tags used to control the execution of this Ansible Role + +## Platform: Red Hat OpenShift Virtualization + +Configure a plain vanilla Red Hat OpenShift cluster so it can be used for SAP workloads. -**Platform-specific - Red Hat OpenShift Virtualization (OCPV):** +### Requirements +- Jumphost which can access the Red Hat OpenShift cluster +- Optional: Ansible Automation Platform Controller can be used to facilitate the orchestration - Red Hat OpenShift cluster: - - Preferable without any previous customization - - Worker nodes with minimum 96GB of Memory (DRAM) - - Worker nodes with Intel CPU Instruction Sets: `TSX` ([SAP Note 2737837](https://me.sap.com/notes/2737837/E)) - - Storage as Local Storage (e.g. LVM) using host path provisioner, NFS, OpenShift Data Foundation, or other via storage orchestrators (such as Trident for NetApp) + - Cluster without any previous customization + - Credentials such as kubeconfig, admin user and password + - Worker nodes with minimum 96GB of memory (DRAM) + - For SAP HANA: Worker nodes with Intel CPU Instruction Sets: `TSX` ([SAP Note 2737837](https://me.sap.com/notes/2737837/E)) + - Storage + - Netapp filer with NFS using Astra Trident Operator or + - Local storage using Host Path Provisioner (HPP). + - OpenShift Data Foundation or other storage orchestrators have to be manually configured. + ### Execution/Controller host -**Dependencies:** +An Ansible Automation Platform Controller can be used to facilitate the orchestration. 
A jumphost with access to the Red Hat OpenShift cluster is required. + +**Dependencies** - OS Packages - Python 3.9.7+ (i.e. CPython distribution) - - Red Hat OpenShift CLI Client (`oc` binary) - Python Packages: - `kubernetes` 29.0.0+ - Ansible - Ansible Core 2.12.0+ - Ansible Collections: - `kubernetes.core` 3.0.0+ + - `community.okd` 3.0.1 -**During execution:** -- For Red Hat OpenShift Virtualization (OCPV), use Environment Variable `KUBECONFIG` +See also the `requirements.yml` if running standalone. The requirements can be installed with +``` +# ansible-galaxy install -r requirements.yml +``` +**During execution** +- For Red Hat OpenShift Virtualization (OCPV), use environment variable `K8S_AUTH_KUBECONFIG` -## Execution -### Sample execution +### Role Variables +Use [sample-variables-sap-hypervisor-redhat-ocp-virt-preconfigure.yml](../playbooks/vars/sample-variables-sap-hypervisor-redhat-ocp-virt-preconfigure.yml) as a starting point and add your configuration. -For further information, see the [sample Ansible Playbooks in `/playbooks`](../playbooks/). For example: +Let's have a look at the most important variables you need to set. -```shell -ansible-playbook --connection=local -i "localhost," \ -./playbooks/sample-sap-hypervisor-redhat-ocp-virt-preconfigure.yml \ --e @./playbooks/vars/sample-variables-sap-hypervisor-redhat-ocp-virt-preconfigure.yml ``` +########################################################### +# Red Hat OpenShift cluster connection details +########################################################### -### Suggested execution sequence +# Admin username for Red Hat OpenShift cluster connection +sap_hypervisor_node_preconfigure_ocp_admin_username: -Prior to execution of this Ansible Role, there are no Ansible Roles suggested to be executed first. +# Admin password for Red Hat OpenShift cluster connection +sap_hypervisor_node_preconfigure_ocp_admin_password: -### Summary of execution flow +# Path to kubeconfig file Red Hat OpenShift cluster connection +sap_hypervisor_node_preconfigure_ocp_kubeconfig_path: -- Execute with specified Hypervisor platform using variable `sap_hypervisor_node_platform` -- Import default variables from `/vars` for specified Hypervisor platform -- Re-configure specified Hypervisor platform -- Append performance configuration for specified Hypervisor platform +# If this is set to true, the API endpoint and the +# CA Certificate are extracted from the kubeconfig file. +# If set to false, sap_hypervisor_node_preconfigure_ocp_endpoint and +# sap_hypervisor_node_preconfigure_ocp_ca_cert have to be specified. +sap_hypervisor_node_preconfigure_ocp_extract_kubeconfig: true -### Tags to control execution +``` +You need to provide username and password for the Red Hat OpenShift Cluster. The `kubeconfig` file can be specified in `sap_hypervisor_node_preconfigure_ocp_kubeconfig_path` or if omitted, the environment variable `K8S_AUTH_KUBECONFIG` has to point to it. Default is, to use the CA certificate and Red Hat OpenShift cluster API endpoint as specified in the `kubeconfig` file (controlled by variable `sap_hypervisor_node_preconfigure_ocp_extract_kubeconfig`). Make sure to specify the username and password for the cluster: `sap_hypervisor_node_preconfigure_ocp_admin_username` and `sap_hypervisor_node_preconfigure_ocp_admin_password`. 
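To make this concrete, a minimal extra-vars sketch for the cluster connection could look as follows; all values are placeholders, and the commented endpoint/CA variables are only needed when `sap_hypervisor_node_preconfigure_ocp_extract_kubeconfig` is set to `false`:

```
# Illustrative values only - replace with your cluster details
sap_hypervisor_node_preconfigure_platform: redhat_ocp_virt
sap_hypervisor_node_preconfigure_ocp_admin_username: kubeadmin
sap_hypervisor_node_preconfigure_ocp_admin_password: "AAAAA-BBBBB-CCCCC-DDDDD"
sap_hypervisor_node_preconfigure_ocp_kubeconfig_path: /path/to/clusterconfigs/kubeconfig

# Take the API endpoint and CA certificate from the kubeconfig file
sap_hypervisor_node_preconfigure_ocp_extract_kubeconfig: true

# Only required when sap_hypervisor_node_preconfigure_ocp_extract_kubeconfig is false
#sap_hypervisor_node_preconfigure_ocp_endpoint: https://api.cluster.domain.tld:6443
#sap_hypervisor_node_preconfigure_ocp_ca_cert: /path/to/clusterconfigs/cluster-ca-cert.pem
```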
-There are no tags used to control the execution of this Ansible Role +Next are variables that define what storage configuration should be configured, if the operators should be installed and the configuration of the workers should be done. +``` +########################################################### +# Configuration of what should be preconfigured +########################################################### -## License +# Install and configure the host path provisioner (hpp) for a local storage disk +sap_hypervisor_node_preconfigure_install_hpp: false -Apache 2.0 +# Install the trident NFS storage provider +sap_hypervisor_node_preconfigure_install_trident: false +# Should the operators be installed +sap_hypervisor_node_preconfigure_install_operators: true -## Authors +# Configure the workers? +sap_hypervisor_node_preconfigure_setup_worker_nodes: true +``` -Nils Koenig +The next section you have to modify are the cluster configuration details. Every worker has to have an entry in the `workers` section and make sure, that the name attribute corresponds with the cluster node name (here: worker-0). Adjust the network interface name you want to use. There are two types of networking technologies available: bridging or SR-IOV. See the configuration example file for more options (`playbooks/vars/sample-variables-sap-hypervisor-redhat-ocp-virt-preconfigure.yml`). ---- +There is a section for the `trident` configuration, this is required when installing the NetApp Astra Trident Operator for NFS storage. When using the host path provisioner, `worker_localstorage_device` has to point to the block device which should be used. -## Ansible Role Input Variables -Please first check the [/defaults parameters file](./defaults/main.yml), and platform specific parameters (e.g. [/vars/platform_defaults_redhat_ocp_virt](./vars/platform_defaults_redhat_ocp_virt.yml). +``` +########################################################### +# Red Hat OpenShift cluster configuration details +########################################################### + +# Example configuration for redhat_ocp_virt +sap_hypervisor_node_preconfigure_cluster_config: + + # namespace under which the VMs are created, note this has to be + # openshift-sriov-network-operator in case of using SR-IOV network + # devices + vm_namespace: sap + + # Optional, configuration for trident driver for Netapp NFS filer + trident: + management: management.domain.org + data: datalif.netapp.domain.org + svm: sap_svm + backend: nas_backend + aggregate: aggregate_Name + username: admin + password: xxxxx + storage_driver: ontap-nas + storage_prefix: ocpv_sap_ + + # CPU cores which will be reserved for kubernetes + worker_kubernetes_reserved_cpus: "0,1" + + # Storage device used for host path provisioner as local storage. + worker_localstorage_device: /dev/vdb + + # detailed configuration for every worker that should be configured + workers: + + - name: worker-0 # name must match the node name + networks: # Example network config + + - name: sapbridge # using a bridge + description: SAP bridge + state: up + type: linux-bridge + ipv4: + enabled: false + auto-gateway: false + auto-dns: false + bridge: + options: + stp: + enabled: false + port: + - name: ens1f0 # network IF name -Below is the list of input parameters for this Ansible Role. +``` +### Example Playbook +See [sample-sap-hypervisor-redhat_ocp_virt-preconfigure.yml](../playbooks/sample-sap-hypervisor-redhat_ocp_virt-preconfigure.yml) for an example. 
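In essence, the sample playbook first obtains an API token with `community.okd.openshift_auth`, writes a temporary kubeconfig carrying that token, and only then includes the role with the kubeconfig exported. A condensed sketch of that flow is shown below; the registered variable name and the kubeconfig path are placeholders, the token-injection step is reduced to a comment, and the full sample playbook remains the reference:

```yaml
- name: Sketch - authenticate against the cluster and run the preconfigure role
  hosts: all
  gather_facts: false
  vars:
    sap_hypervisor_node_preconfigure_platform: redhat_ocp_virt
  tasks:
    - name: Log into Red Hat OpenShift cluster (obtain access token)
      community.okd.openshift_auth:
        host: "{{ sap_hypervisor_node_preconfigure_ocp_endpoint }}"
        username: "{{ sap_hypervisor_node_preconfigure_ocp_admin_username }}"
        password: "{{ sap_hypervisor_node_preconfigure_ocp_admin_password }}"
        ca_cert: "{{ sap_hypervisor_node_preconfigure_ocp_ca_cert }}"
      register: __example_ocp_auth_results

    # The full playbook merges __example_ocp_auth_results.openshift_auth.api_key
    # into a temporary kubeconfig at this point; /tmp/kubeconfig is a placeholder.

    - name: Invoke role with credentials set as environment variables
      environment:
        KUBECONFIG: /tmp/kubeconfig
        K8S_AUTH_KUBECONFIG: /tmp/kubeconfig
      block:
        - name: Include sap_hypervisor_node_preconfigure Ansible Role
          ansible.builtin.include_role:
            name: community.sap_infrastructure.sap_hypervisor_node_preconfigure
```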
+### Example Usage +Make sure to set the `K8S_AUTH_KUBECONFIG` environment variable, e.g. +``` +export K8S_AUTH_KUBECONFIG=/path/to/my_kubeconfig +``` +To invoke the example playbook with the example configuration using your localhost as the Ansible host, use the following command line: + +```shell +ansible-playbook --connection=local -i localhost, \ +playbooks/sample-sap-hypervisor-redhat-ocp-virt-preconfigure.yml \ +-e @playbooks/vars/sample-variables-sap-hypervisor-redhat-ocp-virt-preconfigure.yml +``` + + +## Platform: Red Hat Virtualization (RHV) +This Ansible Role can preconfigure Red Hat Virtualization (RHV), formerly called Red Hat Enterprise Virtualization (RHEV) prior to the version 4.4 release. Red Hat Virtualization (RHV) consists of 'Red Hat Virtualization Manager (RHV-M)' and the 'Red Hat Virtualization Host (RHV-H)' hypervisor nodes that this Ansible Role preconfigures. Please note that Red Hat Virtualization is discontinued and maintenance support will end mid-2024. Extended life support for RHV ends mid-2026. +This Ansible Role does not preconfigure RHEL KVM (RHEL-KVM) hypervisor nodes. Please note that RHEL KVM is standalone and does not have management tooling (previously provided by RHV-M). + +### Requirements + +**Prerequisites:** +- Hypervisor Administrator credentials +- RHV hypervisor(s) + + +**Platform-specific - Red Hat Virtualization (RHV)** +- Jumphost + +### Role Variables +See [sample-variables-sap-hypervisor-redhat-rhel-kvm-preconfigure.yml](../playbooks/vars/sample-variables-sap-hypervisor-redhat-rhel-kvm-preconfigure.yml) for details. `sap_hypervisor_node_preconfigure_reserved_ram (default: 100)` Reserve memory [GB] for hypervisor host. Depending in the use case should be at least 50-100GB. @@ -110,8 +240,7 @@ static: done at kernel command line which is slow, but safe runtime: done with hugeadm which is faster, but can in some cases not ensure all HPs are allocated. `sap_hypervisor_node_preconfigure_kvm_nx_huge_pages (default: "auto")` Setting for the huge page shattering kvm.nx_huge_pages: {"auto"|"on"|"off"}. Note the importance of the quotes, otherwise off will be mapped to false. See https://www.kernel.org/doc/html/latest/admin-guide/kernel-parameters.html for additional information: - -```ini +``` kvm.nx_huge_pages= [KVM] Controls the software workaround for the X86_BUG_ITLB_MULTIHIT bug. @@ -133,3 +262,13 @@ runtime: done with hugeadm which is faster, but can in some cases not ensure all HPs are allocated. `sap_hypervisor_node_preconfigure_ignore_failed_assertion (default: no)` Fail if assertion is invalid. `sap_hypervisor_node_preconfigure_run_grub2_mkconfig (default: yes)` Update the grub2 config. + + +### Example Playbook +See [sample-sap-hypervisor-redhat-rhel-kvm-preconfigure.yml](../playbooks/sample-sap-hypervisor-redhat-rhel-kvm-preconfigure.yml) for an example.
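To complement that example, a hypothetical extra-vars snippet built from the variables described above might look like this (assuming the `redhat_rhel_kvm` platform identifier listed in the role defaults and used by the sample playbook; the values mirror the documented defaults):

```
sap_hypervisor_node_preconfigure_platform: redhat_rhel_kvm

# Memory [GB] reserved for the hypervisor host
sap_hypervisor_node_preconfigure_reserved_ram: 100

# Keep the quotes so that off is not interpreted as boolean false
sap_hypervisor_node_preconfigure_kvm_nx_huge_pages: "auto"

# Documented defaults: no / yes
sap_hypervisor_node_preconfigure_ignore_failed_assertion: no
sap_hypervisor_node_preconfigure_run_grub2_mkconfig: yes
```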
+ +### License +Apache 2.0 + +### Author Information +Nils Koenig (nkoenig@redhat.com) diff --git a/roles/sap_hypervisor_node_preconfigure/defaults/main.yml b/roles/sap_hypervisor_node_preconfigure/defaults/main.yml index d072014..9f19324 100644 --- a/roles/sap_hypervisor_node_preconfigure/defaults/main.yml +++ b/roles/sap_hypervisor_node_preconfigure/defaults/main.yml @@ -1,123 +1,4 @@ --- # ibmpower_phyp, redhat_ocp_virt, redhat_rhel_kvm, vmware_vsphere -sap_hypervisor_node_platform: - - -# Example configuration for redhat_ocp_virt -sap_hypervisor_node_preconfigure_cluster_config: - - # URL under which the OCP cluster is reachable - cluster_url: ocpcluster.domain.org - - # namespace under which the VMs are created, note this has to be - # openshift-sriov-network-operator in case of using SRIOV network - # devices - vm_namespace: sap - - # Optional, configuration for trident driver for Netapp NFS filer - trident: - management: management.domain.org - data: datalif.netapp.domain.org - svm: sap_svm - backend: nas_backend - aggregate: aggregate_Name - username: admin - password: xxxxx - storage_driver: ontap-nas - storage_prefix: ocpv_sap_ - - # CPU cores reserved for kubernetes on worker node - worker_kubernetes_reserved_cpus: "0,1" - - # Storage device which should be used if host path provisioner is used - worker_localstorage_device: /dev/vdb - - # detailed configuration for every worker that should be configured - workers: - - # - name: worker-0 # name must match the node name - # networks: # Example network config - - # - name: sapbridge # using a bridge - # description: SAP bridge - # state: up - # type: linux-bridge - # ipv4: - # enabled: false - # auto-gateway: false - # auto-dns: false - # bridge: - # options: - # stp: - # enabled: false - # port: - # - name: ens1f0 # network IF name - - # - name: storage # an SRIOV device - # interface: ens2f0 # network IF name - # type: sriov - - # - name: storagebridge # using a bridge - # bridge: # another bridge - # options: - # stp: - # enabled: false - # port: - # - name: ens2f0 # network IF name - # description: storage - # mtu: 9000 - # ipv4: - # address: - # - ip: 192.168.1.51 # IP config - # prefix-length: 24 - # auto-dns: false - # auto-gateway: false - # enabled: true - # state: up - # type: linux-bridge - - # - name: multi # another SRIOV device - # interface: ens2f1 # network IF name - # type: sriov - - # - name: worker-1 # second worker configuration - # networks: # Example network config - - # - name: sapbridge # using a bridge - # description: SAP bridge - # state: up - # type: linux-bridge - # ipv4: - # enabled: false - # auto-gateway: false - # auto-dns: false - # bridge: - # options: - # stp: - # enabled: false - # port: - # - name: ens1f0 # network IF name - - # - name: storagebridge # using a bridge - # bridge: # another bridge - # options: - # stp: - # enabled: false - # port: - # - name: ens2f0 # network IF name - # description: storage - # mtu: 9000 - # ipv4: - # address: - # - ip: 192.168.1.51 # IP config - # prefix-length: 24 - # auto-dns: false - # auto-gateway: false - # enabled: true - # state: up - # type: linux-bridge - - # - name: storage # an SRIOV device - # interface: ens2f0 # network IF name - # type: sriov +sap_hypervisor_node_preconfigure_platform: diff --git a/roles/sap_hypervisor_node_preconfigure/handlers/main.yml b/roles/sap_hypervisor_node_preconfigure/handlers/main.yml index d943640..01ac2a0 100644 --- a/roles/sap_hypervisor_node_preconfigure/handlers/main.yml +++ 
b/roles/sap_hypervisor_node_preconfigure/handlers/main.yml @@ -1,3 +1,3 @@ --- -- name: Hypervisor node preconfigure - Include Handler Tasks for {{ sap_hypervisor_node_platform }} - ansible.builtin.import_tasks: "platform/{{ sap_hypervisor_node_platform }}/main.yml" +- name: Hypervisor node preconfigure - Include Handler Tasks for {{ sap_hypervisor_node_preconfigure_platform }} + ansible.builtin.import_tasks: "platform/{{ sap_hypervisor_node_preconfigure_platform }}/main.yml" diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/main.yml b/roles/sap_hypervisor_node_preconfigure/tasks/main.yml index 1736339..fe1e8b1 100644 --- a/roles/sap_hypervisor_node_preconfigure/tasks/main.yml +++ b/roles/sap_hypervisor_node_preconfigure/tasks/main.yml @@ -1,6 +1,6 @@ --- -- name: SAP certified hypervisor node preconfigure - Include Vars for {{ sap_hypervisor_node_platform }} - ansible.builtin.include_vars: "platform_defaults_{{ sap_hypervisor_node_platform }}.yml" +- name: SAP certified hypervisor node preconfigure - Include Vars for {{ sap_hypervisor_node_preconfigure_platform }} + ansible.builtin.include_vars: "platform_defaults_{{ sap_hypervisor_node_preconfigure_platform }}.yml" -- name: SAP certified hypervisor node preconfigure - Include Tasks for {{ sap_hypervisor_node_platform }} - ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_platform }}/main.yml" +- name: SAP certified hypervisor node preconfigure - Include Tasks for {{ sap_hypervisor_node_preconfigure_platform }} + ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_preconfigure_platform }}/main.yml" diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/configure-worker-node.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/configure-worker-node.yml index 3af1dcf..9680189 100644 --- a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/configure-worker-node.yml +++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/configure-worker-node.yml @@ -1,9 +1,4 @@ --- -- name: Label nodes - ansible.builtin.command: "oc label node {{ __sap_hypervisor_node_preconfigure_register_worker.name }} cpumanager=true --overwrite=true" - register: __sap_hypervisor_node_preconfigure_label_node_result - changed_when: __sap_hypervisor_node_preconfigure_label_node_result.rc != 0 - - name: Include node network ansible.builtin.include_tasks: node-network.yml with_items: "{{ __sap_hypervisor_node_preconfigure_register_worker.networks }}" @@ -12,8 +7,12 @@ index_var: __sap_hypervisor_node_preconfigure_register_worker_network_nr when: __sap_hypervisor_node_preconfigure_register_worker.networks is defined -# How to wait for node to be scheduleable? 
(NodeSchedulable) - name: Wait for all k8s nodes to be ready - ansible.builtin.command: oc wait --for=condition=Ready nodes --all --timeout=3600s - register: __sap_hypervisor_node_preconfigure_register_nodes_ready - changed_when: __sap_hypervisor_node_preconfigure_register_nodes_ready.rc != 0 + kubernetes.core.k8s_info: + kind: Node + wait: true + wait_condition: + status: true + type: Ready + wait_sleep: 20 + wait_timeout: 600 diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-cnv-operator.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-cnv-operator.yml index e36e26f..ab6ad5d 100644 --- a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-cnv-operator.yml +++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-cnv-operator.yml @@ -6,21 +6,20 @@ apiVersion: v1 kind: Namespace metadata: - name: openshift-cnv + name: "{{ sap_hypervisor_node_preconfigure_ocpv_namespace }}" - name: Create CNV OperatorGroup kubevirt-hyperconverged-group kubernetes.core.k8s: state: present - definition: apiVersion: operators.coreos.com/v1 kind: OperatorGroup metadata: name: kubevirt-hyperconverged-group - namespace: openshift-cnv + namespace: "{{ sap_hypervisor_node_preconfigure_ocpv_namespace }}" spec: targetNamespaces: - - openshift-cnv + - "{{ sap_hypervisor_node_preconfigure_ocpv_namespace }}" - name: Create CNV Subscription kubernetes.core.k8s: @@ -30,33 +29,49 @@ kind: Subscription metadata: name: hco-operatorhub - namespace: openshift-cnv + namespace: "{{ sap_hypervisor_node_preconfigure_ocpv_namespace }}" spec: source: redhat-operators sourceNamespace: openshift-marketplace name: kubevirt-hyperconverged + channel: "{{ sap_hypervisor_node_preconfigure_ocpv_subscription_channel }}" -- name: Wait - ansible.builtin.pause: - seconds: 300 - -- name: Get Install Plan Name +- name: Get Install Plan Name from Subscription retries: 10 delay: 10 - ansible.builtin.command: oc get subscriptions/hco-operatorhub --namespace openshift-cnv --output=jsonpath='{$.status.installplan.name}' + kubernetes.core.k8s_info: + api_version: operators.coreos.com/v1alpha1 + kind: Subscription + name: hco-operatorhub + namespace: "{{ sap_hypervisor_node_preconfigure_ocpv_namespace }}" register: __sap_hypervisor_node_preconfigure_register_cnv_subscription_install_plan_name - until: __sap_hypervisor_node_preconfigure_register_cnv_subscription_install_plan_name.stdout != "" - changed_when: __sap_hypervisor_node_preconfigure_register_cnv_subscription_install_plan_name.stdout != "" + until: __sap_hypervisor_node_preconfigure_register_cnv_subscription_install_plan_name.resources[0].status.installPlanRef.name is defined + changed_when: True + +- name: Set Install Plan Name + ansible.builtin.set_fact: + install_plan_name: "{{ __sap_hypervisor_node_preconfigure_register_cnv_subscription_install_plan_name.resources[0].status.installPlanRef.name }}" - name: Wait for Install Plan to finish - ansible.builtin.command: "oc wait installplan \ - {{ __sap_hypervisor_node_preconfigure_register_cnv_subscription_install_plan_name.stdout }} --namespace openshift-cnv --for=condition='Installed' --timeout='5m'" - register: __sap_hypervisor_node_preconfigure_register_wait_for_installplan - changed_when: __sap_hypervisor_node_preconfigure_register_wait_for_installplan.rc != 0 + vars: + install_plan_name: "{{ __sap_hypervisor_node_preconfigure_register_cnv_subscription_install_plan_name.stdout }}" + block: + - name: Get Install Plan 
details + kubernetes.core.k8s_info: + api_version: operators.coreos.com/v1alpha1 + kind: InstallPlan + name: "{{ install_plan_name }}" + namespace: "{{ sap_hypervisor_node_preconfigure_ocpv_namespace }}" + register: __sap_hypervisor_node_preconfigure_register_wait_for_installplan + until: __sap_hypervisor_node_preconfigure_register_wait_for_installplan.resources[0].status.phase == "Complete" + retries: 60 + delay: 5 + ignore_errors: true -- name: Wait - ansible.builtin.pause: - seconds: 300 + - name: Fail if Install Plan is not Complete after waiting + ansible.builtin.fail: + msg: "Install Plan is not Complete after the specified wait period." + when: __sap_hypervisor_node_preconfigure_register_wait_for_installplan.resources[0].status.phase != "Complete" - name: Create CNV HyperConverged kubernetes.core.k8s: @@ -66,9 +81,36 @@ kind: HyperConverged metadata: name: kubevirt-hyperconverged - namespace: openshift-cnv - spec: + namespace: "{{ sap_hypervisor_node_preconfigure_ocpv_namespace }}" + +- name: Retrieve HyperConverged Status + kubernetes.core.k8s_info: + api_version: hco.kubevirt.io/v1beta1 + kind: HyperConverged + name: kubevirt-hyperconverged + namespace: "{{ sap_hypervisor_node_preconfigure_ocpv_namespace }}" + register: hyperconverged_status + until: > + {{ + (hyperconverged_status.resources[0].status.conditions | + selectattr('type', 'equalto', 'Available') | + selectattr('status', 'equalto', 'True') | + list | length > 0) | bool + }} + retries: 60 + delay: 10 + +- name: Set HyperConverged Available Fact + ansible.builtin.set_fact: + hyperconverged_available: >- + {{ + (hyperconverged_status.resources[0].status.conditions | + selectattr('type', 'equalto', 'Available') | + selectattr('status', 'equalto', 'True') | + list | length > 0) | bool + }} -- name: Wait - ansible.builtin.pause: - seconds: 300 +- name: Fail if HyperConverged is not Available after waiting + ansible.builtin.fail: + msg: "HyperConverged is not Available after the specified wait period." 
+ when: not hyperconverged_available diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-hpp.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-hpp.yml index daa713a..6222887 100644 --- a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-hpp.yml +++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-hpp.yml @@ -67,10 +67,9 @@ storagePools: - name: localstorage path: /var/localstorage - workload: - nodeSelector: - kubernetes.io/os: linux - machineconfiguration.openshift.io/role: worker + workload: + nodeSelector: + kubernetes.io/os: linux - name: Create storage class for HPP kubernetes.core.k8s: diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-nmstate-operator.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-nmstate-operator.yml index 5e1e4f4..12f493a 100644 --- a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-nmstate-operator.yml +++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-nmstate-operator.yml @@ -30,9 +30,16 @@ targetNamespaces: - openshift-nmstate -- name: Pause to give operator a chance to install - ansible.builtin.pause: - minutes: 2 +- name: Wait for nmstate OperatorGroup to be created + kubernetes.core.k8s_info: + api_version: operators.coreos.com/v1 + kind: OperatorGroup + name: openshift-nmstate-tn6k8 + namespace: openshift-nmstate + register: operatorgroup_status + retries: 30 + delay: 10 + until: operatorgroup_status.resources | length > 0 - name: Subscribe to the nmstate Operator kubernetes.core.k8s: @@ -52,9 +59,27 @@ source: redhat-operators sourceNamespace: openshift-marketplace -- name: Pause to give operator a chance to install - ansible.builtin.pause: - minutes: 5 +- name: Wait for Subscription to have an InstallPlan + kubernetes.core.k8s_info: + api_version: operators.coreos.com/v1alpha1 + kind: Subscription + name: kubernetes-nmstate-operator + namespace: openshift-nmstate + register: subscription_status + retries: 30 + delay: 10 + until: subscription_status.resources[0].status.installplan.name is defined + +- name: Wait for InstallPlan to complete + kubernetes.core.k8s_info: + api_version: operators.coreos.com/v1alpha1 + kind: InstallPlan + name: "{{ subscription_status.resources[0].status.installplan.name }}" + namespace: openshift-nmstate + register: installplan_status + retries: 30 + delay: 10 + until: installplan_status.resources[0].status.phase == "Complete" - name: Create instance of the nmstate operator kubernetes.core.k8s: @@ -65,6 +90,12 @@ metadata: name: nmstate -- name: Pause to give instance a chance to come up - ansible.builtin.pause: - minutes: 5 +- name: Wait for NMState instance to be created + kubernetes.core.k8s_info: + api_version: nmstate.io/v1 + kind: NMState + name: nmstate + register: nmstate_status + retries: 30 + delay: 10 + until: nmstate_status.resources | length > 0 diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-sriov-operator.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-sriov-operator.yml index 5fcb437..35aceda 100644 --- a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-sriov-operator.yml +++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-sriov-operator.yml @@ -1,5 +1,5 @@ --- -- name: Create the SRIOV Operator namespace +- 
name: Create the SR-IOV Operator namespace kubernetes.core.k8s: state: present definition: @@ -8,7 +8,7 @@ metadata: name: openshift-sriov-network-operator -- name: Create the SRIOV Operator namespace +- name: Create the SR-IOV Operator namespace kubernetes.core.k8s: state: present definition: @@ -21,7 +21,7 @@ targetNamespaces: - openshift-sriov-network-operator -- name: Create the SRIOV Operator namespace +- name: Create the SR-IOV Operator namespace kubernetes.core.k8s: state: present definition: @@ -36,19 +36,84 @@ name: sriov-network-operator channel: "stable" -- name: Pause to give operator a chance to install - ansible.builtin.pause: - minutes: 3 - -- name: Copy patch to enable unsupported NICs - ansible.builtin.copy: - src: sriov-enabled-unsupported-nics.sh - dest: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/sriov-enabled-unsupported-nics.sh" - mode: "0755" - when: sap_hypervisor_node_preconfigure_sriov_enable_unsupported_nics - -- name: Enable unsupported NICs - ansible.builtin.command: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/sriov-enabled-unsupported-nics.sh" - when: sap_hypervisor_node_preconfigure_sriov_enable_unsupported_nics - register: __sap_hypervisor_node_preconfigure_register_enable_unsupported_nics - changed_when: __sap_hypervisor_node_preconfigure_register_enable_unsupported_nics.rc != 0 +- name: Wait for Subscription to have an InstallPlan + kubernetes.core.k8s_info: + api_version: operators.coreos.com/v1alpha1 + kind: Subscription + name: sriov-network-operator-subscription + namespace: openshift-sriov-network-operator + register: subscription_status + retries: 30 + delay: 10 + until: subscription_status.resources[0].status.installplan.name is defined + +- name: Wait for InstallPlan to complete for SR-IOV + kubernetes.core.k8s_info: + api_version: operators.coreos.com/v1alpha1 + kind: InstallPlan + name: "{{ subscription_status.resources[0].status.installplan.name }}" + namespace: openshift-sriov-network-operator + register: installplan_status + retries: 30 + delay: 10 + ignore_errors: yes + until: installplan_status.resources[0].status.phase == "Complete" + +- name: Verify SR-IOV Operator is running + kubernetes.core.k8s_info: + api_version: operators.coreos.com/v1 + kind: OperatorGroup + name: sriov-network-operators + namespace: openshift-sriov-network-operator + register: operatorgroup_status + retries: 30 + delay: 10 + until: operatorgroup_status.resources + +- name: Check if SriovOperatorConfig exists + kubernetes.core.k8s_info: + api_version: sriovnetwork.openshift.io/v1 + kind: SriovOperatorConfig + name: default + namespace: openshift-sriov-network-operator + register: sriov_operator_config_status + ignore_errors: yes + +- name: Enable unsupported NICs for SR-IOV usage if the resource exists + kubernetes.core.k8s: + state: patched + definition: + apiVersion: sriovnetwork.openshift.io/v1 + kind: SriovOperatorConfig + metadata: + name: default + namespace: openshift-sriov-network-operator + spec: + enableOperatorWebhook: false + when: sriov_operator_config_status.resources is defined and sriov_operator_config_status.resources | length > 0 + +- name: Create SriovOperatorConfig if it does not exist + kubernetes.core.k8s: + state: present + definition: + apiVersion: sriovnetwork.openshift.io/v1 + kind: SriovOperatorConfig + metadata: + name: default + namespace: openshift-sriov-network-operator + spec: + disableDrain: false + enableInjector: true + enableOperatorWebhook: false + logLevel: 2 + +- name: Wait for 
SriovOperatorConfig to exist + kubernetes.core.k8s_info: + api_version: sriovnetwork.openshift.io/v1 + kind: SriovOperatorConfig + name: default + namespace: openshift-sriov-network-operator + register: sriov_operator_config_status + retries: 10 + delay: 10 + until: sriov_operator_config_status.resources | length > 0 diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-trident.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-trident.yml index c1788db..6cf235a 100644 --- a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-trident.yml +++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-trident.yml @@ -3,30 +3,28 @@ ansible.builtin.unarchive: remote_src: true src: "{{ sap_hypervisor_node_preconfigure_install_trident_url }}" - dest: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/" + dest: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir_role.path }}/" - name: Uninstall trident - ansible.builtin.command: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/trident-installer/tridentctl uninstall -n trident" + ansible.builtin.command: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir_role.path }}/trident-installer/tridentctl uninstall -n trident" ignore_errors: true register: __sap_hypervisor_node_preconfigure_register_uninstall_trident changed_when: __sap_hypervisor_node_preconfigure_register_uninstall_trident.rc != 0 - name: Install trident - ansible.builtin.command: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/trident-installer/tridentctl install -n trident" + ansible.builtin.command: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir_role.path }}/trident-installer/tridentctl install -n trident" register: __sap_hypervisor_node_preconfigure_register_install_trident changed_when: __sap_hypervisor_node_preconfigure_register_install_trident.rc != 0 - name: Copy backend file ansible.builtin.template: src: "trident-backend.json.j2" - dest: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/trident-backend.json" + dest: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir_role.path }}/trident-backend.json" mode: "0644" - name: Create trident backend - ansible.builtin.command: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}\ - /trident-installer/tridentctl -n trident create backend -f\ - {{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}\ - /trident-backend.json" + ansible.builtin.command: + "{{ __sap_hypervisor_node_preconfigure_register_tmpdir_role.path }}/trident-installer/tridentctl -n trident create backend -f {{ __sap_hypervisor_node_preconfigure_register_tmpdir_role.path }}/trident-backend.json" register: __sap_hypervisor_node_preconfigure_register_create_trident_backend changed_when: __sap_hypervisor_node_preconfigure_register_create_trident_backend.rc != 0 diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-virtctl.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-virtctl.yml deleted file mode 100644 index bd5dd81..0000000 --- a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-virtctl.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- name: Create ~/bin - ansible.builtin.file: - path: ~/bin - state: directory - mode: "0700" - -- name: Get and extract virtctl -# become: yes - ansible.builtin.unarchive: - validate_certs: false - remote_src: true - src: 
"https://hyperconverged-cluster-cli-download-openshift-cnv.apps.\ - {{ sap_hypervisor_node_preconfigure_cluster_config.cluster_url }}/amd64/linux/virtctl.tar.gz" - dest: ~/bin diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/kargs.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/kargs.yml deleted file mode 100644 index bd28ea5..0000000 --- a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/kargs.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- name: Personalize template - ansible.builtin.template: - src: 99-kargs-worker.yml.j2 - dest: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/99-kargs-{{ __sap_hypervisor_node_preconfigure_register_worker_name }}.yml.j2" - mode: "0644" - -- name: Enable hugepages - kubernetes.core.k8s: - state: present - src: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/99-kargs-{{ __sap_hypervisor_node_preconfigure_register_worker_name }}.yml.j2" diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/main.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/main.yml index 89b67c9..cb982cd 100644 --- a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/main.yml +++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/main.yml @@ -52,53 +52,60 @@ # if system < 512GiB memory use 32GiB as upper boundary, 64GB otherwise as upper boundary - name: Calculate amount of hugepages to reserve (host memory < 512 GiB) ansible.builtin.set_fact: - __sap_hypervisor_node_preconfigure_register_worker_reserved_hugepages: "{{ __sap_hypervisor_node_preconfigure_register_worker_memory_gib | int - sap_hypervisor_node_preconfigure_hypervisor_reserved_ram_host_lt_512 }}" + __sap_hypervisor_node_preconfigure_register_worker_reserved_hugepages: "{{ __sap_hypervisor_node_preconfigure_register_worker_memory_gib | int \ + - sap_hypervisor_node_preconfigure_hypervisor_reserved_ram_host_lt_512 }}" when: __sap_hypervisor_node_preconfigure_register_worker_memory_gib | int < 512 - name: Calculate amount of hugepages to reserve (host memory >= 512 GiB) ansible.builtin.set_fact: - __sap_hypervisor_node_preconfigure_register_worker_reserved_hugepages: "{{ __sap_hypervisor_node_preconfigure_register_worker_memory_gib | int - sap_hypervisor_node_preconfigure_hypervisor_reserved_ram_host_ge_512 }}" + __sap_hypervisor_node_preconfigure_register_worker_reserved_hugepages: "{{ __sap_hypervisor_node_preconfigure_register_worker_memory_gib | int \ + - sap_hypervisor_node_preconfigure_hypervisor_reserved_ram_host_ge_512 }}" when: __sap_hypervisor_node_preconfigure_register_worker_memory_gib | int >= 512 - name: Include prepare - ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_platform }}/prepare.yml" + ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_preconfigure_platform }}/prepare.yml" + +- name: Include patch cluster masters unschedulable + ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_preconfigure_platform }}/patch-cluster-masters-unschedulable.yml" - name: Include tuned virtual host - ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_platform }}/tuned-virtual-host.yml" + ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_preconfigure_platform }}/tuned-virtual-host.yml" - name: Include install CNV operator - ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_platform }}/install-cnv-operator.yml" + ansible.builtin.include_tasks: 
"platform/{{ sap_hypervisor_node_preconfigure_platform }}/install-cnv-operator.yml" when: sap_hypervisor_node_preconfigure_install_operators - name: Include install sriov operator - ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_platform }}/install-sriov-operator.yml" + ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_preconfigure_platform }}/install-sriov-operator.yml" when: sap_hypervisor_node_preconfigure_install_operators - name: Include install nmstate operator - ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_platform }}/install-nmstate-operator.yml" + ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_preconfigure_platform }}/install-nmstate-operator.yml" when: sap_hypervisor_node_preconfigure_install_operators -- name: Include install virtctl - ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_platform }}/install-virtctl.yml" - - name: Include setup worker nodes - ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_platform }}/setup-worker-nodes.yml" + ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_preconfigure_platform }}/setup-worker-nodes.yml" when: sap_hypervisor_node_preconfigure_setup_worker_nodes -# How to wait for node to be scheduleable? (NodeSchedulable) - name: Wait for all k8s nodes to be ready - ansible.builtin.command: oc wait --for=condition=Ready nodes --all --timeout=3600s - register: __sap_hypervisor_node_preconfigure_register_nodes_ready - changed_when: __sap_hypervisor_node_preconfigure_register_nodes_ready.rc != 0 - -- name: Print nodes - ansible.builtin.debug: - var: __sap_hypervisor_node_preconfigure_register_nodes_ready.stdout_lines + kubernetes.core.k8s_info: + kind: Node + wait: yes + wait_condition: + status: True + type: Ready + wait_sleep: 20 + wait_timeout: 600 - name: Include Trident installation - ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_platform }}/install-trident.yml" + ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_preconfigure_platform }}/install-trident.yml" when: sap_hypervisor_node_preconfigure_install_trident - name: Include local storage creation (HPP) - ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_platform }}/install-hpp.yml" + ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_preconfigure_platform }}/install-hpp.yml" when: sap_hypervisor_node_preconfigure_install_hpp + +- name: Remove temporary directory + ansible.builtin.file: + state: absent + path: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir_role.path }}" diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/node-network.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/node-network.yml index 421d24c..b8fc1e4 100644 --- a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/node-network.yml +++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/node-network.yml @@ -3,7 +3,7 @@ ansible.builtin.debug: var: __sap_hypervisor_node_preconfigure_register_worker_network -- name: "Create NodeNetworkConfigurationPolicy\ +- name: "Create NodeNetworkConfigurationPolicy \ {{ __sap_hypervisor_node_preconfigure_register_worker_network.name }} on \ {{ __sap_hypervisor_node_preconfigure_register_worker.name }}" kubernetes.core.k8s: @@ -20,6 +20,7 @@ interfaces: - "{{ __sap_hypervisor_node_preconfigure_register_worker_network }}" when: __sap_hypervisor_node_preconfigure_register_worker_network.type == 'linux-bridge' + or 
__sap_hypervisor_node_preconfigure_register_worker_network.type == 'ethernet' - name: "Create NetworkAttachmentDefinition {{ __sap_hypervisor_node_preconfigure_register_worker_network.name }}" kubernetes.core.k8s: @@ -54,7 +55,7 @@ state: present when: __sap_hypervisor_node_preconfigure_register_worker_network.type == 'sriov' -- name: "Create SRIOV NodeNetworkConfigurationPolicy \ +- name: "Create SR-IOV NodeNetworkConfigurationPolicy \ {{ __sap_hypervisor_node_preconfigure_register_worker_network.name }} on \ {{ __sap_hypervisor_node_preconfigure_register_worker.name }}" kubernetes.core.k8s: diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/patch-cluster-masters-unschedulable.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/patch-cluster-masters-unschedulable.yml new file mode 100644 index 0000000..2e703fa --- /dev/null +++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/patch-cluster-masters-unschedulable.yml @@ -0,0 +1,11 @@ +--- +- name: Set cluster master nodes unscheduable + kubernetes.core.k8s: + state: patched + definition: + apiVersion: config.openshift.io/v1 + kind: Scheduler + metadata: + name: cluster + spec: + mastersSchedulable: false diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/prepare.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/prepare.yml index 0dfbfa1..280eb02 100644 --- a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/prepare.yml +++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/prepare.yml @@ -5,8 +5,8 @@ - name: Create Tempdir ansible.builtin.tempfile: state: directory - suffix: "_sap_hypervisor_node_preconfigure" - register: __sap_hypervisor_node_preconfigure_register_tmpdir + suffix: "_sap_hypervisor_node_preconfigure_role" + register: __sap_hypervisor_node_preconfigure_register_tmpdir_role - name: "Create VM namespace {{ sap_hypervisor_node_preconfigure_cluster_config.vm_namespace }}" kubernetes.core.k8s: diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/setup-worker-nodes.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/setup-worker-nodes.yml index 29420be..50acdc0 100644 --- a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/setup-worker-nodes.yml +++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/setup-worker-nodes.yml @@ -1,6 +1,6 @@ --- - name: Include configure worker - ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_platform }}/configure-worker-node.yml" + ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_preconfigure_platform }}/configure-worker-node.yml" with_items: "{{ sap_hypervisor_node_preconfigure_cluster_config.workers }}" loop_control: loop_var: __sap_hypervisor_node_preconfigure_register_worker @@ -72,10 +72,10 @@ - name: Render template ansible.builtin.template: src: 99-kargs-worker.yml.j2 - dest: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/99-kargs-worker.yml" + dest: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir_role.path }}/99-kargs-worker.yml" mode: "0644" - name: Enable hugepages kubernetes.core.k8s: state: present - src: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/99-kargs-worker.yml" + src: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir_role.path }}/99-kargs-worker.yml" diff --git 
a/roles/sap_hypervisor_node_preconfigure/vars/platform_defaults_redhat_ocp_virt.yml b/roles/sap_hypervisor_node_preconfigure/vars/platform_defaults_redhat_ocp_virt.yml index 0d1fedf..cc85822 100644 --- a/roles/sap_hypervisor_node_preconfigure/vars/platform_defaults_redhat_ocp_virt.yml +++ b/roles/sap_hypervisor_node_preconfigure/vars/platform_defaults_redhat_ocp_virt.yml @@ -2,17 +2,60 @@ # vars file for redhat_ocp_virt +########################################################### +# Red Hat OpenShift cluster connection details +########################################################### + +# Admin username for Red Hat OpenShift cluster connection +sap_hypervisor_node_preconfigure_ocp_admin_username: + +# Admin password for Red Hat OpenShift cluster connection +sap_hypervisor_node_preconfigure_ocp_admin_password: + +# Path to kubeconfig file Red Hat OpenShift cluster connection +sap_hypervisor_node_preconfigure_ocp_kubeconfig_path: + +# If this is set to true, the API endpoint and the +# CA Certificate are extracted from the kubeconfig file. +# If set to false, sap_hypervisor_node_preconfigure_ocp_endpoint and +# sap_hypervisor_node_preconfigure_ocp_ca_cert have to be specified. +sap_hypervisor_node_preconfigure_ocp_extract_kubeconfig: true + +# URL to the API endpoint of Red Hat OpenShift cluster +#sap_hypervisor_node_preconfigure_ocp_endpoint: + +# CA Certificate for Red Hat OpenShift cluster connection +# To extract the CA Cert from the kubeconfig, you can use +# grep certificate-authority-data ${KUBECONFIG} | awk '{ print $2 }' | base64 --decode > client-cert.pem +#sap_hypervisor_node_preconfigure_ocp_ca_cert: + + +########################################################### +# Configuration of what should be preconfigured +########################################################### + # Install and configure the host path provisioner (hpp) for a local storage disk sap_hypervisor_node_preconfigure_install_hpp: false # Install the trident NFS storage provider sap_hypervisor_node_preconfigure_install_trident: false +# Should the operators be installed +sap_hypervisor_node_preconfigure_install_operators: true + +# Configure the workers? +sap_hypervisor_node_preconfigure_setup_worker_nodes: true + + +########################################################### +# Configuration details +########################################################### + # URL of the trident installer package to use -sap_hypervisor_node_preconfigure_install_trident_url: https://github.com/NetApp/trident/releases/download/v23.10.0/trident-installer-23.10.0.tar.gz +sap_hypervisor_node_preconfigure_install_trident_url: https://github.com/NetApp/trident/releases/download/v24.06.0/trident-installer-24.06.0.tar.gz -# should SRIOV be enabled for unsupported NICs -sap_hypervisor_node_preconfigure_sriov_enable_unsupported_nics: true +# Allow unsupported NICs to be used for SR-IOV? +sap_hypervisor_node_preconfigure_sriov_enable_unsupported_nics: false # Amount of memory [GiB] to be reserved for the hypervisor on hosts >= 512GiB sap_hypervisor_node_preconfigure_hypervisor_reserved_ram_host_ge_512: 64 # GiB @@ -23,8 +66,146 @@ sap_hypervisor_node_preconfigure_hypervisor_reserved_ram_host_lt_512: 32 # GiB # Should the check for the minimal amount of be ignored? 
Minimal amount is 96 GiB sap_hypervisor_node_preconfigure_ignore_minimal_memory_check: false -# Should the operators be installed -sap_hypervisor_node_preconfigure_install_operators: true +# Namespace to be used for the Red Hat Openshift Virtualization Operator +sap_hypervisor_node_preconfigure_ocpv_namespace: openshift-cnv -# Configure the workers? -sap_hypervisor_node_preconfigure_setup_worker_nodes: true + +########################################################### +# Red Hat OpenShift cluster configuration details +########################################################### + +# Example configuration for redhat_ocp_virt +sap_hypervisor_node_preconfigure_cluster_config: + + # namespace under which the VMs are created, note this has to be + # openshift-sriov-network-operator in case of using SR-IOV network + # devices + vm_namespace: sap + + # Optional, configuration for trident driver for Netapp NFS filer + trident: + management: management.domain.org + data: datalif.netapp.domain.org + svm: sap_svm + backend: nas_backend + aggregate: aggregate_Name + username: admin + password: xxxxx + storage_driver: ontap-nas + storage_prefix: ocpv_sap_ + + # CPU cores which will be reserved for kubernetes + worker_kubernetes_reserved_cpus: "0,1" + + # Storage device used for host path provisioner as local storage. + worker_localstorage_device: /dev/vdb + + # detailed configuration for every worker that should be configured + workers: + + - name: worker-0 # name must match the node name + networks: # Example network config + + - name: sapbridge # using a bridge + description: SAP bridge + state: up + type: linux-bridge + ipv4: + enabled: false + auto-gateway: false + auto-dns: false + bridge: + options: + stp: + enabled: false + port: + - name: ens1f0 # network IF name + + - name: storage # an SR-IOV device + interface: ens2f0 # network IF name + type: sriov + + - name: ens2f0 # Set elevated MTU of 9000 + type: ethernet # on parent interface of + state: up # storagebridge + ipv4: + dhcp: false + enabled: false + mtu: 9000 + + - name: storagebridge # using a bridge + bridge: # another bridge + options: + stp: + enabled: false + port: + - name: ens2f0 # network IF name + description: storage + mtu: 9000 + ipv4: + address: + - ip: 192.168.1.10 # IP config + prefix-length: 24 + auto-dns: false + auto-gateway: false + enabled: true + state: up + type: linux-bridge + + - name: multi # another SR-IOV device + interface: ens2f1 # network IF name + type: sriov + + + - name: worker-1 # second worker configuration + networks: # Example network config + + - name: sapbridge # using a bridge + description: SAP bridge + state: up + type: linux-bridge + ipv4: + enabled: false + auto-gateway: false + auto-dns: false + bridge: + options: + stp: + enabled: false + port: + - name: ens1f0 # network IF name + + - name: storage # an SR-IOV device + interface: ens2f0 # network IF name + type: sriov + + - name: ens2f0 # Set elevated MTU of 9000 + type: ethernet # on parent interface of + state: up # storagebridge + ipv4: + dhcp: false + enabled: false + mtu: 9000 + + - name: storagebridge # create storage bridge + bridge: + options: + stp: + enabled: false + port: + - name: ens2f0 # network IF name + description: storage + mtu: 9000 + ipv4: + address: + - ip: 192.168.1.11 # IP config + prefix-length: 24 + auto-dns: false + auto-gateway: false + enabled: true + state: up + type: linux-bridge + + - name: multi # another SR-IOV device + interface: ens2f1 # network IF name + type: sriov diff --git 
a/roles/sap_vm_provision/PLATFORM_GUIDANCE.md b/roles/sap_vm_provision/PLATFORM_GUIDANCE.md index d15427e..ba1af34 100644 --- a/roles/sap_vm_provision/PLATFORM_GUIDANCE.md +++ b/roles/sap_vm_provision/PLATFORM_GUIDANCE.md @@ -34,9 +34,9 @@ See below for the drop-down list of required environment resources on an Infrast - VPC Subnetwork - Compute Firewall - Compute Router - - SNAT + - Cloud NAT (SNAT) - DNS Managed Zone (Private DNS) -- Filestore (NFS) +- Filestore (NFS) or NFS server - Bastion host (GCP CE VM) @@ -99,6 +99,31 @@ See below for the drop-down list of required environment resources on an Infrast +
+Red Hat OpenShift Virtualization (kubevirt_vm)
+
+- IMPORTANT: The playbook has to run with the environment variable `ANSIBLE_JINJA2_NATIVE=true`, otherwise an unmarshalling error occurs when the VM is created. On Ansible Automation Platform Controller (AAPC), set this under Settings --> Job Settings --> Extra Environment Variables, e.g.
+```
+{
+  "ANSIBLE_JINJA2_NATIVE": "true",
+  "HOME": "/var/lib/awx"
+}
+```
+
+- Kubeconfig file, kubeadmin user and password for the cluster you want to deploy to. The default behavior is to extract the CA certificate and API endpoint from the kubeconfig (`sap_vm_provision_kubevirt_vm_extract_kubeconfig: true`). The kubeconfig location is read from `sap_vm_provision_kubevirt_vm_kubeconfig_path`; if that variable is not defined, it falls back to the environment variable `K8S_AUTH_KUBECONFIG`.
+
+- SSH key pair for the VMs, or a password (see the variable sketch below)
+  - `sap_vm_provision_ocp_guest_ssh_auth_mechanism`: Authentication mechanism used to connect to the guest. Possible options are:
+    - `password`: Make sure to set the password in `sap_vm_provision_ocp_os_user_password`.
+    - `private_key`: Use the private SSH key at the location defined by `sap_vm_provision_ssh_host_private_key_file_path`.
+    - `private_key_data`: Use the private SSH key provided in `sap_vm_provision_ssh_host_private_key_data` and write it to the location defined in `sap_vm_provision_ssh_host_private_key_file_path`.
+
+- Optional: an execution host with access to the OpenShift cluster.
+
+- Native Kubernetes with KubeVirt has not been tested.
+
+
+
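For reference, a minimal variable sketch for this scenario could look as follows — the values are placeholders and the variable names are the ones listed above, so verify them against the role defaults before use:

```yaml
# Illustrative values only
sap_vm_provision_kubevirt_vm_kubeconfig_path: /path/to/clusterconfigs/kubeconfig  # omit to fall back to K8S_AUTH_KUBECONFIG
sap_vm_provision_kubevirt_vm_extract_kubeconfig: true        # read API endpoint and CA certificate from the kubeconfig
sap_vm_provision_ocp_guest_ssh_auth_mechanism: private_key   # or: password, private_key_data
sap_vm_provision_ssh_host_private_key_file_path: /path/to/ssh/private_key
# sap_vm_provision_ocp_os_user_password: "..."               # only needed with the password mechanism
```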
KubeVirt: @@ -212,6 +237,58 @@ The Google Cloud User credentials (Client ID and Client Secret) JSON file with a - Enable the Cloud Filestore API, using https://console.cloud.google.com/apis/library/file.googleapis.com - Enable the Service Networking API (Private Services Connection to Filestore), using https://console.cloud.google.com/apis/library/servicenetworking.googleapis.com +It is recommended to create new custom IAM role with detailed actions to improve security. +- Following permissions are minimum requirement to provision SAP HA system. +```shell +compute.addresses.createInternal +compute.addresses.deleteInternal +compute.addresses.get +compute.addresses.useInternal +compute.disks.create +compute.disks.get +compute.disks.use +compute.forwardingRules.create +compute.forwardingRules.get +compute.forwardingRules.update +compute.healthChecks.create +compute.healthChecks.get +compute.healthChecks.update +compute.healthChecks.useReadOnly +compute.images.get +compute.images.list +compute.instanceGroups.create +compute.instanceGroups.get +compute.instanceGroups.update +compute.instanceGroups.use +compute.instances.attachDisk +compute.instances.create +compute.instances.get +compute.instances.list +compute.instances.setMetadata +compute.instances.setServiceAccount +compute.instances.update +compute.instances.use +compute.networks.list +compute.regionBackendServices.create +compute.regionBackendServices.get +compute.regionBackendServices.list +compute.regionBackendServices.use +compute.subnetworks.list +compute.subnetworks.use +compute.zoneOperations.get +dns.changes.create +dns.changes.get +dns.changes.list +dns.managedZones.create +dns.managedZones.get +dns.managedZones.list +dns.managedZones.update +dns.resourceRecordSets.create +dns.resourceRecordSets.get +dns.resourceRecordSets.list +dns.resourceRecordSets.update +``` +
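Creating the custom role itself is a one-off administrative step outside this Ansible role; as a sketch, it could be done with a task along these lines (the role ID, project name and the abbreviated permission list are placeholders — pass the full permission list shown above):

```yaml
# One-off admin step, not performed by sap_vm_provision.
# "sapProvisionMinimal" and "example-project" are placeholder names;
# replace the abbreviated --permissions value with the full list above.
- name: Create GCP custom IAM role with the minimum provisioning permissions
  ansible.builtin.command:
    cmd: >-
      gcloud iam roles create sapProvisionMinimal
      --project=example-project
      --title="ansible-sap-provision"
      --permissions="compute.disks.create,compute.instances.create,dns.resourceRecordSets.create"
  delegate_to: localhost
  changed_when: true
```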
@@ -243,6 +320,51 @@ az role assignment create --assignee "$AZ_SERVICE_PRINCIPAL_ID" \
 az ad sp credential reset --name $AZ_CLIENT_ID
 ```
+It is recommended to create a new Azure custom role with detailed actions to improve security.
+```json
+{
+  "properties": {
+    "roleName": "ansible-sap-automation",
+    "description": "Custom role for SAP LinuxLab ansible automation.",
+    "permissions": [
+      {
+        "actions": [
+          "Microsoft.Authorization/roleAssignments/read",
+          "Microsoft.Authorization/roleAssignments/write",
+          "Microsoft.Authorization/roleDefinitions/read",
+          "Microsoft.Authorization/roleDefinitions/write",
+          "Microsoft.Compute/disks/read",
+          "Microsoft.Compute/disks/write",
+          "Microsoft.Compute/sshPublicKeys/read",
+          "Microsoft.Compute/sshPublicKeys/write",
+          "Microsoft.Compute/virtualMachines/instanceView/read",
+          "Microsoft.Compute/virtualMachines/read",
+          "Microsoft.Compute/virtualMachines/write",
+          "Microsoft.Network/loadBalancers/backendAddressPools/join/action",
+          "Microsoft.Network/loadBalancers/read",
+          "Microsoft.Network/loadBalancers/write",
+          "Microsoft.Network/networkInterfaces/join/action",
+          "Microsoft.Network/networkInterfaces/read",
+          "Microsoft.Network/networkInterfaces/write",
+          "Microsoft.Network/networkSecurityGroups/read",
+          "Microsoft.Network/privateDnsZones/A/read",
+          "Microsoft.Network/privateDnsZones/A/write",
+          "Microsoft.Network/privateDnsZones/read",
+          "Microsoft.Network/privateDnsZones/virtualNetworkLinks/read",
+          "Microsoft.Network/virtualNetworks/privateDnsZoneLinks/read",
+          "Microsoft.Network/virtualNetworks/subnets/join/action",
+          "Microsoft.Network/virtualNetworks/subnets/read",
+          "Microsoft.Resources/subscriptions/resourceGroups/read"
+        ],
+        "notActions": [],
+        "dataActions": [],
+        "notDataActions": []
+      }
+    ]
+  }
+}
+```
+
 Note: MS Azure VMs provisioned will contain Hyper-V Hypervisor virtual interfaces using eth* on the OS, and when Accelerated Networking (AccelNet) is enabled for the MS Azure VM then the Mellanox SmartNIC/DPU SR-IOV Virtual Function (VF) may use enP* on the OS. For further information, see [MS Azure - How Accelerated Networking works](https://learn.microsoft.com/en-us/azure/virtual-network/accelerated-networking-how-it-works). During High Availability executions, failures may occur and may require additional variable 'sap_ha_pacemaker_cluster_vip_client_interface' to be defined.
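One way to register a role definition like the one above is the Azure CLI, sketched here as a task — the JSON file name is a placeholder, and the definition additionally needs `assignableScopes` for the target subscription before Azure will accept it:

```yaml
# One-off admin step, not performed by sap_vm_provision.
# Assumes the JSON shown above was saved as azure-custom-role.json and
# extended with "assignableScopes" for the target subscription.
- name: Create MS Azure custom role definition for SAP automation
  ansible.builtin.command:
    cmd: az role definition create --role-definition @azure-custom-role.json
  delegate_to: localhost
  changed_when: true
```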
@@ -294,6 +416,21 @@ The recommended [IBM PowerVC Security Role](https://www.ibm.com/docs/en/powervc/ See below for the drop-down list of recommended configurations for each Infrastructure Platform. +
+Google Cloud (GCP):
+
+Using Cloud NAT to allow outbound communication can result in registration issues with SLES PAYG (Pay As You Go) images.
+Please follow the troubleshooting guide at [Troubleshooting SLES pay-as-you-go registration](https://cloud.google.com/compute/docs/troubleshooting/troubleshooting-suse-registration).
+
+These issues were resolved by following [Troubleshooting SLES pay-as-you-go registration - Registration failed](https://cloud.google.com/compute/docs/troubleshooting/troubleshooting-suse-registration#registration_failed):
+```
+The Cloud NAT parameter "minimum ports per VM instance" has to be increased to a value higher than 160 (higher values are recommended).
+```
+
+
+
+
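If an existing Cloud NAT gateway needs this adjustment, it can be applied to the NAT configuration along these lines (a sketch only — the NAT, router and region names are placeholders, and 512 is simply one value above the 160 threshold):

```yaml
# One-off admin step, not performed by sap_vm_provision.
# "example-nat", "example-router" and the region are placeholders.
- name: Raise Cloud NAT minimum ports per VM instance above 160
  ansible.builtin.command:
    cmd: >-
      gcloud compute routers nats update example-nat
      --router=example-router
      --region=europe-west1
      --min-ports-per-vm=512
  delegate_to: localhost
  changed_when: true
```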
VMware vCenter: @@ -347,3 +484,23 @@ When VMware vCenter and vSphere clusters with VMware NSX virtualized network ove N.B. When VMware vCenter and vSphere clusters with direct network subnet IP allocations to the VMXNet network adapter (no VMware NSX network overlays), the above actions may not be required.
+ + +## Notice regarding SAP High Availability and hardware placement strategies + +Each Hyperscaler Cloud Service Provider provides a different approach to the placement strategy of a Virtual Machine to the physical/hardware Hypervisor node it runs atop. + +The `sap_vm_provision` Ansible Role enforces scope control for this capability, only providing a "spread" placement strategy for the High Availability scenarios. As such the variable used is `sap_vm_provision_<>_placement_strategy_spread: true/false`. + +The following are the equivalent Placement Strategies, commonly referenced as 'Anti-Affinity', in each Infrastructure Platform: + +- **AWS EC2 VS Placement Group, Rack-level Spread strategy** - each VS on different hosts, in different racks with distinct network source and power supply. See [AWS EC2 Networking - Placement Strategies documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-strategies.html#placement-groups-spread) +- **GCP CE VM Resource Policy (type: Group Placement Policy), Availability Domain Spread strategy** - each VM on different hosts, in different racks with distinct power supply (dual redundancy from different sources). See [GCP CE 'Spread Group Placement Policy' documentation](https://cloud.google.com/compute/docs/instances/use-spread-placement-policies#create-spread-policy) Not provided due to [google.cloud Ansible Collection issue 323](https://github.com/ansible-collections/google.cloud/issues/323) +- **IBM Cloud VS Placement Group Strategy, Power Spread strategy** - each VS on different hosts, in different racks with distinct network source and power supplies (dual redundancy from different sources). See [IBM Cloud Infrastructure Services Placement Groups documentation](https://cloud.ibm.com/docs/vpc?topic=vpc-about-placement-groups-for-vpc) +- **IBM Cloud, IBM Power VS Placement Group Collocation Policy, Server Anti-Affinity (aka. Different Server) Spread strategy** - each VS on different hosts, in different racks with distinct network source and power supplies (dual redundancy from different sources). See [IBM Cloud, IBM Power VS Placement Group Collocation Policy documentation](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-managing-placement-groups) and the associated [FAQ for IBM Power VS related to Anti-Affinity Rules](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-powervs-faqs#affinity) +- **MS Azure Availability Set, Fault Domain Spread strategy** - each VM on different hosts, in different racks with distinct network source and power supply. See [MS Azure 'Availability Set' documentation](https://learn.microsoft.com/en-us/azure/virtual-machines/availability-set-overview); not to be confused with [MS Azure 'VM Scale Set' (VMSS)](https://learn.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-manage-fault-domains) +- **IBM PowerVM Collocation Rule, Anti-Affinity Spread strategy** - each VM (aka. LPAR) on different hosts. See [IBM PowerVC Collocation Rules documentation](www.ibm.com/docs/en/powervc/latest?topic=powervc-collocation-rules) +- TBD: + - KubeVirt VM + - OVirt VM + - VMware VM diff --git a/roles/sap_vm_provision/README.md b/roles/sap_vm_provision/README.md index 4103dc4..80e1e29 100644 --- a/roles/sap_vm_provision/README.md +++ b/roles/sap_vm_provision/README.md @@ -32,7 +32,8 @@ The code modularity and commonality of provisioning enables a wide gamut of SAP - Microsoft Azure Virtual Machine/s - IBM PowerVM Virtual Machine/s _(formerly LPAR/s)_ - OVirt Virtual Machine/s (e.g. 
Red Hat Enterprise Linux KVM) -- KubeVirt Virtual Machine/s (e.g. Red Hat OpenShift Virtualization, SUSE Rancher with Harvester HCI) `[Experimental]` +- KubeVirt Virtual Machine/s (e.g. SUSE Rancher with Harvester HCI) `[Experimental]` +- Red Hat OpenShift Virtualization `[Experimental]` - VMware vSphere Virtual Machine/s `[Beta]` ### Known issues @@ -57,8 +58,6 @@ For a list of requirements and recommended authorizations on each Infrastructure **Dependencies:** - OS Packages - Python 3.9.7+ (i.e. CPython distribution) - - AWS CLI _(when High Availability on AWS)_ - - GCloud CLI _(when High Availability on GCP)_ - IBM Cloud CLI _(when High Availability on IBM Cloud)_ - Terraform 1.0.0-1.5.5 _(when Ansible to Terraform, or legacy Ansible Collection for IBM Cloud)_ - Python Packages @@ -71,6 +70,7 @@ For a list of requirements and recommended authorizations on each Infrastructure - `openstacksdk` for IBM PowerVM - `ovirt-engine-sdk-python` for OVirt - `aiohttp` for VMware + - `kubernetes` for Kubernetes based platforms such as Red Hat OpenShift Virtualization - Ansible - Ansible Core 2.12.0+ - Ansible Collections: @@ -82,10 +82,13 @@ For a list of requirements and recommended authorizations on each Infrastructure - `google.cloud` - `ibm.cloudcollection` - _(legacy, to be replaced with `ibm.cloud` in future)_ - - `kubevirt.core` + - `kubevirt.core` for kubevirt_vm or Red Hat OpenShift Virtualization - `openstack.cloud` - `ovirt.ovirt` - `vmware.vmware_rest` _(requires `cloud.common`)_ + - `community.okd` for Red Hat OpenShift Virtualization + +TODO: Split up above dependencies per platform. ## Execution @@ -167,6 +170,7 @@ This required structure will: - For Hyperscaler Cloud Service Providers that use Resource Groups (IBM Cloud, Microsoft Azure): - Virtual Machine and associated resources (Disks, Network Interfaces, Load Balancer etc.) will be provisioned to the same Resource Group as the targeted network/subnet. - Optional: Private DNS may be allocated to another Resource Group, and an optional variable is provided for this. 
+- Virtual Disk with defined IOPS is only possible on AWS, Google Cloud, IBM Cloud ### Tags to control execution @@ -181,6 +185,7 @@ Apache 2.0 ## Authors Sean Freeman +Nils Koenig (nkoenig@redhat.com) kubevirt_vm / Red Hat OpenShift Virtualization --- diff --git a/roles/sap_vm_provision/defaults/main.yml b/roles/sap_vm_provision/defaults/main.yml index 9f671fa..b8834b2 100644 --- a/roles/sap_vm_provision/defaults/main.yml +++ b/roles/sap_vm_provision/defaults/main.yml @@ -10,6 +10,8 @@ sap_vm_provision_iac_type: "" # aws_ec2_vs , gcp_ce_vm , ibmcloud_vs , ibmcloud_powervs , msazure_vm , ibmpowervm_vm , kubevirt_vm , ovirt_vm , vmware_vm sap_vm_provision_iac_platform: "" +# execution_host where ansible playbook will delegate_to +sap_vm_provision_execution_host: "localhost" #### # VM Provision Infrastructure-as-Code (IaC) Configuration - Ansible provisioning - Cloud Hyperscaler @@ -57,17 +59,22 @@ sap_vm_provision_dns_root_domain: "" #### -# VM Provision - Generic configuration - Hypervsior +# VM Provision - Generic configuration - Hypervisor or Cloud Hyperscaler OS Images with Bring-Your-Own-Subscription (BYOS) #### # Register to OS Vendor online subscription to package repositories -sap_vm_provision_os_online_registration_passcode: "" -sap_vm_provision_os_online_registration_user: "" +# sap_vm_provision_os_online_registration_passcode: "" +# sap_vm_provision_os_online_registration_user: "" # Register to Hosted Mirror of OS Vendor package repositories # sap_vm_provision_os_registration_ca_file_path: "" # sap_vm_provision_os_registration_script_command: "" + +#### +# VM Provision - Generic configuration - Hypervisor +#### + # Proxy - Web Forward, when SNAT not available for Hypervisor VMs # sap_vm_provision_proxy_web_forward_proxy_ip: "" # IP:Port only, no http:// prefix # sap_vm_provision_proxy_web_forward_exclusions: "localhost,127.0.0.1,{{ sap_vm_provision_dns_root_domain }}" @@ -134,6 +141,15 @@ sap_vm_provision_aws_vpc_subnet_id: "" # if ansible_to_terraform, use "new" sap_vm_provision_aws_vpc_subnet_create_boolean: "{{ true | default(false) if sap_vm_provision_aws_vpc_subnet_id == 'new' else false }}" sap_vm_provision_aws_vpc_sg_names: "" # comma-separated, if ansible_to_terraform then ignore this variable sap_vm_provision_aws_key_pair_name_ssh_host_public_key: "" +sap_vm_provision_aws_placement_resource_name: "sap-placement-group-spread" +sap_vm_provision_aws_placement_strategy_spread: false +# Specify role/profile names to allow multiple clusters +# Example for HANA HA: "HA-Role-Pacemaker-{{ sap_system_hana_db_sid }}" +sap_vm_provision_aws_ha_iam_role: "HA-Role-Pacemaker" +sap_vm_provision_aws_ha_iam_instance_profile: "HA-Instance-Profile-Pacemaker-Cluster" +# Enable to overwrite existing DNS record. +# AWS Route53 module fails when DNS record already exists. 
+sap_vm_provision_aws_dns_overwrite: false # Google Cloud sap_vm_provision_gcp_credentials_json: "" @@ -142,6 +158,8 @@ sap_vm_provision_gcp_region: "{{ sap_vm_provision_gcp_region_zone[:-2] }}" sap_vm_provision_gcp_region_zone: "" sap_vm_provision_gcp_vpc_name: "" sap_vm_provision_gcp_vpc_subnet_name: "" +sap_vm_provision_gcp_placement_resource_name: "sap-placement-policy-spread" +sap_vm_provision_gcp_placement_strategy_spread: false # IBM Cloud sap_vm_provision_ibmcloud_api_key: "" @@ -154,8 +172,11 @@ sap_vm_provision_ibmcloud_vpc_name: "" sap_vm_provision_ibmcloud_vpc_subnet_name: "" sap_vm_provision_ibmcloud_vpc_sg_names: "" # comma-separated, if ansible_to_terraform then ignore this variable sap_vm_provision_ibmcloud_key_pair_name_ssh_host_public_key: "" +sap_vm_provision_ibmcloud_placement_resource_name: "sap-placement-group-spread" +sap_vm_provision_ibmcloud_placement_strategy_spread: false # IBM Cloud, addendum for IBM Power VS +sap_vm_provision_ibmcloud_private_dns_custom_resolver_ip: "" sap_vm_provision_ibmcloud_powervs_location: "" sap_vm_provision_ibmcloud_powervs_workspace_name: "" sap_vm_provision_ibmcloud_powervs_vlan_subnet_name: "" @@ -173,6 +194,41 @@ sap_vm_provision_msazure_location_availability_zone_no: 1 sap_vm_provision_msazure_vnet_name: "" sap_vm_provision_msazure_vnet_subnet_name: "" sap_vm_provision_msazure_key_pair_name_ssh_host_public_key: "" +sap_vm_provision_msazure_placement_resource_name: "sap-availability-set-spread" +sap_vm_provision_msazure_placement_strategy_spread: false +# Specify role name for fence agent +sap_vm_provision_msazure_ha_iam_role: "Linux Fence Agent Role" + +#### +# Infrastructure Platform - Cloud Hyperscaler - High Availability resources +#### + +# Default names for Load Balancer resources on Google Cloud, IBM Cloud, Microsoft Azure +sap_vm_provision_ha_load_balancer_name_hana: "lb-sap-ha-hana" +sap_vm_provision_ha_load_balancer_name_anydb: "lb-sap-ha-anydb" +sap_vm_provision_ha_load_balancer_name_nwas: "lb-sap-ha-nwas" + + +#### +# Infrastructure Platform - Cloud Hyperscaler - High Availability Virtual IPs (VIPs) +# +# AWS: the VIP must be **outside** of any VPC Subnet ranges +# GCP: the VIP must be within the VPC Subnetwork range attached to GCP Backend Service for the Internal passthrough Network Load Balancer +# IBM Cloud: the VIP is within the VPC Subnet range and is statically defined by the IBM Cloud Load Balancer (NLB L4 or ALB L7) +# IBM Cloud, Power VS: the VIP must be within the IBM Power Infrastructure VLAN Subnet range +# MS Azure: the VIP must be within the VNet Subnet range attached to the Azure Load Balancer (NLB L4) +# IBM PowerVM: the VIP must be within the VLAN Subnet range +# OVirt KVM: the VIP must be within the VLAN Subnet range +# KubeVirt: the VIP must be within the VLAN Subnet range +#### + +# Blank defaults for High Availability Virtual IPs (VIPs), with CIDR +sap_vm_provision_ha_vip_hana_primary: "" # e.g. 192.168.1.90/32 +sap_vm_provision_ha_vip_anydb_primary: "" # e.g. 192.168.1.90/32 +sap_vm_provision_ha_vip_nwas_abap_ascs: "" # e.g. 192.168.2.10/32 +sap_vm_provision_ha_vip_nwas_abap_ers: "" # e.g. 192.168.2.11/32 +# sap_vm_provision_ha_vip_nwas_abap_pas: # not implemented +# sap_vm_provision_ha_vip_nwas_abap_aas: # not implemented #### @@ -190,6 +246,8 @@ sap_vm_provision_ibmpowervm_network_name: "" sap_vm_provision_ibmpowervm_network_vnic_type: "normal" # 'direct' == SR-IOV, 'normal' == Shared Ethernet Adapter (SEA) sap_vm_provision_ibmpowervm_storage_template_name: "" # aka. 
Openstack Cinder Volume Type sap_vm_provision_ibmpowervm_key_pair_name_ssh_host_public_key: "" +sap_vm_provision_ibmpowervm_placement_resource_name: "sap-collocation-rule-spread" +sap_vm_provision_ibmpowervm_placement_strategy_spread: false # Kubevirt sap_vm_provision_kubevirt_api_key: "" @@ -250,36 +308,67 @@ sap_vm_provision_ibmpowervm_vm_host_os_image: "" # OS Images - AWS AMI sap_vm_provision_aws_ec2_vs_host_os_image_dictionary: rhel-8-1: "*RHEL-8.1*_HVM*x86_64*" - rhel-8-2: "*RHEL-8.2*_HVM*x86_64*" + # rhel-8-2: "*RHEL-8.2*_HVM*x86_64*" # removed rhel-8-4: "*RHEL-8.4*_HVM*x86_64*" rhel-8-6: "*RHEL-8.6*_HVM*x86_64*" rhel-8-8: "*RHEL-8.8*_HVM*x86_64*" + rhel-8-10: "*RHEL-8.10*_HVM*x86_64*" + rhel-9-0: "*RHEL-9.0*_HVM*x86_64*" + rhel-9-1: "*RHEL-9.1*_HVM*x86_64*" + rhel-9-2: "*RHEL-9.2*_HVM*x86_64*" + rhel-9-3: "*RHEL-9.3*_HVM*x86_64*" + rhel-9-4: "*RHEL-9.4*_HVM*x86_64*" + sles-12-5: "*suse-sles-12-sp5-v202*-hvm-ssd-x86_64*" + # sles-15-2: "*suse-sles-15-sp2-v202*-hvm-ssd-x86_64*" # removed + # sles-15-3: "*suse-sles-15-sp3-v202*-hvm-ssd-x86_64*" # removed + # sles-15-4: "*suse-sles-15-sp4-v202*-hvm-ssd-x86_64*" # removed + sles-15-5: "*suse-sles-15-sp5-v202*-hvm-ssd-x86_64*" + sles-15-6: "*suse-sles-15-sp6-v202*-hvm-ssd-x86_64*" + # rhel-7-7-sap-ha: "*RHEL-SAP-8.1.0*" # removed + # rhel-7-9-sap-ha: "*RHEL-SAP-8.1.0*" # removed rhel-8-1-sap-ha: "*RHEL-SAP-8.1.0*" rhel-8-2-sap-ha: "*RHEL-SAP-8.2.0*" rhel-8-4-sap-ha: "*RHEL-SAP-8.4.0*" rhel-8-6-sap-ha: "*RHEL-SAP-8.6.0*" rhel-8-8-sap-ha: "*RHEL-SAP-8.8.0*" + rhel-8-10-sap-ha: "*RHEL-SAP-8.10.0*" rhel-9-0-sap-ha: "*RHEL-SAP-9.0.0*" rhel-9-2-sap-ha: "*RHEL-SAP-9.2.0*" - sles-15-2: "*suse-sles-15-sp2-v202*-hvm-ssd-x86_64*" - sles-15-3: "*suse-sles-15-sp3-v202*-hvm-ssd-x86_64*" - sles-15-4: "*suse-sles-15-sp4-v202*-hvm-ssd-x86_64*" - sles-15-5: "*suse-sles-15-sp5-v202*-hvm-ssd-x86_64*" + rhel-9-4-sap-ha: "*RHEL-SAP-9.4.0*" sles-12-5-sap-ha: "*suse-sles-sap-12-sp5-v202*-hvm-ssd-x86_64*" sles-15-1-sap-ha: "*suse-sles-sap-15-sp1-v202*-hvm-ssd-x86_64*" sles-15-2-sap-ha: "*suse-sles-sap-15-sp2-v202*-hvm-ssd-x86_64*" sles-15-3-sap-ha: "*suse-sles-sap-15-sp3-v202*-hvm-ssd-x86_64*" sles-15-4-sap-ha: "*suse-sles-sap-15-sp4-v202*-hvm-ssd-x86_64*" sles-15-5-sap-ha: "*suse-sles-sap-15-sp5-v202*-hvm-ssd-x86_64*" + sles-15-6-sap-ha: "*suse-sles-sap-15-sp6-v202*-hvm-ssd-x86_64*" + sles-15-4-sap-ha-byos: "*suse-sles-sap-15-sp4-byos-v202*-hvm-ssd-x86_64*" + sles-15-5-sap-ha-byos: "*suse-sles-sap-15-sp5-byos-v202*-hvm-ssd-x86_64*" + sles-15-6-sap-ha-byos: "*suse-sles-sap-15-sp6-byos-v202*-hvm-ssd-x86_64*" # OS Images - Google Cloud sap_vm_provision_gcp_ce_vm_host_os_image_dictionary: rhel-8-latest: project: "rhel-cloud" family: "rhel-8" - rhel-8-1-sap-ha: + rhel-9-latest: + project: "rhel-cloud" + family: "rhel-9" + sles-12-latest: + project: "suse-cloud" + family: "sles-12" + sles-15-latest: + project: "suse-cloud" + family: "sles-15" + # rhel-7-7-sap-ha: # removed + # project: "rhel-sap-cloud" + # family: "rhel-7-7-sap-ha" + rhel-7-9-sap-ha: project: "rhel-sap-cloud" - family: "rhel-8-1-sap-ha" + family: "rhel-7-9-sap-ha" + # rhel-8-1-sap-ha: # removed + # project: "rhel-sap-cloud" + # family: "rhel-8-1-sap-ha" rhel-8-2-sap-ha: project: "rhel-sap-cloud" family: "rhel-8-2-sap-ha" @@ -289,57 +378,193 @@ sap_vm_provision_gcp_ce_vm_host_os_image_dictionary: rhel-8-6-sap-ha: project: "rhel-sap-cloud" family: "rhel-8-6-sap-ha" - rhel-8-8-sap-ha: - project: "rhel-sap-cloud" - family: "rhel-8-8-sap-ha" - sles-15-latest: - project: "suse-cloud" - family: "sles-15" - 
sles-15-sp3-sap: + sles-12-5-sap-ha: + project: "suse-sap-cloud" + family: "sles-12-sp5-sap" + # sles-15-1-sap-ha: # removed + # project: "suse-sap-cloud" + # family: "sles-15-sp1-sap" + sles-15-2-sap-ha: + project: "suse-sap-cloud" + family: "sles-15-sp2-sap" + sles-15-3-sap-ha: project: "suse-sap-cloud" family: "sles-15-sp3-sap" - sles-15-sp4-sap: + sles-15-4-sap-ha: project: "suse-sap-cloud" family: "sles-15-sp4-sap" - sles-15-sp5-sap: + sles-15-5-sap-ha: project: "suse-sap-cloud" family: "sles-15-sp5-sap" + sles-15-6-sap-ha: + project: "suse-sap-cloud" + family: "sles-15-sp6-sap" + sles-12-5-sap-ha-byos: + project: "suse-byos-cloud" + family: "sles-12-sp5-sap-byos" + sles-15-2-sap-byos: + project: "suse-byos-cloud" + family: "sles-15-sp2-sap-byos" + sles-15-3-sap-byos: + project: "suse-byos-cloud" + family: "sles-15-sp3-sap-byos" + sles-15-4-sap-byos: + project: "suse-byos-cloud" + family: "sles-15-sp4-sap-byos" + sles-15-5-sap-byos: + project: "suse-byos-cloud" + family: "sles-15-sp5-sap-byos" + sles-15-6-sap-byos: + project: "suse-byos-cloud" + family: "sles-15-sp6-sap-byos" # OS Images - IBM Cloud sap_vm_provision_ibmcloud_vs_host_os_image_dictionary: rhel-8-4: ".*redhat.*8-4.*minimal.*amd64.*" rhel-8-6: ".*redhat.*8-6.*minimal.*amd64.*" + rhel-8-8: ".*redhat.*8-8.*minimal.*amd64.*" + rhel-8-10: ".*redhat.*8-10.*minimal.*amd64.*" rhel-9-0: ".*redhat.*9-0.*minimal.*amd64.*" rhel-9-2: ".*redhat.*9-2.*minimal.*amd64.*" + rhel-9-4: ".*redhat.*9-4.*minimal.*amd64.*" + sles-15-5: ".*sles.*15-5.*amd64-[0-9]" + sles-15-6: ".*sles.*15-6.*amd64-[0-9]" + # rhel-7-6-sap-ha: ".*redhat.*7-6.*amd64.*hana.*" # retrievable from deprecated list + # rhel-7-9-sap-ha: ".*redhat.*7-9.*amd64.*hana.*" # retrievable from deprecated list + # rhel-8-1-sap-ha: ".*redhat.*8-1.*amd64.*hana.*" # retrievable from deprecated list + # rhel-8-2-sap-ha: ".*redhat.*8-2.*amd64.*hana.*" # retrievable from deprecated list rhel-8-4-sap-ha: ".*redhat.*8-4.*amd64.*hana.*" rhel-8-6-sap-ha: ".*redhat.*8-6.*amd64.*hana.*" rhel-8-8-sap-ha: ".*redhat.*8-8.*amd64.*hana.*" + rhel-8-10-sap-ha: ".*redhat.*8-10.*amd64.*hana.*" rhel-9-0-sap-ha: ".*redhat.*9-0.*amd64.*hana.*" - rhel-9-2-sap-ha: ".*redhat.*9-0.*amd64.*hana.*" + rhel-9-2-sap-ha: ".*redhat.*9-2.*amd64.*hana.*" + rhel-9-4-sap-ha: ".*redhat.*9-4.*amd64.*hana.*" + # sles-12-4-sap-ha: ".*sles.*12-4.*amd64.*hana.*" # retrievable from deprecated list + # sles-12-5-sap-ha: ".*sles.*12-5.*amd64.*hana.*" # retrievable from deprecated list + # sles-15-1-sap-ha: ".*sles.*15-1.*amd64.*hana.*" # retrievable from deprecated list sles-15-3-sap-ha: ".*sles.*15-3.*amd64.*hana.*" sles-15-4-sap-ha: ".*sles.*15-4.*amd64.*hana.*" sles-15-5-sap-ha: ".*sles.*15-5.*amd64.*hana.*" + sles-15-6-sap-ha: ".*sles.*15-6.*amd64.*hana.*" # OS Images - IBM Cloud, IBM Power VS 'Full Linux subscription' with support and activation keys sap_vm_provision_ibmcloud_powervs_host_os_image_dictionary: - rhel-8-4: ".*RHEL.*8.*4" - rhel-8-6: ".*RHEL.*8.*6" + rhel-8-8: ".*RHEL.*8.*8" rhel-9-2: ".*RHEL.*9.*2" - sles-15-3: ".*SLES.*15.*3" - sles-15-4: ".*SLES.*15.*4" + rhel-9-4: ".*RHEL.*9.*4" + sles-15-5: ".*SLES.*15.*5$" + rhel-8-4-sap-ha: "RHEL8-SP4-SAP" rhel-8-6-sap-ha: ".*RHEL.*8.*6.*SAP$" # ensure string suffix using $ rhel-8-8-sap-ha: ".*RHEL.*8.*8.*SAP$" # ensure string suffix using $ rhel-9-2-sap-ha: ".*RHEL.*9.*2.*SAP$" # ensure string suffix using $ sles-15-2-sap-ha: ".*SLES.*15.*2.*SAP$" # ensure string suffix using $ sles-15-3-sap-ha: ".*SLES.*15.*3.*SAP$" # ensure string suffix using $ sles-15-4-sap-ha: 
".*SLES.*15.*4.*SAP$" # ensure string suffix using $ + sles-15-5-sap-ha: "SLES15-SP5-SAP" + # rhel-8-4-sap-ha-byol: "RHEL8-SP4-SAP-BYOL" + # rhel-8-6-sap-ha-byol: ".*RHEL.*8.*6.*SAP-BYOL$" # ensure string suffix using $ + # rhel-8-8-sap-ha-byol: ".*RHEL.*8.*8.*SAP-BYOL$" # ensure string suffix using $ + # rhel-9-2-sap-ha-byol: ".*RHEL.*9.*2.*SAP-BYOL$" # ensure string suffix using $ + # sles-15-2-sap-ha-byol: ".*SLES.*15.*2.*SAP-BYOL$" # ensure string suffix using $ + # sles-15-3-sap-ha-byol: ".*SLES.*15.*3.*SAP-BYOL$" # ensure string suffix using $ + # sles-15-4-sap-ha-byol: ".*SLES.*15.*4.*SAP-BYOL$" # ensure string suffix using $ + # sles-15-5-sap-ha-byol: "SLES15-SP5-SAP-BYOL" # OS Images - MS Azure sap_vm_provision_msazure_vm_host_os_image_dictionary: + rhel-8-0: + publisher: "RedHat" + offer: "RHEL" + sku: "8-gen2" + rhel-8-1: + publisher: "RedHat" + offer: "RHEL" + sku: "81-gen2" + rhel-8-2: + publisher: "RedHat" + offer: "RHEL" + sku: "82-gen2" + rhel-8-3: + publisher: "RedHat" + offer: "RHEL" + sku: "83-gen2" rhel-8-4: publisher: "RedHat" offer: "RHEL" sku: "84-gen2" + rhel-8-5: + publisher: "RedHat" + offer: "RHEL" + sku: "85-gen2" + rhel-8-6: + publisher: "RedHat" + offer: "RHEL" + sku: "86-gen2" + rhel-8-7: + publisher: "RedHat" + offer: "RHEL" + sku: "87-gen2" + rhel-8-8: + publisher: "RedHat" + offer: "RHEL" + sku: "88-gen2" + rhel-8-9: + publisher: "RedHat" + offer: "RHEL" + sku: "89-gen2" + rhel-8-10: + publisher: "RedHat" + offer: "RHEL" + sku: "810-gen2" + rhel-9-0: + publisher: "RedHat" + offer: "RHEL" + sku: "90-gen2" + rhel-9-1: + publisher: "RedHat" + offer: "RHEL" + sku: "91-gen2" + rhel-9-2: + publisher: "RedHat" + offer: "RHEL" + sku: "92-gen2" + rhel-9-3: + publisher: "RedHat" + offer: "RHEL" + sku: "93-gen2" + rhel-9-4: + publisher: "RedHat" + offer: "RHEL" + sku: "94-gen2" + sles-12-5: + publisher: "SUSE" + offer: "sles-12-sp5" + sku: "gen2" + sles-15-1: + publisher: "SUSE" + offer: "sles-15-sp1" + sku: "gen2" + sles-15-2: + publisher: "SUSE" + offer: "sles-15-sp2" + sku: "gen2" + sles-15-3: + publisher: "SUSE" + offer: "sles-15-sp3" + sku: "gen2" + sles-15-4: + publisher: "SUSE" + offer: "sles-15-sp4" + sku: "gen2" + sles-15-5: + publisher: "SUSE" + offer: "sles-15-sp5" + sku: "gen2" + sles-15-6: + publisher: "SUSE" + offer: "sles-15-sp6" + sku: "gen2" rhel-8-1-sap-ha: publisher: "RedHat" offer: "RHEL-SAP-HA" @@ -360,6 +585,10 @@ sap_vm_provision_msazure_vm_host_os_image_dictionary: publisher: "RedHat" offer: "RHEL-SAP-HA" sku: "88sapha-gen2" + rhel-8-10-sap-ha: + publisher: "RedHat" + offer: "RHEL-SAP-HA" + sku: "810sapha-gen2" rhel-9-0-sap-ha: publisher: "RedHat" offer: "RHEL-SAP-HA" @@ -368,46 +597,62 @@ sap_vm_provision_msazure_vm_host_os_image_dictionary: publisher: "RedHat" offer: "RHEL-SAP-HA" sku: "92sapha-gen2" - rhel-8-1-sap-applications: - publisher: "RedHat" - offer: "RHEL-SAP-HA" - sku: "81sapapps-gen2" - rhel-8-2-sap-applications: + rhel-9-4-sap-ha: publisher: "RedHat" offer: "RHEL-SAP-HA" - sku: "82sapapps-gen2" - rhel-8-4-sap-applications: - publisher: "RedHat" - offer: "RHEL-SAP-HA" - sku: "84sapapps-gen2" - rhel-8-6-sap-applications: - publisher: "RedHat" - offer: "RHEL-SAP-applications" - sku: "86sapapps-gen2" - rhel-8-8-sap-applications: - publisher: "RedHat" - offer: "RHEL-SAP-applications" - sku: "88sapapps-gen2" - rhel-9-0-sap-applications: - publisher: "RedHat" - offer: "RHEL-SAP-applications" - sku: "90sapapps-gen2" - rhel-9-2-sap-applications: - publisher: "RedHat" - offer: "RHEL-SAP-applications" - sku: "92sapapps-gen2" - 
sles-15-sp3-sap: + sku: "94sapha-gen2" + sles-12-5-sap-ha: + publisher: "SUSE" + offer: "sles-sap-12-sp5" + sku: "gen2" + sles-15-1-sap-ha: + publisher: "SUSE" + offer: "sles-sap-15-sp1" + sku: "gen2" + sles-15-2-sap-ha: + publisher: "SUSE" + offer: "sles-sap-15-sp2" + sku: "gen2" + sles-15-3-sap-ha: publisher: "SUSE" offer: "sles-sap-15-sp3" sku: "gen2" - sles-15-sp4-sap: + sles-15-4-sap-ha: publisher: "SUSE" offer: "sles-sap-15-sp4" sku: "gen2" - sles-15-sp5-sap: + sles-15-5-sap-ha: publisher: "SUSE" offer: "sles-sap-15-sp5" sku: "gen2" + sles-15-6-sap-ha: + publisher: "SUSE" + offer: "sles-sap-15-sp6" + sku: "gen2" + sles-15-1-sap-byos: + publisher: "SUSE" + offer: "sles-sap-15-sp1-byos" + sku: "gen2" + sles-15-2-sap-byos: + publisher: "SUSE" + offer: "sles-sap-15-sp2-byos" + sku: "gen2" + sles-15-3-sap-byos: + publisher: "SUSE" + offer: "sles-sap-15-sp3-byos" + sku: "gen2" + sles-15-4-sap-byos: + publisher: "SUSE" + offer: "sles-sap-15-sp4-byos" + sku: "gen2" + sles-15-5-sap-byos: + publisher: "SUSE" + offer: "sles-sap-15-sp5-byos" + sku: "gen2" + sles-15-6-sap-byos: + publisher: "SUSE" + offer: "sles-sap-15-sp6-byos" + sku: "gen2" #### @@ -516,23 +761,109 @@ sap_vm_provision_ibmpowervm_vm_host_specifications_dictionary: disk_size: 512 # size in GB, integer -# KubeVirt +####################################################### +# kubevirt / Red Hat OpenShift Virtualization # +####################################################### + +# Namespace where the VM should be created in +sap_vm_provision_kubevirt_vm_target_namespace: sap + +# Username to be created on guest +sap_vm_provision_kubevirt_vm_os_user: cloud-user + +# Password for the above user +sap_vm_provision_kubevirt_vm_os_user_password: "" + +# how to authenticate to the guest vm [password|private_key|private_key_data] +# password: uses provided password in sap_vm_provision_kubevirt_vm_os_user_password, make sure your ssh config allows password authentication +# private_key: use the private ssh key at the location defined by sap_vm_provision_ssh_host_private_key_file_path +# private_key_data: use the private ssh key provided in sap_vm_provision_ssh_host_private_key_data and write it to the location defined in sap_vm_provision_ssh_host_private_key_file_path +sap_vm_provision_kubevirt_vm_guest_ssh_auth_mechanism: private-key + +# Private SSH key file, must be accessible on the ansible controller +# sap_vm_provision_ssh_host_private_key_file_path: + +# private ssh key, make sure the indentation is correct, here it's two spaces at the beginning of every line +# sap_vm_provision_ssh_host_private_key_data: | +# < your key data> + +# Should the CA cert and the API endpoint be extracted from the kubeconfig file? +sap_vm_provision_kubevirt_vm_extract_kubeconfig: true + +# Should an existing VM be overwritten? +sap_vm_provision_kubevirt_vm_overwrite_vm: false + +# Kubeconfig file for cluster where VMs should be created +sap_vm_provision_kubevirt_vm_kubeconfig_path: /path/to/clusterconfigs/kubeconfig + +# In order to use secured communication, provide the CA cert bundle for the cluster. 
+# This can be extracted from the kubeconfig file with the following
+# command:
+# grep certificate-authority-data ${KUBECONFIG} | awk '{ print $2 }' | base64 --decode > cluster-ca-cert.pem
+# This variable will not be used if sap_vm_provision_kubevirt_vm_extract_kubeconfig = true
+# sap_vm_provision_kubevirt_vm_ca_cert: /path/to/clusterconfigs/cluster-ca-cert.pem
+
+# API endpoint of the cluster
+# This variable will not be used if sap_vm_provision_kubevirt_vm_extract_kubeconfig = true
+# sap_vm_provision_kubevirt_api_vm_endpoint: https://api.cluster.domain.tld:6443
+
+# Admin username for the cluster communication
+sap_vm_provision_kubevirt_vm_admin_username: kubeadmin
+
+# Password for the above admin user
+sap_vm_provision_kubevirt_vm_admin_password: AAAAA-BBBBB-CCCCC-DDDDD
+
+# RAM overhead [GiB] for the virt-launcher container; this can be small for VMs < 1 TB and without SR-IOV, but should be increased to 16 or more for VMs > 1 TB
+sap_vm_provision_kubevirt_vm_container_memory_overhead: 1
+
+# Hostname of the Ansible controller
+sap_vm_provision_kubevirt_vm_ansible_controller: localhost # on AAP, this is localhost
+
 sap_vm_provision_kubevirt_vm_host_specifications_dictionary:
   example_host_specification_plan:
     host1: # Hostname, must be 13 characters or less
       # SMT-2 (i.e. 2 CPU Threads per CPU Core) is default for Intel CPU Hyper-Threading, optionally can be altered to SMT-1
       kubevirt_vm_cpu_smt: 2
-      kubevirt_vm_cpu_threads: 32
-      kubevirt_vm_memory_gib: 256
-      #sap_system_type: project_dev # project_dev, project_tst, project_prd
+      kubevirt_vm_cpu_cores: 2
+      kubevirt_vm_memory_gib: 24
+      sap_system_type: project_dev # project_dev, project_tst, project_prd
       sap_host_type: hana_primary # hana_primary, hana_secondary, anydb_primary, anydb_secondary, nwas_ascs, nwas_ers, nwas_pas, nwas_aas
+      # Provide either an existing PVC or a URL for an OS image
+      os_image: # either url or source_pvc_name has to be provided
+        # URL for an image to be used
+        url: "docker://registry.redhat.io/rhel8/rhel-guest-image:8.8.0"
+        # Name for a PVC to be cloned
+        # source_pvc_name: "rhel-8.8"
+        namespace: openshift-virtualization-os-images
+        size: "50Gi"
+      network_definition:
+        - name: sapbridge
+          type: bridge
+          networkName: sapbridge-network-definition
+          model: virtio
       storage_definition:
-        - name: data_0
-          mountpoint: /data0
+        - name: hana
+          mountpoint: /hana
           disk_count: 1 # default: 1
-          disk_size: 512 # size in GB, integer
-          disk_type: nas # KubeVirt Storage Clas
-
+          disk_size: 2048 # size in GB, integer
+          disk_type: nas # KubeVirt Storage Class
+      cloudinit:
+        userData: |-
+          #cloud-config
+          timezone: Europe/Berlin
+          hostname: "{{ scaleout_origin_host_spec }}"
+          user: {{ sap_vm_provision_kubevirt_vm_os_user if sap_vm_provision_kubevirt_vm_os_user is defined }}
+          password: {{ sap_vm_provision_kubevirt_vm_os_user_password if sap_vm_provision_kubevirt_vm_os_user_password is defined }}
+          chpasswd:
+            expire: false
+          ssh_authorized_keys:
+            - "{{ lookup('ansible.builtin.file', sap_vm_provision_ssh_host_public_key_file_path ) }}"
+        networkData: |-
+          network:
+            version: 2
+            ethernets:
+              eth0:
+                dhcp4: true

 # OVirt
 sap_vm_provision_ovirt_vm_boot_menu: false
diff --git a/roles/sap_vm_provision/tasks/common/register_os.yml b/roles/sap_vm_provision/tasks/common/register_os.yml
index b63a374..717aa01 100644
--- a/roles/sap_vm_provision/tasks/common/register_os.yml
+++ b/roles/sap_vm_provision/tasks/common/register_os.yml
@@ -2,12 +2,12 @@

 #### For On-Premise Package Repo Mirrors ####

-- name: Ansible Task 
block for RHEL Package Repositories setup +- name: Ansible Task block for RHEL Package Repositories setup with Registration Script when: - ansible_os_family == 'RedHat' - (sap_vm_provision_os_registration_script_command is defined) and (sap_vm_provision_os_registration_script_command | length > 0) - - (not sap_vm_provision_os_online_registration_user is defined) or (sap_vm_provision_os_online_registration_user | length == 0) - - (not sap_vm_provision_os_online_registration_passcode is defined) or (sap_vm_provision_os_online_registration_passcode | length == 0) + - (sap_vm_provision_os_online_registration_user is undefined) or (sap_vm_provision_os_online_registration_user | length == 0) + - (sap_vm_provision_os_online_registration_passcode is undefined) or (sap_vm_provision_os_online_registration_passcode | length == 0) block: - name: Red Hat Package Repositories - Clean any existing Red Hat Subscription Manager data @@ -34,12 +34,12 @@ ansible.builtin.command: dnf clean all -- name: Ansible Task block for SLES Package Repositories setup +- name: Ansible Task block for SLES Package Repositories setup with Registration Script when: - ansible_os_family == 'Suse' - (sap_vm_provision_os_registration_script_command is defined) and (sap_vm_provision_os_registration_script_command | length > 0) - - (not sap_vm_provision_os_online_registration_user is defined) or (sap_vm_provision_os_online_registration_user | length == 0) - - (not sap_vm_provision_os_online_registration_passcode is defined) or (sap_vm_provision_os_online_registration_passcode | length == 0) + - (sap_vm_provision_os_online_registration_user is undefined) or (sap_vm_provision_os_online_registration_user | length == 0) + - (sap_vm_provision_os_online_registration_passcode is undefined) or (sap_vm_provision_os_online_registration_passcode | length == 0) block: - name: SUSE Package Repositories - Import CA file for SUSE RMT server @@ -62,12 +62,12 @@ #### For Online Registration via SNAT #### -- name: Ansible Task block for RHEL Online Package Repositories setup +- name: Ansible Task block for RHEL Package Repositories setup via Public Internet when: - ansible_os_family == 'RedHat' - - (not sap_vm_provision_os_registration_script_command is defined) or (sap_vm_provision_os_registration_script_command | length == 0) - - (sap_vm_provision_os_online_registration_user is defined) or (sap_vm_provision_os_online_registration_user | length > 0) - - (sap_vm_provision_os_online_registration_passcode is defined) or (sap_vm_provision_os_online_registration_passcode | length > 0) + - (sap_vm_provision_os_registration_script_command is undefined) or (sap_vm_provision_os_registration_script_command | length == 0) + - (sap_vm_provision_os_online_registration_user is defined) and (sap_vm_provision_os_online_registration_user | length > 0) + - (sap_vm_provision_os_online_registration_passcode is defined) and (sap_vm_provision_os_online_registration_passcode | length > 0) block: - name: Red Hat Customer Portal (RHCP) Online Package Repositories - Execute @@ -75,12 +75,12 @@ ignore_errors: true -- name: Ansible Task block for SLES Online Package Repositories setup +- name: Ansible Task block for SLES Package Repositories setup via Public Internet when: - ansible_os_family == 'Suse' - - (not sap_vm_provision_os_registration_script_command is defined) or (sap_vm_provision_os_registration_script_command | length == 0) - - (sap_vm_provision_os_online_registration_user is defined) or (sap_vm_provision_os_online_registration_user | length > 0) - - 
(sap_vm_provision_os_online_registration_passcode is defined) or (sap_vm_provision_os_online_registration_passcode | length > 0) + - (sap_vm_provision_os_registration_script_command is undefined) or (sap_vm_provision_os_registration_script_command | length == 0) + - (sap_vm_provision_os_online_registration_user is defined) and (sap_vm_provision_os_online_registration_user | length > 0) + - (sap_vm_provision_os_online_registration_passcode is defined) and (sap_vm_provision_os_online_registration_passcode | length > 0) block: - name: SUSE Customer Center (SCC) Online Package Repositories - Execute diff --git a/roles/sap_vm_provision/tasks/common/register_proxy.yml b/roles/sap_vm_provision/tasks/common/register_proxy.yml index 40ebddd..4698358 100644 --- a/roles/sap_vm_provision/tasks/common/register_proxy.yml +++ b/roles/sap_vm_provision/tasks/common/register_proxy.yml @@ -41,24 +41,39 @@ mode: '0644' force: false + # Ignore domains within the Cloud Platform's internal backbone, to access Cloud Services when a Proxy is enabled + - name: Web Forward Proxy - Cloud Platform specific exclusions - IBM Cloud, IBM Power Virtual Server + ansible.builtin.set_fact: + __sap_vm_provision_task_proxy_platform_exclude: ',cloud.ibm.com,networklayer.com' + when: + - sap_vm_provision_iac_platform == "ibmcloud_powervs" + - __sap_vm_provision_task_ibmcloud_pi_workspace_capabilities.resource.pi_workspace_capabilities['power-edge-router'] + # For non-interactive login shell, append proxy env var to /root/.bashrc (proxy will not work if using /etc/bashrc or script stored in /etc/profile.d/) - name: Web Forward Proxy - Append Proxy env var to non-interactive login shell config file ansible.builtin.blockinfile: path: "{{ non_interactive_login_shell_config_file }}" marker: "#-- {mark} ANSIBLE MANAGED BLOCK --#" # must have {mark} otherwise block will repeat append on re-run block: | - export http_proxy="{{ sap_vm_provision_proxy_web_forward_proxy_ip }}" - export https_proxy="{{ sap_vm_provision_proxy_web_forward_proxy_ip }}" - export HTTP_PROXY="{{ sap_vm_provision_proxy_web_forward_proxy_ip }}" - export HTTPS_PROXY="{{ sap_vm_provision_proxy_web_forward_proxy_ip }}" + export http_proxy="http://{{ sap_vm_provision_proxy_web_forward_proxy_ip }}" + export https_proxy="http://{{ sap_vm_provision_proxy_web_forward_proxy_ip }}" + export HTTP_PROXY="http://{{ sap_vm_provision_proxy_web_forward_proxy_ip }}" + export HTTPS_PROXY="http://{{ sap_vm_provision_proxy_web_forward_proxy_ip }}" #export ftp_proxy #export FTP_PROXY - export no_proxy="{{ sap_vm_provision_proxy_web_forward_exclusions }},{{ sap_vm_provision_proxy_web_forward_exclusions_dynamic }}" - export NO_PROXY="{{ sap_vm_provision_proxy_web_forward_exclusions }},{{ sap_vm_provision_proxy_web_forward_exclusions_dynamic }}" + export no_proxy="{{ sap_vm_provision_proxy_web_forward_exclusions }},{{ sap_vm_provision_proxy_web_forward_exclusions_dynamic }}{{ __sap_vm_provision_task_proxy_platform_exclude if __sap_vm_provision_task_proxy_platform_exclude is defined }}" + export NO_PROXY="{{ sap_vm_provision_proxy_web_forward_exclusions }},{{ sap_vm_provision_proxy_web_forward_exclusions_dynamic }}{{ __sap_vm_provision_task_proxy_platform_exclude if __sap_vm_provision_task_proxy_platform_exclude is defined }}" - - name: Edit /etc/dnf/dnf.conf to add proxy + # dnf/yum will first use proxy var from config file, followed by environment variables of the login shell + # When dnf/yum is set to empty string in config file, it will ignore proxy. 
However, "The curl environment variables (such as http_proxy) are effective if this option is unset" and + # therefore dnf/yum has no method to enable Proxy for Public Internet connectivity in combination with no Proxy for OS Package Repositories. + # Using no_proxy/NO_PROXY env var with domain suffix for mirror OS Package Repositories inside the Cloud Service Provider will force no Proxy usage. + - name: Web Forward Proxy - Edit /etc/dnf/dnf.conf to add proxy ansible.builtin.blockinfile: path: /etc/dnf/dnf.conf block: | proxy=http://{{ sap_vm_provision_proxy_web_forward_proxy_ip }} - when: ansible_os_family == "RedHat" + when: + - ansible_os_family == "RedHat" + - not __sap_vm_provision_task_ibmcloud_pi_workspace_capabilities.resource.pi_workspace_capabilities['power-edge-router'] + - __sap_vm_provision_task_proxy_platform_exclude is undefined diff --git a/roles/sap_vm_provision/tasks/common/set_ansible_vars.yml b/roles/sap_vm_provision/tasks/common/set_ansible_vars.yml index a6140c6..b1ae642 100644 --- a/roles/sap_vm_provision/tasks/common/set_ansible_vars.yml +++ b/roles/sap_vm_provision/tasks/common/set_ansible_vars.yml @@ -1,13 +1,25 @@ --- +# Required when defining Ansible Role variables within the host_specifications_dictionary for multiple SAP Systems / SAP Landscapes +- name: Set facts for all hosts - use facts from localhost - SAP Variables from host_specifications_dictionary + ansible.builtin.set_fact: + "{{ host_spec_sap_item }}": "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)][host_spec_sap_item] }}" + loop: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].keys() | map('regex_findall', '^sap_.*') | flatten | select() | list }}" + loop_control: + loop_var: host_spec_sap_item + +- name: Set facts for all hosts - use facts from localhost - Host Specifications Dictionary + ansible.builtin.set_fact: + host_specifications_dictionary: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary') }}" + + - name: Set facts for all hosts - use facts from localhost - Generic ansible.builtin.set_fact: sap_vm_provision_dns_root_domain: "{{ sap_vm_provision_dns_root_domain }}" sap_vm_provision_host_specification_plan: "{{ sap_vm_provision_host_specification_plan }}" sap_vm_provision_nfs_mount_point: "{{ sap_vm_provision_nfs_mount_point | default('') }}" sap_vm_provision_nfs_mount_point_separate_sap_transport_dir: "{{ sap_vm_provision_nfs_mount_point_separate_sap_transport_dir | default('') }}" - sap_software_download_directory: "{{ sap_software_download_directory | default('/software') }}" - sap_install_media_detect_source_directory: "{{ sap_software_download_directory | default('/software') }}" + sap_install_media_detect_source_directory: "{{ sap_install_media_detect_source_directory | default('/software') }}" - name: Set facts for all hosts - use facts from localhost - Generic - SAP ID ansible.builtin.set_fact: @@ -22,13 +34,12 @@ - name: Set facts for all hosts - use facts from localhost - SAP HANA ansible.builtin.set_fact: - sap_hana_sid: "{{ sap_hana_sid | default(sap_system_hana_db_sid) }}" + sap_hana_install_sid: "{{ sap_hana_install_sid | default(sap_system_hana_db_sid) }}" 
sap_hana_install_instance_nr: "{{ sap_hana_install_instance_nr | default(sap_system_hana_db_instance_nr) }}" sap_hana_install_use_master_password: "{{ sap_hana_install_use_master_password | default('y') }}" sap_hana_install_master_password: "{{ sap_hana_install_master_password | default('') }}" - sap_hana_install_software_directory: "{{ sap_software_download_directory | default('/software') }}" when: - - (sap_hana_sid is defined or sap_system_hana_db_sid is defined) or (hostvars[inventory_hostname].vars['sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary'][sap_vm_provision_host_specification_plan][inventory_hostname].sap_system_hana_db_sid is defined) + - (sap_hana_install_sid is defined or sap_system_hana_db_sid is defined) or (hostvars[inventory_hostname].vars['sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary'][sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_system_hana_db_sid is defined) - name: Set facts for all hosts - use facts from localhost - SAP SWPM @@ -42,10 +53,59 @@ sap_swpm_db_sid: "{{ sap_swpm_db_sid | default(sap_system_hana_db_sid) | default('') }}" sap_swpm_db_instance_nr: "{{ sap_swpm_db_instance_nr | default(sap_system_hana_db_instance_nr) | default('') }}" when: - - (sap_swpm_sid is defined or sap_system_sid is defined) or (hostvars[inventory_hostname].vars['sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary'][sap_vm_provision_host_specification_plan][inventory_hostname].sap_system_sid is defined) + - (sap_swpm_sid is defined or sap_system_sid is defined) or (hostvars[inventory_hostname].vars['sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary'][sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_system_sid is defined) - (sap_swpm_templates_install_dictionary is defined or sap_system_sid is defined) or hostvars[inventory_hostname].sap_swpm_templates_install_dictionary is defined +- name: Set facts for all hosts - use facts from localhost - HA/DR - Virtual IP for SAP HANA Primary node + ansible.builtin.set_fact: + sap_vm_temp_vip_hana_primary: "{{ sap_vm_provision_ha_vip_hana_primary }}" + sap_ha_pacemaker_cluster_vip_hana_primary_ip_address: "{{ sap_vm_provision_ha_vip_hana_primary }}" + when: + - (sap_vm_provision_ha_vip_hana_primary | length) > 0 + no_log: "{{ __sap_vm_provision_no_log }}" + +- name: Set facts for all hosts - use facts from localhost - HA/DR - Virtual IP for SAP AnyDB Primary node + ansible.builtin.set_fact: + sap_vm_temp_vip_anydb_primary: "{{ sap_vm_provision_ha_vip_anydb_primary }}" + sap_ha_install_anydb_ibmdb2_vip_primary_ip_address: "{{ sap_vm_provision_ha_vip_anydb_primary }}" + when: + - (sap_vm_provision_ha_vip_anydb_primary | length) > 0 + no_log: "{{ __sap_vm_provision_no_log }}" + +- name: Set facts for all hosts - use facts from localhost - HA/DR - Virtual IP for SAP NetWeaver ASCS + ansible.builtin.set_fact: + sap_vm_temp_vip_nwas_abap_ascs: "{{ sap_vm_provision_ha_vip_nwas_abap_ascs }}" + sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address: "{{ sap_vm_provision_ha_vip_nwas_abap_ascs }}" + when: + - (sap_vm_provision_ha_vip_nwas_abap_ascs | length) > 0 + no_log: "{{ __sap_vm_provision_no_log }}" + +- name: Set facts for all hosts - use facts from localhost - HA/DR - Virtual IP for SAP NetWeaver ERS + ansible.builtin.set_fact: + sap_vm_temp_vip_nwas_abap_ers: "{{ 
sap_vm_provision_ha_vip_nwas_abap_ers }}" + sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address: "{{ sap_vm_provision_ha_vip_nwas_abap_ers }}" + when: + - (sap_vm_provision_ha_vip_nwas_abap_ers | length) > 0 + no_log: "{{ __sap_vm_provision_no_log }}" + +# - name: Set facts for all hosts - use facts from localhost - HA/DR - Virtual IP for SAP NetWeaver PAS +# ansible.builtin.set_fact: +# sap_vm_temp_vip_nwas_abap_pas: "{{ sap_vm_provision_ha_vip_nwas_abap_pas }}" +# sap_ha_pacemaker_cluster_vip_nwas_abap_pas_ip_address: "{{ sap_vm_provision_ha_vip_nwas_abap_pas }}" +# when: +# - (sap_vm_provision_ha_vip_nwas_abap_pas | length) > 0 +# no_log: "{{ __sap_vm_provision_no_log }}" + +# - name: Set facts for all hosts - use facts from localhost - HA/DR - Virtual IP for SAP NetWeaver AAS +# ansible.builtin.set_fact: +# sap_vm_temp_vip_nwas_abap_aas: "{{ sap_vm_provision_ha_vip_nwas_abap_aas }}" +# sap_ha_pacemaker_cluster_vip_nwas_abap_aas_ip_address: "{{ sap_vm_provision_ha_vip_nwas_abap_aas }}" +# when: +# - (sap_vm_provision_ha_vip_nwas_abap_aas | length) > 0 +# no_log: "{{ __sap_vm_provision_no_log }}" + + - name: Set facts for all hosts - use facts from localhost - HA/DR - AWS ansible.builtin.set_fact: sap_ha_pacemaker_cluster_aws_region: "{{ sap_ha_pacemaker_cluster_aws_region }}" @@ -131,18 +191,6 @@ - sap_vm_provision_iac_platform == "ibmpowervm_vm" or sap_vm_provision_iac_platform == "ibmcloud_powervs" -# Required when defining Ansible Role variables within the host_specifications_dictionary for multiple SAP Systems / SAP Landscapes -- name: Set facts for all hosts - use facts from localhost - SAP Variables from host_specifications_dictionary - ansible.builtin.set_fact: - "{{ host_spec_sap_item }}": "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname][host_spec_sap_item] }}" - loop: "{{ vars['sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary'][sap_vm_provision_host_specification_plan][inventory_hostname].keys() | map('regex_findall', '^sap_.*') | flatten | select() | list }}" - loop_control: - loop_var: host_spec_sap_item - -- name: Set facts for all hosts - use facts from localhost - Host Specifications Dictionary - ansible.builtin.set_fact: - host_specifications_dictionary: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary') }}" - # Set default to none, this will not set the var if the group does not exist - name: Set facts for all hosts - sap_vm_provision_dynamic_inventory_* hostname variables to identify hosts for other Ansible Roles ansible.builtin.set_fact: diff --git a/roles/sap_vm_provision/tasks/common/set_ansible_vars_storage.yml b/roles/sap_vm_provision/tasks/common/set_ansible_vars_storage.yml index b8925bb..53bcd0d 100644 --- a/roles/sap_vm_provision/tasks/common/set_ansible_vars_storage.yml +++ b/roles/sap_vm_provision/tasks/common/set_ansible_vars_storage.yml @@ -6,7 +6,7 @@ host_node_scaleout_origin_spec: "{{ ansible_hostname | regex_replace('^(.+?)\\d*$', '\\1') }}" when: - sap_vm_provision_calculate_sap_hana_scaleout_active_coordinator is defined - - not inventory_hostname in vars[ansible_prompt_iac_platform_choice + '_host_specifications_dictionary'][sap_vm_provision_host_specification_plan].keys() + - not inventory_hostname in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + 
'_host_specifications_dictionary')[sap_vm_provision_host_specification_plan] # Use inventory_hostname_short to retrieve host specification from the dictionary. While ansible_hostname will work for Ansible only, using Ansible>Terraform may see ansible_hostname as 'localhost' and fail # For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned @@ -26,7 +26,7 @@ 'value': entry.value, } ]) %} - {%- elif not "disk_type" in entry.key %} + {%- elif (not "disk_type" in entry.key) and (not "disk_iops" in entry.key) %} {%- set add_entry = new_element.extend([ { 'key': entry.key, diff --git a/roles/sap_vm_provision/tasks/common/set_etc_hosts_ha.yml b/roles/sap_vm_provision/tasks/common/set_etc_hosts_ha.yml index f87271a..6bb3627 100644 --- a/roles/sap_vm_provision/tasks/common/set_etc_hosts_ha.yml +++ b/roles/sap_vm_provision/tasks/common/set_etc_hosts_ha.yml @@ -48,10 +48,11 @@ line: "{{ item }}" state: present loop: - - "{{ (sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default('192.168.1.90/32')) | regex_replace('/.*', '') }}\t{{ sap_swpm_db_host }}.{{ ansible_domain }}\t{{ sap_swpm_db_host }}" + - "{{ sap_vm_provision_ha_vip_hana_primary | regex_replace('/.*', '') }}\t{{ sap_swpm_db_host }}.{{ ansible_domain }}\t{{ sap_swpm_db_host }}" when: - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) - (not ansible_product_name == "Google Compute Engine" and not ansible_chassis_vendor == "Microsoft Corporation" and not ansible_chassis_asset_tag == 'ibmcloud') or ( (ansible_product_name == "Google Compute Engine" or ansible_chassis_vendor == "Microsoft Corporation" or ansible_chassis_asset_tag == 'ibmcloud') and (not inventory_hostname in groups["hana_primary"] or not inventory_hostname in groups["hana_secondary"]) ) + - (sap_vm_provision_ha_vip_hana_primary | length) > 0 - name: Update /etc/hosts file for SAP AnyDB HA @@ -71,9 +72,10 @@ line: "{{ item }}" state: present loop: - - "{{ (sap_vm_temp_vip_anydb_primary | default('192.168.1.90/32')) | regex_replace('/.*', '') }}\t{{ sap_swpm_db_host }}.{{ ansible_domain }}\t{{ sap_swpm_db_host }}" + - "{{ sap_vm_provision_ha_vip_anydb_primary | regex_replace('/.*', '') }}\t{{ sap_swpm_db_host }}.{{ ansible_domain }}\t{{ sap_swpm_db_host }}" when: - (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0)) + - (sap_vm_provision_ha_vip_anydb_primary | length) > 0 - name: Update /etc/hosts file for SAP NetWeaver HA @@ -84,9 +86,14 @@ loop: - "{{ sap_vm_provision_dynamic_inventory_nw_ascs_ip }}\t{{ sap_vm_provision_dynamic_inventory_nw_ascs_hostname }}.{{ ansible_domain }}\t{{ sap_vm_provision_dynamic_inventory_nw_ascs_hostname }}" - "{{ sap_vm_provision_dynamic_inventory_nw_ers_ip }}\t{{ sap_vm_provision_dynamic_inventory_nw_ers_hostname }}.{{ ansible_domain }}\t{{ sap_vm_provision_dynamic_inventory_nw_ers_hostname }}" - - "{{ sap_vm_provision_dynamic_inventory_nw_pas_ip }}\t{{ sap_vm_provision_dynamic_inventory_nw_pas_hostname }}.{{ ansible_domain }}\t{{ sap_vm_provision_dynamic_inventory_nw_pas_hostname }}" +# - "{{ sap_vm_provision_dynamic_inventory_nw_pas_ip }}\t{{ sap_vm_provision_dynamic_inventory_nw_pas_hostname }}.{{ ansible_domain }}\t{{ sap_vm_provision_dynamic_inventory_nw_pas_hostname }}" + # Allows to build ASCS ERS cluster without PAS if PAS details are not provided + - "{{ sap_vm_provision_dynamic_inventory_nw_pas_ip | string + '\t' + sap_vm_provision_dynamic_inventory_nw_pas_hostname + '.' 
+ ansible_domain + '\t' + sap_vm_provision_dynamic_inventory_nw_pas_hostname + if (sap_vm_provision_dynamic_inventory_nw_pas_hostname is defined and sap_vm_provision_dynamic_inventory_nw_pas_hostname | length > 0 ) + and (sap_vm_provision_dynamic_inventory_nw_pas_ip is defined and sap_vm_provision_dynamic_inventory_nw_pas_ip | length > 0) else ''}}" when: - (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) + - item != '' - name: Update /etc/hosts file with Virtual IPs for SAP NetWeaver HA - ASCS / ERS ansible.builtin.lineinfile: @@ -94,11 +101,13 @@ line: "{{ item }}" state: present loop: - - "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default('192.168.2.10/32')) | regex_replace('/.*', '') }}\t{{ sap_swpm_ascs_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_ascs_instance_hostname }}" - - "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default('192.168.2.11/32')) | regex_replace('/.*', '') }}\t{{ sap_swpm_ers_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_ers_instance_hostname }}" + - "{{ sap_vm_provision_ha_vip_nwas_abap_ascs | regex_replace('/.*', '') }}\t{{ sap_swpm_ascs_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_ascs_instance_hostname }}" + - "{{ sap_vm_provision_ha_vip_nwas_abap_ers | regex_replace('/.*', '') }}\t{{ sap_swpm_ers_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_ers_instance_hostname }}" when: - (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) - not ansible_chassis_asset_tag == 'ibmcloud' or ((not inventory_hostname_short in groups['nwas_ascs'] and not inventory_hostname_short in groups['nwas_ers']) and ansible_chassis_asset_tag == 'ibmcloud') + - (sap_vm_provision_ha_vip_nwas_abap_ascs | length) > 0 + - (sap_vm_provision_ha_vip_nwas_abap_ers | length) > 0 # - name: Update /etc/hosts file with Virtual IPs for SAP NetWeaver HA - PAS / AAS # ansible.builtin.lineinfile: @@ -106,11 +115,13 @@ # line: "{{ item }}" # state: present # loop: - # - "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_pas_ip_address | default('192.168.2.12/32')) | regex_replace('/.*', '') }}\t{{ sap_swpm_pas_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_pas_instance_hostname }}" - # - "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_aas_ip_address | default('192.168.2.13/32')) | regex_replace('/.*', '') }}\t{{ .sap_swpm_aas_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_aas_instance_hostname }}" + # - "{{ sap_vm_provision_ha_vip_nwas_abap_pas | regex_replace('/.*', '') }}\t{{ sap_swpm_pas_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_pas_instance_hostname }}" + # - "{{ sap_vm_provision_ha_vip_nwas_abap_aas | regex_replace('/.*', '') }}\t{{ .sap_swpm_aas_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_aas_instance_hostname }}" # when: # - (groups["nwas_aas"] is defined and (groups["nwas_aas"] | length>0)) # - not ansible_chassis_asset_tag == 'ibmcloud' or ((not inventory_hostname_short in groups['nwas_pas'] and not inventory_hostname_short in groups['nwas_pas']) and ansible_chassis_asset_tag == 'ibmcloud') + # - (sap_vm_provision_ha_vip_nwas_abap_pas | length) > 0 + # - (sap_vm_provision_ha_vip_nwas_abap_aas | length) > 0 - name: Update /etc/hosts file for SAP NetWeaver AAS ansible.builtin.lineinfile: @@ -124,10 +135,11 @@ # Ensure SAP AnyDB, SAP HANA or SAP NetWeaver hostname is not localhost in /etc/hosts. See SAP Note 1054467 - Local host name refers to loopback address. 
-# However, as IBM Cloud Load Balancer is a secure design using Back-end Pool servers with singular Port Number and Front-end Listener with single Port Number,
-# and controls the Virtual IP from the Load Balancer. Therefore the Virtual IP is not added as a Secondary IP to the OS Network Interface,
-# which causes connectivity issues due to SAP NetWeaver instance random dynamic port usage.
-# As workaround, configure /etc/hosts to map Virtual Hostname to use the host IP Address instead of the Virtual IP Address
+# However, the IBM Cloud Load Balancer is a secure design (the Back-end Pool servers and the Front-end Listener each use a single Port Number)
+# and the Load Balancer controls the Virtual IP, so the Virtual IP is not added as a Secondary IP to the OS Network Interface.
+# This causes connectivity issues due to the SAP NetWeaver instance's random dynamic port usage.
+# As a workaround, configure /etc/hosts to map the Virtual Hostname to the host IP Address instead of the Virtual IP Address,
+# by appending an alias of the Virtual Hostname to the existing /etc/hosts entry for the host IP Address.
 - name: Ansible Play for controlling execution to an Infrastructure Platform when High Availability is used - IBM Cloud
   when:
     - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
@@ -140,12 +152,14 @@
         line: "{{ item }}"
         state: present
         loop:
-          - "{{ sap_vm_provision_dynamic_inventory_nw_ascs_ip }}\t{{ sap_swpm_ascs_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_ascs_instance_hostname }}"
-          - "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default('192.168.2.11/32')) | regex_replace('/.*', '') }}\t{{ sap_swpm_ers_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_ers_instance_hostname }}"
+          - "{{ sap_vm_provision_dynamic_inventory_nw_ascs_ip }}\t{{ sap_vm_provision_dynamic_inventory_nw_ascs_hostname }}.{{ ansible_domain }}\t{{ sap_vm_provision_dynamic_inventory_nw_ascs_hostname }}\t{{ sap_swpm_ascs_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_ascs_instance_hostname }}"
+          - "{{ sap_vm_provision_dynamic_inventory_nw_ers_ip }}\t{{ sap_vm_provision_dynamic_inventory_nw_ers_hostname }}.{{ ansible_domain }}\t{{ sap_vm_provision_dynamic_inventory_nw_ers_hostname }}"
+          - "{{ sap_vm_provision_ha_vip_nwas_abap_ers | regex_replace('/.*', '') }}\t{{ sap_swpm_ers_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_ers_instance_hostname }}"
       when:
         - (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0))
        - ansible_chassis_asset_tag == 'ibmcloud'
        - inventory_hostname_short in groups['nwas_ascs']
+       - (sap_vm_provision_ha_vip_nwas_abap_ascs | length) > 0

     - name: Update /etc/hosts file with Virtual Hostname for SAP NetWeaver HA ERS on IBM Cloud
       ansible.builtin.lineinfile:
@@ -153,9 +167,41 @@
         line: "{{ item }}"
         state: present
         loop:
-          - "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default('192.168.2.10/32')) | regex_replace('/.*', '') }}\t{{ sap_swpm_ascs_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_ascs_instance_hostname }}"
-          - "{{ sap_vm_provision_dynamic_inventory_nw_ers_ip }}\t{{ sap_swpm_ers_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_ers_instance_hostname }}"
+          - "{{ sap_vm_provision_dynamic_inventory_nw_ascs_ip }}\t{{ sap_vm_provision_dynamic_inventory_nw_ascs_hostname }}.{{ ansible_domain }}\t{{ 
sap_vm_provision_dynamic_inventory_nw_ascs_hostname }}" + - "{{ sap_vm_provision_dynamic_inventory_nw_ers_ip }}\t{{ sap_vm_provision_dynamic_inventory_nw_ers_hostname }}.{{ ansible_domain }}\t{{ sap_vm_provision_dynamic_inventory_nw_ers_hostname }}\t{{ sap_swpm_ers_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_ers_instance_hostname }}" + - "{{ sap_vm_provision_ha_vip_nwas_abap_ascs | regex_replace('/.*', '') }}\t{{ sap_swpm_ascs_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_ascs_instance_hostname }}" + when: + - (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) + - ansible_chassis_asset_tag == 'ibmcloud' + - inventory_hostname_short in groups['nwas_ers'] + - (sap_vm_provision_ha_vip_nwas_abap_ers | length) > 0 + + # Remove /etc/hosts entries and then consolidate into one entry with aliases + - name: Remove /etc/hosts multiple entries for SAP NetWeaver ASCS and ASCS HA Virtual Hostname + ansible.builtin.lineinfile: + dest: /etc/hosts + line: "{{ item }}" + state: absent + loop: + - "{{ sap_vm_provision_dynamic_inventory_nw_ascs_ip }}\t{{ sap_vm_provision_dynamic_inventory_nw_ascs_hostname }}.{{ ansible_domain }}\t{{ sap_vm_provision_dynamic_inventory_nw_ascs_hostname }}" + - "{{ sap_vm_provision_ha_vip_nwas_abap_ascs | regex_replace('/.*', '') }}\t{{ sap_swpm_ascs_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_ascs_instance_hostname }}" + when: + - (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) + - ansible_chassis_asset_tag == 'ibmcloud' + - inventory_hostname_short in groups['nwas_ascs'] + - (sap_vm_provision_ha_vip_nwas_abap_ascs | length) > 0 + + # Remove /etc/hosts entries and then consolidate into one entry with aliases + - name: Remove /etc/hosts multiple entries for SAP NetWeaver ERS and ERS HA Virtual Hostname + ansible.builtin.lineinfile: + dest: /etc/hosts + line: "{{ item }}" + state: absent + loop: + - "{{ sap_vm_provision_dynamic_inventory_nw_ers_ip }}\t{{ sap_vm_provision_dynamic_inventory_nw_ers_hostname }}.{{ ansible_domain }}\t{{ sap_vm_provision_dynamic_inventory_nw_ers_hostname }}" + - "{{ sap_vm_provision_ha_vip_nwas_abap_ers | regex_replace('/.*', '') }}\t{{ sap_swpm_ers_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_ers_instance_hostname }}" when: - (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) - ansible_chassis_asset_tag == 'ibmcloud' - inventory_hostname_short in groups['nwas_ers'] + - (sap_vm_provision_ha_vip_nwas_abap_ers | length) > 0 diff --git a/roles/sap_vm_provision/tasks/common/set_etc_hosts_scaleout.yml b/roles/sap_vm_provision/tasks/common/set_etc_hosts_scaleout.yml index ae637a8..d818059 100644 --- a/roles/sap_vm_provision/tasks/common/set_etc_hosts_scaleout.yml +++ b/roles/sap_vm_provision/tasks/common/set_etc_hosts_scaleout.yml @@ -31,7 +31,7 @@ line: "{{ item }}" state: present loop: - - "# SAP HANA scale-out workers\n{% for host in (groups['hana_primary'] | reject('search', '0') | list) %}{% if (host != inventory_hostname_short) %}{{ hostvars[host]['ansible_host'] }}\t{{ hostvars[host]['ansible_fqdn'] }}\t{{ hostvars[host]['inventory_hostname_short'] }}\n{% endif %}{% endfor %}" + - "# SAP HANA scale-out workers\n{% for host in (groups['hana_primary'] | reject('regex', '0$') | list) %}{% if (host != inventory_hostname_short) %}{{ hostvars[host]['ansible_host'] }}\t{{ hostvars[host]['ansible_fqdn'] }}\t{{ hostvars[host]['inventory_hostname_short'] }}\n{% endif %}{% endfor %}" loop_control: label: "{{ inventory_hostname_short }}" when: @@ -43,7 +43,7 @@ line: 
"{{ item }}" state: present loop: - - "# SAP HANA scale-out workers\n{% for host in (groups['hana_primary'] | reject('search', '0') | list)[:-1] %}{% if (host != inventory_hostname_short) %}{{ hostvars[host]['ansible_host'] }}\t{{ hostvars[host]['ansible_fqdn'] }}\t{{ hostvars[host]['inventory_hostname_short'] }}\n{% endif %}{% endfor %}" + - "# SAP HANA scale-out workers\n{% for host in (groups['hana_primary'] | reject('regex', '0$') | list)[:-1] %}{% if (host != inventory_hostname_short) %}{{ hostvars[host]['ansible_host'] }}\t{{ hostvars[host]['ansible_fqdn'] }}\t{{ hostvars[host]['inventory_hostname_short'] }}\n{% endif %}{% endfor %}" loop_control: label: "{{ inventory_hostname_short }}" when: sap_vm_provision_calculate_sap_hana_scaleout_standby > 0 diff --git a/roles/sap_vm_provision/tasks/main.yml b/roles/sap_vm_provision/tasks/main.yml index 0db4d41..36190d6 100644 --- a/roles/sap_vm_provision/tasks/main.yml +++ b/roles/sap_vm_provision/tasks/main.yml @@ -3,7 +3,7 @@ #### Provision host/s for Deployment of SAP Software (as part of an SAP Software Solution Scenario e.g. SAP S/4HANA Distributed HA) #### - name: Begin execution - delegate_to: localhost + delegate_to: "{{ sap_vm_provision_execution_host }}" delegate_facts: false # keep facts with the original play hosts, not the delegated host block: @@ -15,7 +15,7 @@ #### Post Deployment of SAP - tasks for GCP, IBM Cloud, MS Azure #### - name: Begin execution - delegate_to: localhost + delegate_to: "{{ sap_vm_provision_execution_host }}" delegate_facts: false # keep facts with the original play hosts, not the delegated host block: diff --git a/roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/execute_main.yml index f34ce81..bcbeced 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/execute_main.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/execute_main.yml @@ -20,6 +20,25 @@ access_key: "{{ sap_vm_provision_aws_access_key }}" secret_key: "{{ sap_vm_provision_aws_secret_access_key }}" + - name: Create Placement Groups when High Availability + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_aws_placement_group + run_once: true + community.aws.ec2_placement_group: + name: "{{ sap_vm_provision_aws_placement_resource_name }}-{{ item }}" + state: present + strategy: spread + access_key: "{{ sap_vm_provision_aws_access_key }}" + secret_key: "{{ sap_vm_provision_aws_secret_access_key }}" + loop: + - "{{ 'hana' if 'hana_secondary' in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan] | json_query('*') | map(attribute='sap_host_type') else '' }}" + - "{{ 'anydb' if 'anydb_secondary' in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan] | json_query('*') | map(attribute='sap_host_type') else '' }}" + - "{{ 'nwas' if 'nwas_ers' in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan] | json_query('*') | map(attribute='sap_host_type') else '' }}" + when: + - sap_vm_provision_aws_placement_resource_name is defined + - sap_vm_provision_aws_placement_strategy_spread + - not item == '' + - name: Set fact to hold loop variables from include_tasks ansible.builtin.set_fact: 
register_provisioned_host_all: [] @@ -63,7 +82,7 @@ - name: Set fact to hold all inventory hosts in all groups ansible.builtin.set_fact: - groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}" + groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['anydb_primary'] | default([]) ] , [ groups['anydb_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}" - name: Set Ansible Vars register: __sap_vm_provision_task_ansible_vars_set @@ -84,6 +103,7 @@ wait: true access_key: "{{ sap_vm_provision_aws_access_key }}" secret_key: "{{ sap_vm_provision_aws_secret_access_key }}" + overwrite: "{{ sap_vm_provision_aws_dns_overwrite if sap_vm_provision_aws_dns_overwrite | bool else false }}" rescue: # This requires no_log set on each Ansible Task, and not set on the Ansible Task Block @@ -151,6 +171,10 @@ ansible.builtin.include_tasks: file: common/set_ansible_vars_storage.yml + - name: Register Package Repositories for OS Images with Bring-Your-Own-Subscription (BYOS) + ansible.builtin.include_tasks: + file: common/register_os.yml + - name: Ansible Task block for provisioning of High Availability resources for AWS EC2 instances delegate_to: localhost diff --git a/roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/execute_provision.yml b/roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/execute_provision.yml index 7c62e81..82fb7c4 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/execute_provision.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/execute_provision.yml @@ -14,6 +14,10 @@ - sap_vm_provision_calculate_sap_hana_scaleout_active_coordinator is defined - not inventory_hostname in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan].keys() +- name: Set fact for host specifications of the provision target + ansible.builtin.set_fact: + target_provision_host_spec: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)] }}" + - name: Provision AWS EC2 Virtual Server instance register: __sap_vm_provision_task_provision_host_single no_log: "{{ __sap_vm_provision_no_log }}" @@ -21,11 +25,10 @@ state: started name: "{{ inventory_hostname }}" image_id: "{{ (__sap_vm_provision_task_aws_ami.images | sort(attribute='creation_date') | last).image_id }}" - instance_type: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].virtual_machine_profile }}" + instance_type: "{{ target_provision_host_spec.virtual_machine_profile }}" key_name: "{{ sap_vm_provision_aws_key_pair_name_ssh_host_public_key }}" security_groups: "{{ sap_vm_provision_aws_vpc_sg_names }}" vpc_subnet_id: "{{ sap_vm_provision_aws_vpc_subnet_id }}" - tenancy: default metadata_options: http_endpoint: 
enabled # http_put_response_hop_limit: 8 @@ -33,9 +36,25 @@ # instance_metadata_tags: disabled network: assign_public_ip: false - source_dest_check: "{{ not lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].disable_ip_anti_spoofing }}" # Disable the Anti IP Spoofing by setting Source/Destination Check to false + source_dest_check: "{{ not target_provision_host_spec.disable_ip_anti_spoofing }}" # Disable the Anti IP Spoofing by setting Source/Destination Check to false + # availability_zone: "{{ sap_vm_provision_aws_vpc_availability_zone }}" # Conflict with vpc_subnet_id + placement: "{{ placement_dict if sap_vm_provision_aws_placement_strategy_spread else omit }}" access_key: "{{ sap_vm_provision_aws_access_key }}" secret_key: "{{ sap_vm_provision_aws_secret_access_key }}" + vars: + placement_dict: + availability_zone: "{{ sap_vm_provision_aws_vpc_availability_zone }}" + group_name: "{{ ( + (__sap_vm_provision_task_aws_placement_group.results | selectattr('item','==','hana'))[0].name + if ('hana_primary' in target_provision_host_spec.sap_host_type or 'hana_secondary' in target_provision_host_spec.sap_host_type) + else + (__sap_vm_provision_task_aws_placement_group.results | selectattr('item','==','anydb'))[0].name + if ('anydb_primary' in target_provision_host_spec.sap_host_type or 'anydb_secondary' in target_provision_host_spec.sap_host_type) + else + (__sap_vm_provision_task_aws_placement_group.results | selectattr('item','==','nwas'))[0].name + if ('nwas_ascs' in target_provision_host_spec.sap_host_type or 'nwas_ers' in target_provision_host_spec.sap_host_type) + ) | default(omit) }}" + tenancy: default # default is shared tenancy - name: Set fact for storage volume letters calculations (max 25 volumes) ansible.builtin.set_fact: @@ -73,7 +92,7 @@ filesystem_volume_map: | {% set volume_map = [] -%} {% set av_vol = available_volumes -%} - {% for storage_item in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].storage_definition -%} + {% for storage_item in target_provision_host_spec.storage_definition -%} {% for idx in range(0, storage_item.disk_count | default(1)) -%} {% if (storage_item.filesystem_type is defined) -%} {% if ('swap' in storage_item.filesystem_type and storage_item.swap_path is not defined) @@ -85,7 +104,8 @@ 'fstype': storage_item.filesystem_type | default('xfs'), 'name': storage_item.name + idx|string, 'size': storage_item.disk_size | default(0), - 'type': storage_item.disk_type | default('gp3') + 'type': storage_item.disk_type | default('gp3'), + 'iops': storage_item.disk_iops | default(omit) } ]) %} {%- set _ = av_vol.pop(0) -%} @@ -106,6 +126,7 @@ instance: "{{ __sap_vm_provision_task_provision_host_single.instance_ids[0] }}" volume_type: "{{ vol_item.type }}" volume_size: "{{ vol_item.size }}" + iops: "{{ vol_item.iops | default(omit) }}" device_name: "{{ vol_item.device }}" delete_on_termination: true access_key: "{{ sap_vm_provision_aws_access_key }}" @@ -155,6 +176,7 @@ delegate_hostname: "{{ inventory_hostname }}" delegate_sap_vm_provision_dns_root_domain_name: "{{ sap_vm_provision_dns_root_domain }}" + ### begin block, parameters will be applied to each task within the block - name: Allow login from root OS User remote_user: 
ec2-user @@ -167,12 +189,17 @@ ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no -o ProxyCommand='ssh -W %h:%p {{ delegate_sap_vm_provision_bastion_user }}@{{ delegate_sap_vm_provision_bastion_public_ip }} -p {{ delegate_sap_vm_provision_bastion_ssh_port }} -i {{ delegate_sap_vm_provision_ssh_bastion_private_key_file_path }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' block: + - name: Wait until SSH connection is available + ansible.builtin.wait_for_connection: + timeout: 300 + + # Remove everything before ssh-rsa/ed25519 but do not catch key comment that uses 'ssh-' - name: Fix root authorized_keys entries ansible.builtin.replace: path: /root/.ssh/authorized_keys backup: true - regexp: '(^.*ssh-rsa)' - replace: 'ssh-rsa' + regexp: '(^.*command.* ssh-)' + replace: 'ssh-' - name: Permit root login register: __sap_vm_provision_task_os_sshd_config @@ -193,7 +220,7 @@ - name: Append loop value to register ansible.builtin.set_fact: - __sap_vm_provision_task_provision_host_single: "{{ __sap_vm_provision_task_provision_host_single | combine( { 'host_node' : inventory_hostname } , { 'sap_host_type' : lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_host_type } , { 'sap_system_type' : (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_system_type | default('')) } ) }}" + __sap_vm_provision_task_provision_host_single: "{{ __sap_vm_provision_task_provision_host_single | combine( { 'host_node' : inventory_hostname } , { 'sap_host_type' : target_provision_host_spec.sap_host_type } , { 'sap_system_type' : (target_provision_host_spec.sap_system_type | default('')) } ) }}" - name: Append output to merged register ansible.builtin.set_fact: diff --git a/roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/execute_setup_ha.yml b/roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/execute_setup_ha.yml index 0ae617e..18e5c58 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/execute_setup_ha.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/execute_setup_ha.yml @@ -27,7 +27,7 @@ purge_routes: false state: present routes: - - dest: "{{ sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default('192.168.1.90/32') }}" + - dest: "{{ sap_vm_provision_ha_vip_hana_primary }}" instance_id: "{{ hostvars[host_node].ansible_board_asset_tag }}" access_key: "{{ sap_vm_provision_aws_access_key }}" secret_key: "{{ sap_vm_provision_aws_secret_access_key }}" @@ -47,10 +47,11 @@ record: "{{ sap_swpm_db_host }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}" type: A ttl: 7200 - value: "{{ (sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default('192.168.1.90/32')) | regex_replace('/.*', '') }}" + value: "{{ sap_vm_provision_ha_vip_hana_primary | regex_replace('/.*', '') }}" wait: true access_key: "{{ sap_vm_provision_aws_access_key }}" secret_key: "{{ sap_vm_provision_aws_secret_access_key }}" + overwrite: "{{ sap_vm_provision_aws_dns_overwrite if sap_vm_provision_aws_dns_overwrite | bool else false }}" loop: "{{ (groups['hana_primary'] | default([])) }}" loop_control: loop_var: 
host_node @@ -68,7 +69,7 @@ purge_routes: false state: present routes: - - dest: "{{ sap_vm_temp_vip_anydb_primary | default('192.168.1.90/32') }}" + - dest: "{{ sap_vm_provision_ha_vip_anydb_primary }}" instance_id: "{{ hostvars[host_node].ansible_board_asset_tag }}" access_key: "{{ sap_vm_provision_aws_access_key }}" secret_key: "{{ sap_vm_provision_aws_secret_access_key }}" @@ -88,10 +89,11 @@ record: "{{ sap_swpm_db_host }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}" type: A ttl: 7200 - value: "{{ (sap_vm_temp_vip_anydb_primary | default('192.168.1.90/32')) | regex_replace('/.*', '') }}" + value: "{{ sap_vm_provision_ha_vip_anydb_primary | regex_replace('/.*', '') }}" wait: true access_key: "{{ sap_vm_provision_aws_access_key }}" secret_key: "{{ sap_vm_provision_aws_secret_access_key }}" + overwrite: "{{ sap_vm_provision_aws_dns_overwrite if sap_vm_provision_aws_dns_overwrite | bool else false }}" loop: "{{ (groups['anydb_primary'] | default([])) }}" loop_control: loop_var: host_node @@ -109,7 +111,7 @@ purge_routes: false state: present routes: - - dest: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default('192.168.2.10/32') }}" + - dest: "{{ sap_vm_provision_ha_vip_nwas_abap_ascs }}" instance_id: "{{ hostvars[host_node].ansible_board_asset_tag }}" access_key: "{{ sap_vm_provision_aws_access_key }}" secret_key: "{{ sap_vm_provision_aws_secret_access_key }}" @@ -129,10 +131,11 @@ record: "{{ sap_swpm_ascs_instance_hostname }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}" type: A ttl: 7200 - value: "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default('192.168.2.10/32')) | regex_replace('/.*', '') }}" + value: "{{ sap_vm_provision_ha_vip_nwas_abap_ascs | regex_replace('/.*', '') }}" wait: true access_key: "{{ sap_vm_provision_aws_access_key }}" secret_key: "{{ sap_vm_provision_aws_secret_access_key }}" + overwrite: "{{ sap_vm_provision_aws_dns_overwrite if sap_vm_provision_aws_dns_overwrite | bool else false }}" loop: "{{ (groups['nwas_ascs'] | default([])) }}" loop_control: loop_var: host_node @@ -150,7 +153,7 @@ purge_routes: false state: present routes: - - dest: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default('192.168.2.11/32') }}" + - dest: "{{ sap_vm_provision_ha_vip_nwas_abap_ers }}" instance_id: "{{ hostvars[host_node].ansible_board_asset_tag }}" access_key: "{{ sap_vm_provision_aws_access_key }}" secret_key: "{{ sap_vm_provision_aws_secret_access_key }}" @@ -170,10 +173,11 @@ record: "{{ sap_swpm_ers_instance_hostname }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}" type: A ttl: 7200 - value: "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default('192.168.2.11/32')) | regex_replace('/.*', '') }}" + value: "{{ sap_vm_provision_ha_vip_nwas_abap_ers | regex_replace('/.*', '') }}" wait: true access_key: "{{ sap_vm_provision_aws_access_key }}" secret_key: "{{ sap_vm_provision_aws_secret_access_key }}" + overwrite: "{{ sap_vm_provision_aws_dns_overwrite if sap_vm_provision_aws_dns_overwrite | bool else false }}" loop: "{{ (groups['nwas_ers'] | default([])) }}" loop_control: loop_var: host_node @@ -193,7 +197,7 @@ # purge_routes: false # state: present # routes: -# - dest: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_pas_ip_address | default('192.168.2.12/32') }}" +# - dest: "{{ sap_vm_provision_ha_vip_nwas_abap_pas }}" # instance_id: "{{ hostvars[host_node].ansible_board_asset_tag }}" # loop: "{{ (groups['nwas_pas'] | default([])) }}" # loop_control: @@ -209,7 +213,7 @@ # 
record: "{{ sap_swpm_pas_instance_hostname }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}" # type: A # ttl: 7200 -# value: "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_pas_ip_address | default('192.168.2.12/32')) | regex_replace('/.*', '') }}" +# value: "{{ sap_vm_provision_ha_vip_nwas_abap_pas | regex_replace('/.*', '') }}" # wait: true # loop: "{{ (groups['nwas_pas'] | default([])) }}" # loop_control: @@ -229,7 +233,7 @@ # purge_routes: false # state: present # routes: -# - dest: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_aas_ip_address | default('192.168.2.13/32') }}" +# - dest: "{{ sap_vm_provision_ha_vip_nwas_abap_aas }}" # instance_id: "{{ hostvars[host_node].ansible_board_asset_tag }}" # loop: "{{ (groups['nwas_aas'] | default([])) }}" # loop_control: @@ -245,7 +249,7 @@ # record: "{{ sap_swpm_aas_instance_hostname }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}" # type: A # ttl: 7200 -# value: "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_aas_ip_address | default('192.168.2.13/32')) | regex_replace('/.*', '') }}" +# value: "{{ sap_vm_provision_ha_vip_nwas_abap_aas | regex_replace('/.*', '') }}" # wait: true # loop: "{{ (groups['nwas_aas'] | default([])) }}" # loop_control: @@ -254,11 +258,26 @@ # - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0) -- name: AWS IAM Role - HA-Role-Pacemaker +# Setup custom IAM Role name using sap_vm_provision_aws_ha_iam_role +- name: AWS IAM Role - Prepare IAM Role name + ansible.builtin.set_fact: + __sap_vm_provision_aws_ha_iam_role: + "{{ sap_vm_provision_aws_ha_iam_role + if sap_vm_provision_aws_ha_iam_role is defined and sap_vm_provision_aws_ha_iam_role | length > 0 + else 'HA-Role-Pacemaker' }}" + +# Following IAM Roles, Policies and Instance Profiles are created based on: +# https://documentation.suse.com/sbp/sap-15/html/SLES4SAP-hana-sr-guide-perfopt-15-aws/index.html#id-aws-roles-and-policies +# https://docs.aws.amazon.com/sap/latest/sap-netweaver/sles-netweaver-ha-settings.html#stonith +# https://access.redhat.com/articles/4175371#create-policies +# https://docs.aws.amazon.com/sap/latest/sap-netweaver/rhel-netweaver-ha-settings.html#stonith +# https://docs.aws.amazon.com/sap/latest/sap-hana/sap-hana-on-aws-cluster-configuration-prerequisites.html#sap-hana-on-aws-create-the-stonith-policy + +- name: AWS IAM Role - {{ __sap_vm_provision_aws_ha_iam_role }} register: __sap_vm_provision_task_aws_iam_role_ha_pacemaker no_log: "{{ __sap_vm_provision_no_log }}" amazon.aws.iam_role: - name: "HA-Role-Pacemaker" + name: "{{ __sap_vm_provision_aws_ha_iam_role }}" assume_role_policy_document: | { "Version": "2012-10-17", @@ -283,7 +302,7 @@ amazon.aws.iam_policy: state: present iam_type: role - iam_name: "HA-Role-Pacemaker" + iam_name: "{{ __sap_vm_provision_aws_ha_iam_role }}" policy_name: "HA-Policy-DataProvider" policy_json: | { @@ -319,7 +338,7 @@ amazon.aws.iam_policy: state: present iam_type: role - iam_name: "HA-Role-Pacemaker" + iam_name: "{{ __sap_vm_provision_aws_ha_iam_role }}" policy_name: "HA-Policy-OverlayVirtualIPAgent" policy_json: | { @@ -352,7 +371,7 @@ amazon.aws.iam_policy: state: present iam_type: role - iam_name: "HA-Role-Pacemaker" + iam_name: "{{ __sap_vm_provision_aws_ha_iam_role }}" policy_name: "HA-Policy-STONITH-SAPHANA" policy_json: | { @@ -374,11 +393,14 @@ "Action": [ "ec2:ModifyInstanceAttribute", "ec2:StartInstances", - "ec2:StopInstances" + "ec2:StopInstances", + "ec2:RebootInstances" ], "Resource": [ - "arn:aws:ec2:{{ sap_vm_provision_aws_region }}:{{ 
__sap_vm_provision_task_aws_account_info.account }}:instance/{{ hostvars[groups['hana_primary'][0]].ansible_host }}", - "arn:aws:ec2:{{ sap_vm_provision_aws_region }}:{{ __sap_vm_provision_task_aws_account_info.account }}:instance/{{ hostvars[groups['hana_secondary'][0]].ansible_host }}" + "arn:aws:ec2:{{ sap_vm_provision_aws_region }}:{{ __sap_vm_provision_task_aws_account_info.account }}:instance/{{ + hostvars[groups['hana_primary'][0]].__sap_vm_provision_task_provision_host_single_info.instances[0].instance_id }}", + "arn:aws:ec2:{{ sap_vm_provision_aws_region }}:{{ __sap_vm_provision_task_aws_account_info.account }}:instance/{{ + hostvars[groups['hana_secondary'][0]].__sap_vm_provision_task_provision_host_single_info.instances[0].instance_id }}" ] } ] @@ -387,6 +409,51 @@ secret_key: "{{ sap_vm_provision_aws_secret_access_key }}" when: groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0) +# AWS HA for SAP - STONITH of SAP ANYDB +- name: AWS IAM Policy - HA-Policy-STONITH-SAPANYDB + register: __sap_vm_provision_task_aws_iam_policy_stonith_sapanydb + no_log: "{{ __sap_vm_provision_no_log }}" + amazon.aws.iam_policy: + state: present + iam_type: role + iam_name: "{{ __sap_vm_provision_aws_ha_iam_role }}" + policy_name: "HA-Policy-STONITH-SAPANYDB" + policy_json: | + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Stmt1424870324000", + "Effect": "Allow", + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeInstanceAttribute", + "ec2:DescribeTags" + ], + "Resource": "*" + }, + { + "Sid": "Stmt1424870324001", + "Effect": "Allow", + "Action": [ + "ec2:ModifyInstanceAttribute", + "ec2:StartInstances", + "ec2:StopInstances", + "ec2:RebootInstances" + ], + "Resource": [ + "arn:aws:ec2:{{ sap_vm_provision_aws_region }}:{{ __sap_vm_provision_task_aws_account_info.account }}:instance/{{ + hostvars[groups['anydb_primary'][0]].__sap_vm_provision_task_provision_host_single_info.instances[0].instance_id }}", + "arn:aws:ec2:{{ sap_vm_provision_aws_region }}:{{ __sap_vm_provision_task_aws_account_info.account }}:instance/{{ + hostvars[groups['anydb_secondary'][0]].__sap_vm_provision_task_provision_host_single_info.instances[0].instance_id }}" + ] + } + ] + } + access_key: "{{ sap_vm_provision_aws_access_key }}" + secret_key: "{{ sap_vm_provision_aws_secret_access_key }}" + when: groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0) + # AWS HA for SAP - STONITH of SAP NWAS - name: AWS IAM Policy - HA-Policy-STONITH-SAPNWAS register: __sap_vm_provision_task_aws_iam_policy_stonith_sapnwas @@ -394,7 +461,7 @@ amazon.aws.iam_policy: state: present iam_type: role - iam_name: "HA-Role-Pacemaker" + iam_name: "{{ __sap_vm_provision_aws_ha_iam_role }}" policy_name: "HA-Policy-STONITH-SAPNWAS" policy_json: | { @@ -419,8 +486,10 @@ "ec2:StopInstances" ], "Resource": [ - "arn:aws:ec2:{{ sap_vm_provision_aws_region }}:{{ __sap_vm_provision_task_aws_account_info.account }}:instance/{{ hostvars[groups['nwas_ascs'][0]].ansible_host }}", - "arn:aws:ec2:{{ sap_vm_provision_aws_region }}:{{ __sap_vm_provision_task_aws_account_info.account }}:instance/{{ hostvars[groups['nwas_ers'][0]].ansible_host }}" + "arn:aws:ec2:{{ sap_vm_provision_aws_region }}:{{ __sap_vm_provision_task_aws_account_info.account }}:instance/{{ + hostvars[groups['nwas_ascs'][0]].__sap_vm_provision_task_provision_host_single_info.instances[0].instance_id }}", + "arn:aws:ec2:{{ sap_vm_provision_aws_region }}:{{ __sap_vm_provision_task_aws_account_info.account }}:instance/{{ + 
hostvars[groups['nwas_ers'][0]].__sap_vm_provision_task_provision_host_single_info.instances[0].instance_id }}" ] } ] @@ -429,6 +498,14 @@ secret_key: "{{ sap_vm_provision_aws_secret_access_key }}" when: groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0) + +- name: AWS IAM Role - Prepare IAM Instance Profile name + ansible.builtin.set_fact: + __sap_vm_provision_aws_ha_iam_instance_profile: + "{{ sap_vm_provision_aws_ha_iam_instance_profile + if sap_vm_provision_aws_ha_iam_instance_profile is defined and sap_vm_provision_aws_ha_iam_instance_profile | length > 0 + else 'HA-Instance-Profile-Pacemaker-Cluster' }}" + # Equivalent to # aws iam create-instance-profile --instance-profile-name "HA-Instance-Profile-Pacemaker-Cluster" # aws iam add-role-to-instance-profile --role-name "HA-Role-Pacemaker" --instance-profile-name "HA-Instance-Profile-Pacemaker-Cluster" @@ -437,8 +514,8 @@ no_log: "{{ __sap_vm_provision_no_log }}" amazon.aws.iam_instance_profile: state: present - name: "HA-Instance-Profile-Pacemaker-Cluster" - role: "HA-Role-Pacemaker" + name: "{{ __sap_vm_provision_aws_ha_iam_instance_profile }}" + role: "{{ __sap_vm_provision_aws_ha_iam_role }}" path: "/" access_key: "{{ sap_vm_provision_aws_access_key }}" secret_key: "{{ sap_vm_provision_aws_secret_access_key }}" @@ -449,7 +526,7 @@ no_log: "{{ __sap_vm_provision_no_log }}" amazon.aws.ec2_instance: instance_ids: "{{ hostvars[host_node].ansible_board_asset_tag }}" - iam_instance_profile: "HA-Instance-Profile-Pacemaker-Cluster" + iam_instance_profile: "{{ __sap_vm_provision_aws_ha_iam_instance_profile }}" access_key: "{{ sap_vm_provision_aws_access_key }}" secret_key: "{{ sap_vm_provision_aws_secret_access_key }}" loop: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] ] | flatten | select() }}" @@ -458,13 +535,28 @@ when: groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0) ignore_errors: true +# Equivalent to aws ec2 associate-iam-instance-profile --iam-instance-profile "Name=HA-Instance-Profile-Pacemaker-Cluster" --instance-id {{ hostvars[host_node].ansible_board_asset_tag }} +- name: AWS EC2 Instances - associate AWS IAM Instance Profile for SAP ANYDB + register: __sap_vm_provision_task_aws_iam_associate_instance_sapanydb + no_log: "{{ __sap_vm_provision_no_log }}" + amazon.aws.ec2_instance: + instance_ids: "{{ hostvars[host_node].ansible_board_asset_tag }}" + iam_instance_profile: "{{ __sap_vm_provision_aws_ha_iam_instance_profile }}" + access_key: "{{ sap_vm_provision_aws_access_key }}" + secret_key: "{{ sap_vm_provision_aws_secret_access_key }}" + loop: "{{ [ [ groups['anydb_primary'] | default([]) ] , [ groups['anydb_secondary'] | default([]) ] ] | flatten | select() }}" + loop_control: + loop_var: host_node + when: groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0) + ignore_errors: true + # Equivalent to aws ec2 associate-iam-instance-profile --iam-instance-profile "Name=HA-Instance-Profile-Pacemaker-Cluster" --instance-id {{ hostvars[host_node].ansible_board_asset_tag }} - name: AWS EC2 Instances - associate AWS IAM Instance Profile for SAP NetWeaver register: __sap_vm_provision_task_aws_iam_associate_instance_sapnwas no_log: "{{ __sap_vm_provision_no_log }}" amazon.aws.ec2_instance: instance_ids: "{{ hostvars[host_node].ansible_board_asset_tag }}" - iam_instance_profile: "HA-Instance-Profile-Pacemaker-Cluster" + iam_instance_profile: "{{ __sap_vm_provision_aws_ha_iam_instance_profile }}" access_key: "{{ 
sap_vm_provision_aws_access_key }}" secret_key: "{{ sap_vm_provision_aws_secret_access_key }}" loop: "{{ [ [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}" diff --git a/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/execute_main.yml index 3639180..cd82b75 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/execute_main.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/execute_main.yml @@ -44,6 +44,50 @@ auth_kind: "serviceaccount" service_account_file: "{{ sap_vm_provision_gcp_credentials_json }}" + - name: Gather GCP Private DNS information + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_gcp_pdns_info + google.cloud.gcp_dns_managed_zone_info: + project: "{{ sap_vm_provision_gcp_project }}" + dns_name: "{{ sap_vm_provision_dns_root_domain }}." + auth_kind: "serviceaccount" + service_account_file: "{{ sap_vm_provision_gcp_credentials_json }}" + + # There is no reason to proceed if resources were not found. + - name: "Check if resources were found (OS Image, VPC, VPC Subnet, DNS)" + # Ensure that fail message is shown in rescue block + register: __sap_vm_provision_task_gcp_resources_found + ansible.builtin.fail: + msg: | + Variable {{ item[0] }} has no resources. + Ensure that provided {{ item[1] }} exists. + loop_control: + label: "{{ item[1] }}" + loop: + - ["{{ __sap_vm_provision_task_gcp_os_image_info }}", "OS Image"] + - ["{{ __sap_vm_provision_task_gcp_vpc_info }}", "VPC"] + - ["{{ __sap_vm_provision_task_gcp_vpc_subnet_info }}", "VPC Subnet"] + - ["{{ __sap_vm_provision_task_gcp_pdns_info }}", "DNS"] + when: item[0].resources is not defined or item[0].resources | length == 0 + + + # - name: Create Placement Policies when High Availability + # no_log: "{{ __sap_vm_provision_no_log }}" + # register: __sap_vm_provision_task_gcp_availability_policy + # run_once: true + # google.cloud.gcp_compute_resource_policy: + # project: "{{ sap_vm_provision_gcp_project }}" + # region: "{{ sap_vm_provision_gcp_region }}" + # name: "{{ sap_vm_provision_aws_placement_resource_name }}-{{ item }}" + # description: "Spread Placement Policy created by Ansible Playbooks for SAP" + # # VM instances (HA Pairs) spread across up to 3 Availability Domains (different racks) + # group_placement_policy: + # availability_domain_count: 3 # Set Placement Policy to Spread + # # collocation: COLLOCATED # Set Placement Policy to Collocated, not used + # # vm_count: 0 # Use when bulk-create of VMs, not used + # auth_kind: "serviceaccount" + # service_account_file: "{{ sap_vm_provision_gcp_credentials_json }}" + - name: Set fact to hold loop variables from include_tasks ansible.builtin.set_fact: register_provisioned_host_all: [] @@ -77,7 +121,7 @@ - name: Set fact to hold all inventory hosts in all groups ansible.builtin.set_fact: - groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}" + groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['anydb_primary'] | default([]) ] , [ groups['anydb_secondary'] | default([]) ] , [ groups['nwas_ascs'] 
| default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}" - name: Set Ansible Vars register: __sap_vm_provision_task_ansible_vars_set @@ -95,25 +139,6 @@ auth_kind: "serviceaccount" service_account_file: "{{ sap_vm_provision_gcp_credentials_json }}" - - name: Gather GCP VPC Subnet information - no_log: "{{ __sap_vm_provision_no_log }}" - register: __sap_vm_provision_task_gcp_vpc_subnet_info - google.cloud.gcp_compute_subnetwork_info: - project: "{{ sap_vm_provision_gcp_project }}" - region: "{{ sap_vm_provision_gcp_region }}" - filters: - - name = {{ sap_vm_provision_gcp_vpc_subnet_name }} - auth_kind: "serviceaccount" - service_account_file: "{{ sap_vm_provision_gcp_credentials_json }}" - - - name: Gather GCP Private DNS information - no_log: "{{ __sap_vm_provision_no_log }}" - register: __sap_vm_provision_task_gcp_pdns_info - google.cloud.gcp_dns_managed_zone_info: - project: "{{ sap_vm_provision_gcp_project }}" - dns_name: "{{ sap_vm_provision_dns_root_domain }}." - auth_kind: "serviceaccount" - service_account_file: "{{ sap_vm_provision_gcp_credentials_json }}" # - name: Gather information about GCP Router and table for the VPC Subnet # no_log: "{{ __sap_vm_provision_no_log }}" @@ -171,10 +196,10 @@ - __sap_vm_provision_task_provision_host_single - __sap_vm_provision_task_provision_host_single_info - __sap_vm_provision_task_provision_host_all_add - - __sap_vm_provision_task_gcp_vpc_subnet_info - __sap_vm_provision_task_gcp_pdns_info - __sap_vm_provision_task_gcp_router_info - __sap_vm_provision_task_gcp_pdns_records + - __sap_vm_provision_task_gcp_resources_found loop_control: loop_var: loop_item index_var: loop_item_index @@ -282,6 +307,10 @@ ansible.builtin.include_tasks: file: common/set_ansible_vars_storage.yml + - name: Register Package Repositories for OS Images with Bring-Your-Own-Subscription (BYOS) + ansible.builtin.include_tasks: + file: common/register_os.yml + - name: Ansible Task block to execute on target inventory hosts - High Availability delegate_to: "{{ inventory_hostname }}" @@ -296,6 +325,37 @@ state: stopped enabled: false + # Ensure that backend load balancer configuration is allowed before Load balancers are created + # SUSE: https://cloud.google.com/solutions/sap/docs/netweaver-ha-config-sles#enable-back-end-comms + # RHEL: https://cloud.google.com/solutions/sap/docs/netweaver-ha-config-rhel#enable-back-end-comms + - name: Stop google-guest-agent service + ansible.builtin.service: + name: google-guest-agent + state: stopped + + - name: Update /etc/default/instance_configs.cfg file + register: __sap_vm_provision_task_gcp_instance_config + ansible.builtin.blockinfile: + path: /etc/default/instance_configs.cfg + create: true + mode: '0644' + block: | + [IpForwarding] + ethernet_proto_id = 66 + ip_aliases = true + target_instance_ips = false + + [NetworkInterfaces] + dhclient_script = /sbin/google-dhclient-script + dhcp_command = + ip_forwarding = false + setup = true + + - name: Start google-guest-agent service + ansible.builtin.service: + name: google-guest-agent + state: started + - name: Ansible Task block for looped provisioning of High Availability resources for Google Cloud CE VMs delegate_to: localhost diff --git a/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/execute_provision.yml b/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/execute_provision.yml index 6050f77..45e965a 100644 --- 
a/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/execute_provision.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/execute_provision.yml @@ -14,12 +14,16 @@ - sap_vm_provision_calculate_sap_hana_scaleout_active_coordinator is defined - not inventory_hostname in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan].keys() +- name: Set fact for host specifications of the provision target + ansible.builtin.set_fact: + target_provision_host_spec: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)] }}" + # Create flat list with names for each volume to be created. - name: Set fact for target device map ansible.builtin.set_fact: storage_disks_map: | {% set disks_map = [] -%} - {% for storage_item in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].storage_definition -%} + {% for storage_item in target_provision_host_spec.storage_definition -%} {% for idx in range(0, storage_item.disk_count | default(1)) -%} {% if (storage_item.filesystem_type is defined) -%} {% if ('swap' in storage_item.filesystem_type and storage_item.swap_path is not defined) @@ -29,7 +33,8 @@ 'definition_key': storage_item.name, 'name': storage_item.name + idx|string, 'size': storage_item.disk_size | default(0), - 'type': storage_item.disk_type | default('') + 'type': storage_item.disk_type | default('pd-balanced'), + 'iops': storage_item.disk_iops | default(omit) } ]) %} {%- endif %} @@ -51,6 +56,8 @@ zone: "{{ sap_vm_provision_gcp_region_zone }}" name: "{{ inventory_hostname + '-vol-' + vol_item.name | replace('_', '-')}}" size_gb: "{{ vol_item.size }}" + type: "{{ vol_item.type }}" + provisioned_iops: "{{ vol_item.iops | default(omit) }}" auth_kind: "serviceaccount" service_account_file: "{{ sap_vm_provision_gcp_credentials_json }}" loop: "{{ storage_disks_map }}" @@ -101,8 +108,8 @@ project: "{{ sap_vm_provision_gcp_project }}" zone: "{{ sap_vm_provision_gcp_region_zone }}" name: "{{ inventory_hostname }}" - machine_type: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].virtual_machine_profile }}" - can_ip_forward: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].disable_ip_anti_spoofing }}" # When disable the Anti IP Spoofing = true, then Can IP Forward = true + machine_type: "{{ target_provision_host_spec.virtual_machine_profile }}" + can_ip_forward: "{{ target_provision_host_spec.disable_ip_anti_spoofing }}" # When disable the Anti IP Spoofing = true, then Can IP Forward = true network_interfaces: - network: selfLink: "{{ __sap_vm_provision_task_gcp_vpc_info.resources[0].selfLink }}" @@ -122,12 +129,6 @@ auth_kind: "serviceaccount" service_account_file: "{{ sap_vm_provision_gcp_credentials_json }}" -# Required as state: present on Ansible Module gcp_compute_instance does not allow for waiting until VM has 
booted -- name: Wait 90 seconds for Google Cloud VM to boot - ansible.builtin.pause: - seconds: 90 - prompt: "" - when: __sap_vm_provision_task_provision_host_single.changed - name: Read Google Cloud VM information no_log: "{{ __sap_vm_provision_no_log }}" @@ -171,6 +172,12 @@ ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no -o ProxyCommand='ssh -W %h:%p {{ delegate_sap_vm_provision_bastion_user }}@{{ delegate_sap_vm_provision_bastion_public_ip }} -p {{ delegate_sap_vm_provision_bastion_ssh_port }} -i {{ delegate_sap_vm_provision_ssh_bastion_private_key_file_path }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' block: + # Required as state: present on Ansible Module gcp_compute_instance does not allow for waiting until VM has booted + # wait_for_connection is used instead to ensure connection is available before proceeding. + - name: Wait until SSH connection is available + ansible.builtin.wait_for_connection: + timeout: 300 + - name: Create .ssh directory for root user ansible.builtin.file: path: /root/.ssh @@ -203,7 +210,7 @@ - name: Append loop value to register ansible.builtin.set_fact: - __sap_vm_provision_task_provision_host_single: "{{ __sap_vm_provision_task_provision_host_single | combine( { 'host_node' : inventory_hostname } , { 'sap_host_type' : lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_host_type } , { 'sap_system_type' : (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_system_type | default('')) } ) }}" + __sap_vm_provision_task_provision_host_single: "{{ __sap_vm_provision_task_provision_host_single | combine( { 'host_node' : inventory_hostname } , { 'sap_host_type' : target_provision_host_spec.sap_host_type } , { 'sap_system_type' : (target_provision_host_spec.sap_system_type | default('')) } ) }}" - name: Append output to merged register ansible.builtin.set_fact: diff --git a/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/execute_setup_ha.yml b/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/execute_setup_ha.yml index ba71fdb..72bdb95 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/execute_setup_ha.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/execute_setup_ha.yml @@ -10,7 +10,7 @@ # state: present # project: "{{ sap_vm_provision_gcp_project }}" # name: "{{ sap_swpm_db_host }}-vip" -# dest_range: "{{ (sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default('192.168.1.90/32')) | regex_replace('/.*', '') }}" +# dest_range: "{{ sap_vm_provision_ha_vip_hana_primary | regex_replace('/.*', '') }}" # next_hop_instance: # selfLink: "{{ __sap_vm_provision_task_provision_host_single_info | json_query('results[*].resources[?name==`' + host_node + '`].selfLink') | flatten | join(' ') }}" # network: @@ -33,7 +33,7 @@ dnsName: "{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}." name: "{{ sap_swpm_db_host }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}." 
target: - - "{{ (sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default('192.168.1.90/32')) | regex_replace('/.*', '') }}" + - "{{ sap_vm_provision_ha_vip_hana_primary | regex_replace('/.*', '') }}" type: A ttl: 7200 auth_kind: "serviceaccount" @@ -52,7 +52,7 @@ # state: present # project: "{{ sap_vm_provision_gcp_project }}" # name: "{{ sap_swpm_db_host }}-vip" -# dest_range: "{{ (sap_vm_temp_vip_anydb_primary | default('192.168.1.90/32')) | regex_replace('/.*', '') }}" +# dest_range: "{{ sap_vm_provision_ha_vip_anydb_primary | regex_replace('/.*', '') }}" # next_hop_instance: # selfLink: "{{ __sap_vm_provision_task_provision_host_single_info | json_query('results[*].resources[?name==`' + host_node + '`].selfLink') | flatten | join(' ') }}" # network: @@ -75,7 +75,7 @@ dnsName: "{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}." name: "{{ sap_swpm_db_host }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}." target: - - "{{ (sap_vm_temp_vip_anydb_primary | default('192.168.1.90/32')) | regex_replace('/.*', '') }}" + - "{{ sap_vm_provision_ha_vip_anydb_primary | regex_replace('/.*', '') }}" type: A ttl: 7200 auth_kind: "serviceaccount" @@ -94,7 +94,7 @@ # state: present # project: "{{ sap_vm_provision_gcp_project }}" # name: "{{ sap_swpm_ascs_instance_hostname }}-vip" -# dest_range: "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default('192.168.2.10/32')) | regex_replace('/.*', '') }}" +# dest_range: "{{ sap_vm_provision_ha_vip_nwas_abap_ascs | regex_replace('/.*', '') }}" # next_hop_instance: # selfLink: "{{ __sap_vm_provision_task_provision_host_single_info | json_query('results[*].resources[?name==`' + host_node + '`].selfLink') | flatten | join(' ') }}" # network: @@ -117,7 +117,7 @@ dnsName: "{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}." name: "{{ sap_swpm_ascs_instance_hostname }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}." target: - - "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default('192.168.2.10/32')) | regex_replace('/.*', '') }}" + - "{{ sap_vm_provision_ha_vip_nwas_abap_ascs | regex_replace('/.*', '') }}" type: A ttl: 7200 auth_kind: "serviceaccount" @@ -136,7 +136,7 @@ # state: present # project: "{{ sap_vm_provision_gcp_project }}" # name: "{{ sap_swpm_ers_instance_hostname }}-vip" -# dest_range: "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default('192.168.2.11/32')) | regex_replace('/.*', '') }}" +# dest_range: "{{ sap_vm_provision_ha_vip_nwas_abap_ers | regex_replace('/.*', '') }}" # next_hop_instance: # selfLink: "{{ __sap_vm_provision_task_provision_host_single_info | json_query('results[*].resources[?name==`' + host_node + '`].selfLink') | flatten | join(' ') }}" # network: @@ -159,7 +159,7 @@ dnsName: "{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}." name: "{{ sap_swpm_ers_instance_hostname }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}." 
target: - - "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default('192.168.2.11/32')) | regex_replace('/.*', '') }}" + - "{{ sap_vm_provision_ha_vip_nwas_abap_ers | regex_replace('/.*', '') }}" type: A ttl: 7200 auth_kind: "serviceaccount" @@ -180,7 +180,7 @@ # state: present # project: "{{ sap_vm_provision_gcp_project }}" # name: "{{ sap_swpm_pas_instance_hostname }}-vip" -# dest_range: "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_pas_ip_address | default('192.168.2.12/32')) | regex_replace('/.*', '') }}" +# dest_range: "{{ sap_vm_provision_ha_vip_nwas_abap_pas | regex_replace('/.*', '') }}" # next_hop_instance: # selfLink: "{{ __sap_vm_provision_task_provision_host_single_info | json_query('results[*].resources[?name==`' + host_node + '`].selfLink') | flatten | join(' ') }}" # network: @@ -203,7 +203,7 @@ # dnsName: "{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}." # name: "{{ sap_swpm_pas_instance_hostname }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}." # target: -# - "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_pas_ip_address | default('192.168.2.12/32')) | regex_replace('/.*', '') }}" +# - "{{ sap_vm_provision_ha_vip_nwas_abap_pas | regex_replace('/.*', '') }}" # type: A # ttl: 7200 # auth_kind: "serviceaccount" @@ -222,7 +222,7 @@ # state: present # project: "{{ sap_vm_provision_gcp_project }}" # name: "{{ sap_swpm_aas_instance_hostname }}-vip" -# dest_range: "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_aas_ip_address | default('192.168.2.13/32')) | regex_replace('/.*', '') }}" +# dest_range: "{{ sap_vm_provision_ha_vip_nwas_abap_aas | regex_replace('/.*', '') }}" # next_hop_instance: # selfLink: "{{ __sap_vm_provision_task_provision_host_single_info | json_query('results[*].resources[?name==`' + host_node + '`].selfLink') | flatten | join(' ') }}" # network: @@ -245,7 +245,7 @@ # dnsName: "{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}." # name: "{{ sap_swpm_aas_instance_hostname }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}." 
# target: -# - "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_aas_ip_address | default('192.168.2.13/32')) | regex_replace('/.*', '') }}" +# - "{{ sap_vm_provision_ha_vip_nwas_abap_aas | regex_replace('/.*', '') }}" # type: A # ttl: 7200 # auth_kind: "serviceaccount" @@ -280,7 +280,7 @@ project: "{{ sap_vm_provision_gcp_project }}" region: "{{ sap_vm_provision_gcp_region }}" subnetwork: { "selfLink": "{{ __sap_vm_provision_task_gcp_vpc_subnet_info.resources[0].selfLink }}" } - name: "lb-reserved-static-ip-vip-hana-{{ vip_item_nr }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-reserved-static-ip-vip' + (vip_item_nr | string) }}" address_type: internal address: "{{ vip_item | regex_replace('/.*', '') }}" #network_tier: PREMIUM # An address with type INTERNAL cannot have a network tier @@ -291,7 +291,7 @@ - vip_item | length > 0 - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0) loop: - - "{{ sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default('192.168.1.90/32') }}" + - "{{ sap_vm_provision_ha_vip_hana_primary }}" loop_control: index_var: vip_item_nr loop_var: vip_item @@ -302,7 +302,7 @@ google.cloud.gcp_compute_health_check: state: present project: "{{ sap_vm_provision_gcp_project }}" - name: "lb-probe-hc-vip-hana" + name: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-probe-hc-vip' }}" type: TCP tcp_health_check: port: 55550 @@ -338,7 +338,7 @@ google.cloud.gcp_compute_instance_group: state: present project: "{{ sap_vm_provision_gcp_project }}" - name: "lb-instance-group-hana-primary" + name: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-primary-instance-group' }}" zone: "{{ __sap_vm_provision_task_provision_host_single_info | json_query('results[*].resources[?name==`' + host_node + '`].zone') | flatten | join(' ') | basename }}" instances: - { "selfLink": "{{ __sap_vm_provision_task_provision_host_single_info | json_query('results[*].resources[?name==`' + host_node + '`].selfLink') | flatten | join(' ') }}" } @@ -359,7 +359,7 @@ google.cloud.gcp_compute_instance_group: state: present project: "{{ sap_vm_provision_gcp_project }}" - name: "lb-instance-group-hana-secondary" + name: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-secondary-instance-group' }}" zone: "{{ __sap_vm_provision_task_provision_host_single_info | json_query('results[*].resources[?name==`' + host_node + '`].zone') | flatten | join(' ') | basename }}" instances: - { "selfLink": "{{ __sap_vm_provision_task_provision_host_single_info | json_query('results[*].resources[?name==`' + host_node + '`].selfLink') | flatten | join(' ') }}" } @@ -383,7 +383,7 @@ state: present project: "{{ sap_vm_provision_gcp_project }}" region: "{{ sap_vm_provision_gcp_region }}" - name: "lb-backend-service-hana" + name: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-backend-service' }}" backends: - group: "{{ __sap_vm_provision_task_gcp_lb_instance_group1.results[0].selfLink }}" balancing_mode: CONNECTION # UTILIZATION , RATE , CONNECTION @@ -412,13 +412,13 @@ state: present project: "{{ sap_vm_provision_gcp_project }}" region: "{{ sap_vm_provision_gcp_region }}" - name: "lb-fwd-rule-hana-{{ vip_item_nr }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-fwd-rule' + (vip_item_nr | string) }}" #target: "{{ target_instance_group_pool }}" ip_address: "{{ vip_item | regex_replace('/.*', '') }}" all_ports: true # For internal network load balancing, allow any ports to be forwarded to the backend service (can not be set if ports are defined) allow_global_access: false 
# Only for use if access to the SAP HANA Database Server is required from outside of the GCP Region backend_service: { "selfLink": "{{ __sap_vm_provision_task_gcp_lb_backend_service_regional.selfLink }}" } # Mandatory, otherwise error "Invalid value for field 'resource.target'" - #backend_service: { "selfLink": "https://www.googleapis.com/compute/v1/projects/{{ sap_vm_provision_gcp_project }}/regions/{{ sap_vm_provision_gcp_region }}/backendServices/lb-backend-service-hana" } + #backend_service: { "selfLink": "https://www.googleapis.com/compute/v1/projects/{{ sap_vm_provision_gcp_project }}/regions/{{ sap_vm_provision_gcp_region }}/backendServices/{{ sap_vm_provision_ha_load_balancer_name_hana + '-backend-service' }}" } subnetwork: { "selfLink": "{{ __sap_vm_provision_task_gcp_vpc_subnet_info.resources[0].selfLink }}" } load_balancing_scheme: INTERNAL network_tier: PREMIUM @@ -428,7 +428,7 @@ - vip_item | length > 0 - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0) loop: - - "{{ sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default('192.168.1.90/32') }}" + - "{{ sap_vm_provision_ha_vip_hana_primary }}" loop_control: index_var: vip_item_nr loop_var: vip_item @@ -440,7 +440,7 @@ project: "{{ sap_vm_provision_gcp_project }}" region: "{{ sap_vm_provision_gcp_region }}" filters: - - name = "lb-backend-service-hana" + - name = "{{ sap_vm_provision_ha_load_balancer_name_hana + '-backend-service' }}" auth_kind: "serviceaccount" service_account_file: "{{ sap_vm_provision_gcp_credentials_json }}" when: @@ -455,7 +455,7 @@ project: "{{ sap_vm_provision_gcp_project }}" region: "{{ sap_vm_provision_gcp_region }}" subnetwork: { "selfLink": "{{ __sap_vm_provision_task_gcp_vpc_subnet_info.resources[0].selfLink }}" } - name: "lb-reserved-static-ip-vip-anydb-{{ vip_item_nr }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_anydb + '-reserved-static-ip-vip' + (vip_item_nr | string) }}" address_type: internal address: "{{ vip_item | regex_replace('/.*', '') }}" #network_tier: PREMIUM # An address with type INTERNAL cannot have a network tier @@ -466,7 +466,7 @@ - vip_item | length > 0 - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0) loop: - - "{{ sap_vm_temp_vip_anydb_primary | default('192.168.1.90/32') }}" + - "{{ sap_vm_provision_ha_vip_anydb_primary }}" loop_control: index_var: vip_item_nr loop_var: vip_item @@ -477,7 +477,7 @@ google.cloud.gcp_compute_health_check: state: present project: "{{ sap_vm_provision_gcp_project }}" - name: "lb-probe-hc-vip-anydb" + name: "{{ sap_vm_provision_ha_load_balancer_name_anydb + '-probe-hc-vip' }}" type: TCP tcp_health_check: port: 55550 @@ -513,7 +513,7 @@ google.cloud.gcp_compute_instance_group: state: present project: "{{ sap_vm_provision_gcp_project }}" - name: "lb-instance-group-anydb-primary" + name: "{{ sap_vm_provision_ha_load_balancer_name_anydb + '-primary-instance-group' }}" zone: "{{ __sap_vm_provision_task_provision_host_single_info | json_query('results[*].resources[?name==`' + host_node + '`].zone') | flatten | join(' ') | basename }}" instances: - { "selfLink": "{{ __sap_vm_provision_task_provision_host_single_info | json_query('results[*].resources[?name==`' + host_node + '`].selfLink') | flatten | join(' ') }}" } @@ -534,7 +534,7 @@ google.cloud.gcp_compute_instance_group: state: present project: "{{ sap_vm_provision_gcp_project }}" - name: "lb-instance-group-anydb-secondary" + name: "{{ sap_vm_provision_ha_load_balancer_name_anydb + '-secondary-instance-group' }}" zone: "{{ 
__sap_vm_provision_task_provision_host_single_info | json_query('results[*].resources[?name==`' + host_node + '`].zone') | flatten | join(' ') | basename }}" instances: - { "selfLink": "{{ __sap_vm_provision_task_provision_host_single_info | json_query('results[*].resources[?name==`' + host_node + '`].selfLink') | flatten | join(' ') }}" } @@ -558,7 +558,7 @@ state: present project: "{{ sap_vm_provision_gcp_project }}" region: "{{ sap_vm_provision_gcp_region }}" - name: "lb-backend-service-anydb" + name: "{{ sap_vm_provision_ha_load_balancer_name_anydb + '-backend-service' }}" backends: - group: "{{ __sap_vm_provision_task_gcp_lb_instance_group1.results[0].selfLink }}" balancing_mode: CONNECTION # UTILIZATION , RATE , CONNECTION @@ -587,13 +587,13 @@ state: present project: "{{ sap_vm_provision_gcp_project }}" region: "{{ sap_vm_provision_gcp_region }}" - name: "lb-fwd-rule-anydb-{{ vip_item_nr }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_anydb + '-fwd-rule' + (vip_item_nr | string) }}" #target: "{{ target_instance_group_pool }}" ip_address: "{{ vip_item | regex_replace('/.*', '') }}" all_ports: true # For internal network load balancing, allow any ports to be forwarded to the backend service (can not be set if ports are defined) allow_global_access: false # Only for use if access to the SAP AnyDB Database Server is required from outside of the GCP Region backend_service: { "selfLink": "{{ __sap_vm_provision_task_gcp_lb_backend_service_regional.selfLink }}" } # Mandatory, otherwise error "Invalid value for field 'resource.target'" - #backend_service: { "selfLink": "https://www.googleapis.com/compute/v1/projects/{{ sap_vm_provision_gcp_project }}/regions/{{ sap_vm_provision_gcp_region }}/backendServices/lb-backend-service-anydb" } + #backend_service: { "selfLink": "https://www.googleapis.com/compute/v1/projects/{{ sap_vm_provision_gcp_project }}/regions/{{ sap_vm_provision_gcp_region }}/backendServices/{{ sap_vm_provision_ha_load_balancer_name_anydb + '-backend-service' }}" } subnetwork: { "selfLink": "{{ __sap_vm_provision_task_gcp_vpc_subnet_info.resources[0].selfLink }}" } load_balancing_scheme: INTERNAL network_tier: PREMIUM @@ -603,7 +603,7 @@ - vip_item | length > 0 - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0) loop: - - "{{ sap_vm_temp_vip_anydb_primary | default('192.168.1.90/32') }}" + - "{{ sap_vm_provision_ha_vip_anydb_primary }}" loop_control: index_var: vip_item_nr loop_var: vip_item @@ -615,7 +615,7 @@ project: "{{ sap_vm_provision_gcp_project }}" region: "{{ sap_vm_provision_gcp_region }}" filters: - - name = "lb-backend-service-anydb" + - name = "{{ sap_vm_provision_ha_load_balancer_name_anydb + '-backend-service' }}" auth_kind: "serviceaccount" service_account_file: "{{ sap_vm_provision_gcp_credentials_json }}" when: @@ -630,9 +630,9 @@ project: "{{ sap_vm_provision_gcp_project }}" region: "{{ sap_vm_provision_gcp_region }}" subnetwork: { "selfLink": "{{ __sap_vm_provision_task_gcp_vpc_subnet_info.resources[0].selfLink }}" } - name: "lb-reserved-static-ip-vip-nwas-ascs" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-reserved-static-ip-vip' }}" address_type: internal - address: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | regex_replace('/.*', '') }}" + address: "{{ sap_vm_provision_ha_vip_nwas_abap_ascs | regex_replace('/.*', '') }}" #network_tier: PREMIUM # An address with type INTERNAL cannot have a network tier purpose: GCE_ENDPOINT # GCE_ENDPOINT is for addresses used by VMs, alias IP ranges, 
and internal load balancers auth_kind: "serviceaccount" @@ -648,9 +648,9 @@ project: "{{ sap_vm_provision_gcp_project }}" region: "{{ sap_vm_provision_gcp_region }}" subnetwork: { "selfLink": "{{ __sap_vm_provision_task_gcp_vpc_subnet_info.resources[0].selfLink }}" } - name: "lb-reserved-static-ip-vip-nwas-ers" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ers-reserved-static-ip-vip' }}" address_type: internal - address: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | regex_replace('/.*', '') }}" + address: "{{ sap_vm_provision_ha_vip_nwas_abap_ers | regex_replace('/.*', '') }}" #network_tier: PREMIUM # An address with type INTERNAL cannot have a network tier purpose: GCE_ENDPOINT # GCE_ENDPOINT is for addresses used by VMs, alias IP ranges, and internal load balancers auth_kind: "serviceaccount" @@ -664,7 +664,7 @@ google.cloud.gcp_compute_health_check: state: present project: "{{ sap_vm_provision_gcp_project }}" - name: "lb-probe-hc-vip-nwas-ascs" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-probe-hc-vip' }}" type: TCP tcp_health_check: port: 55551 @@ -684,7 +684,7 @@ google.cloud.gcp_compute_health_check: state: present project: "{{ sap_vm_provision_gcp_project }}" - name: "lb-probe-hc-vip-nwas-ers" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ers-probe-hc-vip' }}" type: TCP tcp_health_check: port: 55552 @@ -720,7 +720,7 @@ google.cloud.gcp_compute_instance_group: state: present project: "{{ sap_vm_provision_gcp_project }}" - name: "lb-instance-group-nwas-ascs" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-instance-group' }}" zone: "{{ __sap_vm_provision_task_provision_host_single_info | json_query('results[*].resources[?name==`' + host_node + '`].zone') | flatten | join(' ') | basename }}" instances: - { "selfLink": "{{ __sap_vm_provision_task_provision_host_single_info | json_query('results[*].resources[?name==`' + host_node + '`].selfLink') | flatten | join(' ') }}" } @@ -741,7 +741,7 @@ google.cloud.gcp_compute_instance_group: state: present project: "{{ sap_vm_provision_gcp_project }}" - name: "lb-instance-group-nwas-ers" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ers-instance-group' }}" zone: "{{ __sap_vm_provision_task_provision_host_single_info | json_query('results[*].resources[?name==`' + host_node + '`].zone') | flatten | join(' ') | basename }}" instances: - { "selfLink": "{{ __sap_vm_provision_task_provision_host_single_info | json_query('results[*].resources[?name==`' + host_node + '`].selfLink') | flatten | join(' ') }}" } @@ -765,7 +765,7 @@ state: present project: "{{ sap_vm_provision_gcp_project }}" region: "{{ sap_vm_provision_gcp_region }}" - name: "lb-backend-service-nwas-ascs" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-backend-service' }}" backends: - group: "{{ __sap_vm_provision_task_gcp_lb_instance_group1.results[0].selfLink }}" balancing_mode: CONNECTION # UTILIZATION , RATE , CONNECTION @@ -796,7 +796,7 @@ state: present project: "{{ sap_vm_provision_gcp_project }}" region: "{{ sap_vm_provision_gcp_region }}" - name: "lb-backend-service-nwas-ers" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ers-backend-service' }}" backends: - group: "{{ __sap_vm_provision_task_gcp_lb_instance_group2.results[0].selfLink }}" balancing_mode: CONNECTION # UTILIZATION , RATE , CONNECTION @@ -820,18 +820,18 @@ - name: Create Google Cloud Compute Engine Forwarding Rule (aka. 
Frontend IP and Port) for SAP NetWeaver ASCS no_log: "{{ __sap_vm_provision_no_log }}" - register: __sap_vm_provision_task_gcp_lb_forwarding_rule + register: __sap_vm_provision_task_gcp_lb_forwarding_rule_nwas_ascs google.cloud.gcp_compute_forwarding_rule: state: present project: "{{ sap_vm_provision_gcp_project }}" region: "{{ sap_vm_provision_gcp_region }}" - name: "lb-fwd-rule-nwas-ascs" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-fwd-rule' }}" #target: "{{ target_instance_group_pool }}" - ip_address: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | regex_replace('/.*', '') }}" + ip_address: "{{ sap_vm_provision_ha_vip_nwas_abap_ascs | regex_replace('/.*', '') }}" all_ports: true # For internal network load balancing, allow any ports to be forwarded to the backend service (can not be set if ports are defined) allow_global_access: false # Only for use if access to the SAP NetWeaver Database Server is required from outside of the GCP Region backend_service: { "selfLink": "{{ __sap_vm_provision_task_gcp_lb_backend_service_regional_ascs.selfLink }}" } # Mandatory, otherwise error "Invalid value for field 'resource.target'" - #backend_service: { "selfLink": "https://www.googleapis.com/compute/v1/projects/{{ sap_vm_provision_gcp_project }}/regions/{{ sap_vm_provision_gcp_region }}/backendServices/lb-backend-service-nwas-ascs" } + #backend_service: { "selfLink": "https://www.googleapis.com/compute/v1/projects/{{ sap_vm_provision_gcp_project }}/regions/{{ sap_vm_provision_gcp_region }}/backendServices/{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-backend-service' }}" } subnetwork: { "selfLink": "{{ __sap_vm_provision_task_gcp_vpc_subnet_info.resources[0].selfLink }}" } load_balancing_scheme: INTERNAL network_tier: PREMIUM @@ -842,18 +842,18 @@ - name: Create Google Cloud Compute Engine Forwarding Rule (aka. 
Frontend IP and Port) for SAP NetWeaver ERS no_log: "{{ __sap_vm_provision_no_log }}" - register: __sap_vm_provision_task_gcp_lb_forwarding_rule + register: __sap_vm_provision_task_gcp_lb_forwarding_rule_nwas_ers google.cloud.gcp_compute_forwarding_rule: state: present project: "{{ sap_vm_provision_gcp_project }}" region: "{{ sap_vm_provision_gcp_region }}" - name: "lb-fwd-rule-nwas-ascs" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ers-fwd-rule' }}" #target: "{{ target_instance_group_pool }}" - ip_address: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | regex_replace('/.*', '') }}" + ip_address: "{{ sap_vm_provision_ha_vip_nwas_abap_ers | regex_replace('/.*', '') }}" all_ports: true # For internal network load balancing, allow any ports to be forwarded to the backend service (can not be set if ports are defined) allow_global_access: false # Only for use if access to the SAP NetWeaver Database Server is required from outside of the GCP Region - backend_service: { "selfLink": "{{ __sap_vm_provision_task_gcp_lb_backend_service_regional_ascs.selfLink }}" } # Mandatory, otherwise error "Invalid value for field 'resource.target'" - #backend_service: { "selfLink": "https://www.googleapis.com/compute/v1/projects/{{ sap_vm_provision_gcp_project }}/regions/{{ sap_vm_provision_gcp_region }}/backendServices/lb-backend-service-nwas-ascs" } + backend_service: { "selfLink": "{{ __sap_vm_provision_task_gcp_lb_backend_service_regional_ers.selfLink }}" } # Mandatory, otherwise error "Invalid value for field 'resource.target'" + #backend_service: { "selfLink": "https://www.googleapis.com/compute/v1/projects/{{ sap_vm_provision_gcp_project }}/regions/{{ sap_vm_provision_gcp_region }}/backendServices/{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ers-backend-service' }}" } subnetwork: { "selfLink": "{{ __sap_vm_provision_task_gcp_vpc_subnet_info.resources[0].selfLink }}" } load_balancing_scheme: INTERNAL network_tier: PREMIUM @@ -870,7 +870,7 @@ project: "{{ sap_vm_provision_gcp_project }}" region: "{{ sap_vm_provision_gcp_region }}" filters: - - name = "lb-backend-service-nwas-ascs" + - name = "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-backend-service' }}" auth_kind: "serviceaccount" service_account_file: "{{ sap_vm_provision_gcp_credentials_json }}" when: @@ -883,7 +883,7 @@ project: "{{ sap_vm_provision_gcp_project }}" region: "{{ sap_vm_provision_gcp_region }}" filters: - - name = "lb-backend-service-nwas-ers" + - name = "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ers-backend-service' }}" auth_kind: "serviceaccount" service_account_file: "{{ sap_vm_provision_gcp_credentials_json }}" when: diff --git a/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/post_deployment_execute.yml b/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/post_deployment_execute.yml index d9dc144..57e0540 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/post_deployment_execute.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/post_deployment_execute.yml @@ -10,7 +10,6 @@ # GCP_AUTH_KIND: "serviceaccount" # GCP_SERVICE_ACCOUNT_FILE: "{{ sap_vm_provision_gcp_credentials_json }}" when: - - sap_ha_pacemaker_cluster_msazure_resource_group is defined - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0)) block: @@ -50,7 +49,7 @@ 
google.cloud.gcp_compute_health_check: state: present project: "{{ sap_vm_provision_gcp_project }}" - name: "lb-probe-hc-vip-hana" + name: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-probe-hc-vip' }}" type: TCP tcp_health_check: port: "{{ __sap_vm_provision_task_gcp_lb_healthcheck_hana }}" @@ -70,7 +69,7 @@ google.cloud.gcp_compute_health_check: state: present project: "{{ sap_vm_provision_gcp_project }}" - name: "lb-probe-hc-vip-anydb" + name: "{{ sap_vm_provision_ha_load_balancer_name_anydb + '-probe-hc-vip' }}" type: TCP tcp_health_check: port: 62700 @@ -90,7 +89,7 @@ google.cloud.gcp_compute_health_check: state: present project: "{{ sap_vm_provision_gcp_project }}" - name: "lb-probe-hc-vip-nwas-ascs" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-probe-hc-vip' }}" type: TCP tcp_health_check: port: "{{ __sap_vm_provision_task_gcp_lb_healthcheck_nwas_ascs }}" @@ -110,7 +109,7 @@ google.cloud.gcp_compute_health_check: state: present project: "{{ sap_vm_provision_gcp_project }}" - name: "lb-probe-hc-vip-nwas-ers" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ers-probe-hc-vip' }}" type: TCP tcp_health_check: port: "{{ __sap_vm_provision_task_gcp_lb_healthcheck_nwas_ers }}" diff --git a/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_powervs/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_powervs/execute_main.yml index a7642b5..ce786b0 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_powervs/execute_main.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_powervs/execute_main.yml @@ -3,16 +3,21 @@ - name: Set fact for IBM Power VS location to the colocated IBM Cloud Availability Zone (VPC) ansible.builtin.set_fact: list_ibmcloud_powervs_location_to_ibmcloud_availability_zone: + dal10: "us-south-1" dal12: "us-south-2" - us-south: "us-south-3" # naming of IBM Power VS location 'us-south' was previous naming convention, would otherwise be 'DAL13' - us-east: "us-east-1" # naming of IBM Power VS location 'us-east' was previous naming convention, would otherwise be 'WDC04' - # wdc06: "us-east-2" # No Cloud Connection available at this location + us-south: "us-south-3" # naming of IBM Power VS location 'us-south' was previous naming convention, would otherwise be 'dal13' + us-east: "us-east-1" # naming of IBM Power VS location 'us-east' was previous naming convention, would otherwise be 'wdc04' + wdc06: "us-east-2" + wdc07: "us-east-3" sao01: "br-sao-1" + sao02: "br-sao-2" tor01: "ca-tor-1" - eu-de-1: "eu-de-2" # naming of IBM Power VS location 'eu-de-1' was previous naming convention, would otherwise be 'FRA04' - eu-de-2: "eu-de-3" # naming of IBM Power VS location 'eu-de-2' was previous naming convention, would otherwise be 'FRA05' + eu-de-1: "eu-de-2" # naming of IBM Power VS location 'eu-de-1' was previous naming convention, would otherwise be 'fra04' + eu-de-2: "eu-de-3" # naming of IBM Power VS location 'eu-de-2' was previous naming convention, would otherwise be 'fra05' lon04: "eu-gb-1" lon06: "eu-gb-3" + mad02: "eu-es-1" + mad04: "eu-es-2" syd04: "au-syd-2" syd05: "au-syd-3" tok04: "jp-tok-2" @@ -23,16 +28,21 @@ - name: Set fact for IBM Power VS location to the IBM Power VS Region API Endpoints ansible.builtin.set_fact: list_ibmcloud_powervs_location_to_powervs_region: + dal10: "us-south" dal12: "us-south" us-south: "us-south" us-east: "us-east" - # wdc06: "us-east" # no Cloud Connection available at this location + wdc06: "us-east" + wdc07: "us-east" sao01: "sao" + sao02: "sao" tor01: 
"tor" eu-de-1: "eu-de" eu-de-2: "eu-de" lon04: "lon" lon06: "lon" + mad02: "mad" + mad04: "mad" syd04: "syd" syd05: "syd" tok04: "tok" @@ -56,6 +66,7 @@ - name: Identify Resource Group info no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_resource_group + run_once: true ibm.cloudcollection.ibm_resource_group_info: name: "{{ sap_vm_provision_ibmcloud_resource_group_name }}" ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -65,6 +76,7 @@ - name: Identify Resource Group info for IBM Cloud Private DNS no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_resource_group_dns + run_once: true ibm.cloudcollection.ibm_resource_group_info: name: "{{ sap_vm_provision_ibmcloud_private_dns_resource_group_name }}" ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -73,6 +85,7 @@ - name: Identify IBM Power Infrastructure Workspace no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_pi_workspace_service_instance + run_once: true ibm.cloudcollection.ibm_resource_instance_info: resource_group_id: "{{ __sap_vm_provision_task_ibmcloud_resource_group.resource.id }}" location: "{{ sap_vm_provision_ibmcloud_powervs_location }}" @@ -80,9 +93,20 @@ name: "{{ sap_vm_provision_ibmcloud_powervs_workspace_name }}" ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + - name: Identify IBM Power Infrastructure Workspace capabilities + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_pi_workspace_capabilities + run_once: true + environment: + IC_REGION: "{{ sap_vm_provision_ibmcloud_powervs_region }}" + ibm.cloudcollection.ibm_pi_workspace_info: + pi_cloud_instance_id: "{{ __sap_vm_provision_task_ibmcloud_pi_workspace_service_instance.resource.guid }}" # must be GUID, not CRN + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + - name: Identify pre-loaded IBM Power Infrastructure SSH Public Key info no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_pi_ssh_public_key + run_once: true environment: IC_REGION: "{{ sap_vm_provision_ibmcloud_powervs_region }}" ibm.cloudcollection.ibm_pi_key_info: @@ -93,6 +117,7 @@ - name: Identify IBM Power Infrastructure VLAN Subnet info no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_pi_subnet + run_once: true environment: IC_REGION: "{{ sap_vm_provision_ibmcloud_powervs_region }}" ibm.cloudcollection.ibm_pi_network_info: @@ -107,7 +132,7 @@ then Subnet DNS Default should use IBM Cloud IaaS Backbone DNS Resolver 161.26.0.10/11 (which will be populated into /etc/resolv.conf). Otherwise cloud-init actions during provisioning may not be successful. 
when: - - not sap_vm_provision_proxy_web_forward_proxy_ip is defined + - __sap_vm_provision_task_ibmcloud_pi_workspace_capabilities.resource.pi_workspace_capabilities['power-edge-router'] - not (__sap_vm_provision_task_ibmcloud_pi_subnet.resource.dns | first) in ['161.26.0.10', '161.26.0.11'] # DNS may exist in separate Resource Group @@ -115,6 +140,7 @@ - name: Identify IBM Cloud Private DNS instance no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_pdns_service_instance + run_once: true ibm.cloudcollection.ibm_resource_instance_info: resource_group_id: "{{ __sap_vm_provision_task_ibmcloud_resource_group.resource.id if __sap_vm_provision_task_ibmcloud_resource_group_dns is skipped else __sap_vm_provision_task_ibmcloud_resource_group_dns.resource.id }}" location: global @@ -125,13 +151,30 @@ - name: Identify IBM Cloud Private DNS Zone info no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_pdns + run_once: true ibm.cloudcollection.ibm_dns_zones_info: instance_id: "{{ __sap_vm_provision_task_ibmcloud_pdns_service_instance.resource.guid }}" ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + - name: Identify IBM Cloud Private DNS Custom Resolvers info + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_pdns_custom_resolvers + run_once: true + ibm.cloudcollection.ibm_dns_custom_resolvers_info: + instance_id: "{{ __sap_vm_provision_task_ibmcloud_pdns_service_instance.resource.guid }}" + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + + - name: Check if input IBM Cloud Private DNS Custom Resolver IP exists + ansible.builtin.fail: + msg: + IBM Cloud Private DNS instance does not contain the input Custom Resolver IP Address. + Please create a Custom Resolver in this IBM Cloud Private DNS instance. 
+ when: not sap_vm_provision_ibmcloud_private_dns_custom_resolver_ip in (__sap_vm_provision_task_ibmcloud_pdns_custom_resolvers.resource.custom_resolvers | map(attribute='locations') | list | flatten | map(attribute='dns_server_ip') | list) + - name: Identify IBM Power Infrastructure OS Catalog Stock Image list no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_pi_os_image_list + run_once: true environment: IC_REGION: "{{ sap_vm_provision_ibmcloud_powervs_region }}" ibm.cloudcollection.ibm_pi_catalog_images_info: @@ -146,6 +189,7 @@ - name: Import Boot Image to current IBM Power Infrastructure Workspace from the IBM Power Infrastructure OS Catalog Stock Image no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_pi_os_image_provisioned + run_once: true environment: IC_REGION: "{{ sap_vm_provision_ibmcloud_powervs_region }}" ibm.cloudcollection.ibm_pi_image: @@ -154,11 +198,11 @@ pi_image_name: "{{ sap_vm_provision_ibmcloud_powervs_host_os_image }}-boot" ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" failed_when: not __sap_vm_provision_task_ibmcloud_pi_os_image_provisioned.rc == 0 and not 'already exists' in __sap_vm_provision_task_ibmcloud_pi_os_image_provisioned.stderr - run_once: true - name: Identify IBM Power Infrastructure Workspace imported OS Image list no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_pi_imported_os_image_list + run_once: true environment: IC_REGION: "{{ sap_vm_provision_ibmcloud_powervs_region }}" ibm.cloudcollection.ibm_pi_images_info: @@ -173,13 +217,43 @@ - name: Check for existing Boot Image imported already from IBM Power Infrastructure OS Catalog Stock Image no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_pi_os_image_existing + run_once: true environment: IC_REGION: "{{ sap_vm_provision_ibmcloud_powervs_region }}" ibm.cloudcollection.ibm_pi_image_info: pi_cloud_instance_id: "{{ __sap_vm_provision_task_ibmcloud_pi_workspace_service_instance.resource.guid }}" # must be GUID, not CRN pi_image_name: "{{ register_ibmcloud_pi_imported_os_image_selected.name }}" ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + + - name: Create IBM Power Infrastructure Server Placement Groups when High Availability + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_placement_group + run_once: true + ibm.cloudcollection.ibm_pi_placement_group: + pi_cloud_instance_id: "{{ __sap_vm_provision_task_ibmcloud_pi_workspace_service_instance.resource.guid }}" # must be GUID, not CRN + pi_placement_group_name: "{{ sap_vm_provision_ibmcloud_placement_resource_name }}-{{ item }}" + pi_placement_group_policy: "anti-affinity" + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + loop: + - "{{ 'hana' if 'hana_secondary' in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan] | json_query('*') | map(attribute='sap_host_type') else '' }}" + - "{{ 'anydb' if 'anydb_secondary' in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan] | json_query('*') | map(attribute='sap_host_type') else '' }}" + - "{{ 'nwas' if 'nwas_ers' in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + 
'_host_specifications_dictionary')[sap_vm_provision_host_specification_plan] | json_query('*') | map(attribute='sap_host_type') else '' }}" + when: + - sap_vm_provision_ibmcloud_placement_resource_name is defined + - sap_vm_provision_ibmcloud_placement_strategy_spread + - not item == '' + + - name: Identify created IBM Power Infrastructure Server Placement Groups when High Availability + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_placement_groups_list run_once: true + ibm.cloudcollection.ibm_pi_placement_groups_info: + pi_cloud_instance_id: "{{ __sap_vm_provision_task_ibmcloud_pi_workspace_service_instance.resource.guid }}" # must be GUID, not CRN + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + when: + - sap_vm_provision_ibmcloud_placement_resource_name is defined + - sap_vm_provision_ibmcloud_placement_strategy_spread + - name: Set fact to hold loop variables from include_tasks ansible.builtin.set_fact: @@ -215,7 +289,7 @@ - name: Set fact to hold all inventory hosts in all groups ansible.builtin.set_fact: - groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}" + groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['anydb_primary'] | default([]) ] , [ groups['anydb_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}" - name: Set Ansible Vars register: __sap_vm_provision_task_ansible_vars_set @@ -317,10 +391,15 @@ - name: Append IBM Cloud Private DNS to /etc/resolv.conf ansible.builtin.lineinfile: path: /etc/resolv.conf - line: nameserver 161.26.0.10 - - # Not applicable to the IBM PowerVS Workspace enabled with Power Edge Router (from Q4-2023 onwards) - - name: Register Web Forward Proxy (for legacy Power Infrastructure Cloud Connections networking configuration) + line: nameserver {{ sap_vm_provision_ibmcloud_private_dns_custom_resolver_ip }} + + # Required Web Forward Proxy + # For IBM PowerVS Workspace enabled with Power Edge Router (from Q4-2023 onwards), + # the SNAT (VPC Public Gateway) service is not routable from IBM Power Virtual Server hosts + # and cannot be used for outbound Public Internet connectivity + # For IBM PowerVS Workspace with legacy Cloud Connection, + # required for both outbound Public Internet connectivity and internal traffic to other IBM Cloud Services + - name: Register Web Forward Proxy ansible.builtin.include_tasks: file: common/register_proxy.yml when: sap_vm_provision_proxy_web_forward_proxy_ip is defined @@ -337,7 +416,11 @@ set -o pipefail && if [ "${workspace_legacy_cc}" = true ]; then activation_script_exec=$(cat /usr/share/powervs-fls/powervs-fls-readme.md | grep networklayer.com | sed "s|Private.proxy.IP.address:3128|$web_proxy_ip_port|" | sed 's|. 
||') ; fi set -o pipefail && if [ "${workspace_legacy_cc}" = true ]; then nohup $activation_script_exec >/dev/null 2>&1 ; fi set -o pipefail && if [ "${workspace_legacy_cc}" = true ]; then sleep 120 ; fi - when: ansible_os_family == "RedHat" + when: + - ansible_os_family == 'RedHat' + - sap_vm_provision_os_registration_script_command is undefined + - (sap_vm_provision_os_online_registration_user is undefined) or (sap_vm_provision_os_online_registration_passcode is undefined) + - not __sap_vm_provision_task_ibmcloud_pi_workspace_capabilities.resource.pi_workspace_capabilities['power-edge-router'] # Not applicable to the IBM PowerVS Workspace enabled with Power Edge Router (from Q4-2023 onwards) # Extract the generated command string and activation key from /usr/share, then execute script from /usr/local/bin @@ -351,7 +434,15 @@ set -o pipefail && if [ "${workspace_legacy_cc}" = true ]; then nohup $activation_script_exec >/dev/null 2>&1 ; fi set -o pipefail && if [ "${workspace_legacy_cc}" = true ]; then sleep 120 ; fi set -o pipefail && SUSEConnect --product PackageHub/{{ ansible_distribution_version }}/ppc64le - when: ansible_os_family == "Suse" + when: + - ansible_os_family == 'Suse' + - sap_vm_provision_os_registration_script_command is undefined + - (sap_vm_provision_os_online_registration_user is undefined) or (sap_vm_provision_os_online_registration_passcode is undefined) + - not __sap_vm_provision_task_ibmcloud_pi_workspace_capabilities.resource.pi_workspace_capabilities['power-edge-router'] + + - name: Register Package Repositories for OS Images with Bring-Your-Own-Subscription (BYOS) + ansible.builtin.include_tasks: + file: common/register_os.yml - name: Verify connection to NFS ansible.builtin.wait_for: @@ -361,7 +452,9 @@ sleep: 10 connect_timeout: 15 timeout: 120 - when: sap_vm_provision_nfs_mount_point is defined + when: + - sap_vm_provision_nfs_mount_point is defined + - (sap_vm_provision_nfs_mount_point | default('')) | length > 0 - name: Verify connection to separate NFS for SAP Transport Directory ansible.builtin.wait_for: @@ -371,7 +464,9 @@ sleep: 10 connect_timeout: 15 timeout: 120 - when: sap_vm_provision_nfs_mount_point_separate_sap_transport_dir is defined + when: + - sap_vm_provision_nfs_mount_point_separate_sap_transport_dir is defined + - (sap_vm_provision_nfs_mount_point_separate_sap_transport_dir | default('')) | length > 0 # Ensure lock to RHEL major.minor version # Lock using subscription-manager release --set or /var/lib/rhsm/cache/releasever.json, alternatively using /etc/yum/vars/releasever or /etc/dnf/vars/releasever diff --git a/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_powervs/execute_provision.yml b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_powervs/execute_provision.yml index 3862b07..803063a 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_powervs/execute_provision.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_powervs/execute_provision.yml @@ -14,6 +14,44 @@ - sap_vm_provision_calculate_sap_hana_scaleout_active_coordinator is defined - not inventory_hostname in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan].keys() +- name: Set fact for host specifications of the provision target + ansible.builtin.set_fact: + target_provision_host_spec: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + 
'_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)] }}" + +# Create flat list with names for each volume to be created. +# Create flat list with disk tiers. +- name: Set fact for target device map + ansible.builtin.set_fact: + storage_disks_map: | + {% set disks_map = [] -%} + {% for storage_item in target_provision_host_spec.storage_definition -%} + {% for idx in range(0, storage_item.disk_count | default(1)) -%} + {% if (storage_item.filesystem_type is defined) -%} + {% if ('swap' in storage_item.filesystem_type and storage_item.swap_path is not defined) + or ('swap' not in storage_item.filesystem_type and storage_item.nfs_path is not defined) -%} + {% set vol = disks_map.extend([ + { + 'definition_key': storage_item.name, + 'name': storage_item.name + idx|string, + 'size': storage_item.disk_size | default(0), + 'type': storage_item.disk_type | default('tier3') + } + ]) %} + {%- endif %} + {%- endif %} + {%- endfor %} + {%- endfor %} + {{ disks_map }} + __sap_vm_provision_task_storage_type_tier: "{{ target_provision_host_spec.storage_definition | selectattr('disk_type','defined') | map(attribute='disk_type') | select() | list | unique }}" + +- name: Confirm IBM Power Virtual Server Storage Type Tier + ansible.builtin.fail: + msg: + IBM Power Virtual Servers require a static configuration for the Storage Type Tier, + and all attached Block Storage Volumes must use this Storage Type Tier. + Edit the Storage Definition variable to use the same Storage Type Tier for each Block Storage Volume. + when: __sap_vm_provision_task_storage_type_tier | length > 1 + # Status will change from Building > Warning (VM = Active, Health = Warning) > Active. The Ansible Task will continue once the Active status has been reached. 
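# Illustrative sketch only (hypothetical host_specifications values, not role defaults): given a storage_definition
# containing e.g. { name: 'hana_data', disk_count: 2, disk_size: 384, disk_type: 'tier1', filesystem_type: 'xfs' }
# and { name: 'swap', disk_size: 96, filesystem_type: 'swap', swap_path: '/swapfile' },
# the Jinja2 loop above would build:
#   storage_disks_map:
#     - { definition_key: 'hana_data', name: 'hana_data0', size: 384, type: 'tier1' }
#     - { definition_key: 'hana_data', name: 'hana_data1', size: 384, type: 'tier1' }
# (the swap entry is skipped because swap_path is defined), and __sap_vm_provision_task_storage_type_tier
# would resolve to ['tier1'], which satisfies the single Storage Type Tier check above and feeds
# pi_storage_type in the provisioning task that follows.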
- name: Provision IBM Power Virtual Server instance on IBM Cloud @@ -25,21 +63,36 @@ pi_instance_name: "{{ inventory_hostname }}" pi_image_id: "{{ __sap_vm_provision_task_ibmcloud_pi_os_image_existing.resource.id }}" - pi_sys_type: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].ibmcloud_powervs_hardware_machine_type }}" + pi_sys_type: "{{ target_provision_host_spec.ibmcloud_powervs_hardware_machine_type }}" - pi_sap_profile_id: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].virtual_machine_profile }}" + pi_sap_profile_id: "{{ target_provision_host_spec.virtual_machine_profile }}" pi_key_pair_name: "{{ sap_vm_provision_ibmcloud_powervs_key_pair_name_ssh_host_public_key }}" pi_network: - network_id: "{{ __sap_vm_provision_task_ibmcloud_pi_subnet.resource.id }}" - pi_storage_type: tier1 + # Storage Type Tier is a static configuration for the Virtual Server, it cannot be amended + # All Block Storage Volumes attached to the Virtual Server, must use the set Storage Type Tier: + # tier0 (25 IOPS/GB), tier1 (10 IOPS/GB), tier3 (3 IOPS/GB), tier5k (Fixed 5000 IOPS) + pi_storage_type: "{{ __sap_vm_provision_task_storage_type_tier | first }}" + pi_storage_pool_affinity: true #pi_volume_ids: [] pi_pin_policy: none pi_health_status: OK + pi_placement_group_id: "{{ ( + (__sap_vm_provision_task_ibmcloud_placement_groups_list.resource.placement_groups | selectattr('name','search','hana'))[0].id + if ('hana_primary' in target_provision_host_spec.sap_host_type or 'hana_secondary' in target_provision_host_spec.sap_host_type) and not __sap_vm_provision_task_ibmcloud_placement_groups_list is skipped + else + (__sap_vm_provision_task_ibmcloud_placement_groups_list.resource.placement_groups | selectattr('name','search','anydb'))[0].id + if ('anydb_primary' in target_provision_host_spec.sap_host_type or 'anydb_secondary' in target_provision_host_spec.sap_host_type) and not __sap_vm_provision_task_ibmcloud_placement_groups_list is skipped + else + (__sap_vm_provision_task_ibmcloud_placement_groups_list.resource.placement_groups | selectattr('name','search','nwas'))[0].id + if ('nwas_ascs' in target_provision_host_spec.sap_host_type or 'nwas_ers' in target_provision_host_spec.sap_host_type) and not __sap_vm_provision_task_ibmcloud_placement_groups_list is skipped + ) | default(omit) }}" + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" # Use check to avoid idempotency issues with legacy ibm.cloudcollection Ansible Collection (until ibm.cloud Ansible Collection is out of beta) @@ -52,31 +105,6 @@ ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" -# Create flat list with names for each volume to be created. 
-- name: Set fact for target device map - ansible.builtin.set_fact: - storage_disks_map: | - {% set disks_map = [] -%} - {% for storage_item in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].storage_definition -%} - {% for idx in range(0, storage_item.disk_count | default(1)) -%} - {% if (storage_item.filesystem_type is defined) -%} - {% if ('swap' in storage_item.filesystem_type and storage_item.swap_path is not defined) - or ('swap' not in storage_item.filesystem_type and storage_item.nfs_path is not defined) -%} - {% set vol = disks_map.extend([ - { - 'definition_key': storage_item.name, - 'name': storage_item.name + idx|string, - 'size': storage_item.disk_size | default(0), - 'type': storage_item.disk_type | default('general-purpose') - } - ]) %} - {%- endif %} - {%- endif %} - {%- endfor %} - {%- endfor %} - {{ disks_map }} - - - name: Provision IBM Power Infrastructure Block Storage volumes for IBM Power VS instance filesystems no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_provision_host_single_volumes @@ -218,7 +246,7 @@ - name: Append loop value to register ansible.builtin.set_fact: - __sap_vm_provision_task_provision_host_single: "{{ __sap_vm_provision_task_provision_host_single | combine( { 'host_node' : inventory_hostname } , { 'sap_host_type' : lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_host_type } , { 'sap_system_type' : (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_system_type | default('')) } ) }}" + __sap_vm_provision_task_provision_host_single: "{{ __sap_vm_provision_task_provision_host_single | combine( { 'host_node' : inventory_hostname } , { 'sap_host_type' : target_provision_host_spec.sap_host_type } , { 'sap_system_type' : (target_provision_host_spec.sap_system_type | default('')) } ) }}" - name: Append output to merged register ansible.builtin.set_fact: diff --git a/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/execute_main.yml index 501d6d1..cfa1293 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/execute_main.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/execute_main.yml @@ -12,6 +12,7 @@ - name: Identify Resource Group info no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_resource_group + run_once: true ibm.cloudcollection.ibm_resource_group_info: name: "{{ sap_vm_provision_ibmcloud_resource_group_name }}" ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -21,6 +22,7 @@ - name: Identify Resource Group info for Private DNS no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_resource_group_dns + run_once: true ibm.cloudcollection.ibm_resource_group_info: name: "{{ sap_vm_provision_ibmcloud_private_dns_resource_group_name }}" ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -29,6 +31,7 @@ - name: Identify pre-loaded SSH Public Key info no_log: "{{ __sap_vm_provision_no_log }}" register: 
__sap_vm_provision_task_ibmcloud_ssh_public_key + run_once: true ibm.cloudcollection.ibm_is_ssh_key_info: name: "{{ sap_vm_provision_ibmcloud_key_pair_name_ssh_host_public_key }}" ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -36,6 +39,7 @@ - name: Identify VPC Subnet info no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_vpc_subnet + run_once: true ibm.cloudcollection.ibm_is_subnet_info: name: "{{ sap_vm_provision_ibmcloud_vpc_subnet_name }}" ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -43,6 +47,7 @@ - name: Identify VPC Security Group info no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_vpc_sg + run_once: true ibm.cloudcollection.ibm_is_security_group_info: name: "{{ item }}" ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -51,6 +56,7 @@ - name: Identify Private DNS instance no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_pdns_service_instance + run_once: true ibm.cloudcollection.ibm_resource_instance_info: resource_group_id: "{{ __sap_vm_provision_task_ibmcloud_resource_group.resource.id }}" location: global @@ -61,6 +67,7 @@ - name: Identify Private DNS Zone info no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_pdns + run_once: true ibm.cloudcollection.ibm_dns_zones_info: instance_id: "{{ __sap_vm_provision_task_ibmcloud_pdns_service_instance.resource.guid }}" ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -68,10 +75,29 @@ - name: Identify OS Image list no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_os_image_list + run_once: true ibm.cloudcollection.ibm_is_images_info: status: available ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + - name: Create Placement Groups when High Availability + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_placement_group + run_once: true + ibm.cloudcollection.ibm_is_placement_group: + resource_group: "{{ __sap_vm_provision_task_ibmcloud_resource_group.resource.id }}" + name: "{{ sap_vm_provision_ibmcloud_placement_resource_name }}-{{ item }}" + strategy: power_spread + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + loop: + - "{{ 'hana' if 'hana_secondary' in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan] | json_query('*') | map(attribute='sap_host_type') else '' }}" + - "{{ 'anydb' if 'anydb_secondary' in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan] | json_query('*') | map(attribute='sap_host_type') else '' }}" + - "{{ 'nwas' if 'nwas_ers' in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan] | json_query('*') | map(attribute='sap_host_type') else '' }}" + when: + - sap_vm_provision_ibmcloud_placement_resource_name is defined + - sap_vm_provision_ibmcloud_placement_strategy_spread + - not item == '' + - name: Set fact to hold loop variables from include_tasks ansible.builtin.set_fact: register_provisioned_host_all: [] @@ -90,7 +116,7 @@ ansible.builtin.add_host: name: "{{ add_item[0].host_node }}" groups: "{{ add_item[0].sap_system_type + '_' if (add_item[0].sap_system_type != '') }}{{ 
add_item[0].sap_host_type }}" - ansible_host: "{{ add_item[0].resource.primary_network_interface[0].primary_ipv4_address }}" + ansible_host: "{{ add_item[0].resource.primary_network_attachment[0].virtual_network_interface[0].primary_ip[0].address | default(add_item[0].resource.primary_network_attachment[0].primary_ip[0].address) }}" # use default to handle different r/ds data structure ansible_user: "root" ansible_ssh_private_key_file: "{{ sap_vm_provision_ssh_host_private_key_file_path }}" ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no -o ProxyCommand='ssh -W %h:%p {{ sap_vm_provision_bastion_user }}@{{ sap_vm_provision_bastion_public_ip }} -p {{ sap_vm_provision_bastion_ssh_port }} -i {{ sap_vm_provision_ssh_bastion_private_key_file_path }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' @@ -105,7 +131,7 @@ - name: Set fact to hold all inventory hosts in all groups ansible.builtin.set_fact: - groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}" + groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['anydb_primary'] | default([]) ] , [ groups['anydb_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}" - name: Set Ansible Vars register: __sap_vm_provision_task_ansible_vars_set @@ -194,6 +220,10 @@ ansible.builtin.include_tasks: file: common/set_ansible_vars_storage.yml + - name: Register Package Repositories for OS Images with Bring-Your-Own-Subscription (BYOS) + ansible.builtin.include_tasks: + file: common/register_os.yml + # - name: Ansible Task block to execute on target inventory hosts - High Availability # delegate_to: "{{ inventory_hostname }}" diff --git a/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/execute_provision.yml b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/execute_provision.yml index cc0525e..1c6ea45 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/execute_provision.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/execute_provision.yml @@ -14,6 +14,10 @@ - sap_vm_provision_calculate_sap_hana_scaleout_active_coordinator is defined - not inventory_hostname in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan].keys() +- name: Set fact for host specifications of the provision target + ansible.builtin.set_fact: + target_provision_host_spec: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)] }}" + - name: Provision IBM Cloud Virtual Server instance no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_provision_host_single @@ -21,7 +25,7 @@ state: available name: "{{ inventory_hostname }}" image: "{{ (__sap_vm_provision_task_ibmcloud_os_image_list.resource.images | select('search', lookup('ansible.builtin.vars', 
'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_os_image_dictionary')[sap_vm_provision_ibmcloud_vs_host_os_image]) | sort(reverse=True,case_sensitive=False,attribute='name') | first).id }}" - profile: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].virtual_machine_profile }}" + profile: "{{ target_provision_host_spec.virtual_machine_profile }}" keys: - "{{ __sap_vm_provision_task_ibmcloud_ssh_public_key.resource.id }}" @@ -29,15 +33,30 @@ zone: "{{ sap_vm_provision_ibmcloud_availability_zone }}" vpc: "{{ __sap_vm_provision_task_ibmcloud_vpc_subnet.resource.vpc }}" - # The Subnet assigned to the primary Virtual Network Interface (vNIC) cannot be changed - # The Name and Security Group assigned to the Primary Network Interface (vNIC) are editable - primary_network_interface: - - name: "{{ inventory_hostname }}-vnic0" - subnet: "{{ __sap_vm_provision_task_ibmcloud_vpc_subnet.resource.id }}" - allow_ip_spoofing: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].disable_ip_anti_spoofing }}" # When disable the Anti IP Spoofing = true, then Allow IP Spoofing = true - security_groups: "{{ __sap_vm_provision_task_ibmcloud_vpc_sg.results | map(attribute='resource.id') }}" + # The Subnet assigned to the deprecated primary Virtual Network Interface (vNIC) cannot be changed + # The Name and Security Group assigned are editable + # primary_network_interface: + # - name: "{{ inventory_hostname }}-vnic0" + # subnet: "{{ __sap_vm_provision_task_ibmcloud_vpc_subnet.resource.id }}" + # allow_ip_spoofing: "{{ target_provision_host_spec.disable_ip_anti_spoofing }}" # When disable the Anti IP Spoofing = true, then Allow IP Spoofing = true + # security_groups: "{{ __sap_vm_provision_task_ibmcloud_vpc_sg.results | map(attribute='resource.id') }}" #network_interfaces: + # The Subnet assigned to the primary Virtual Network Interface (VNI) cannot be changed + # The Name and Security Group assigned are editable + primary_network_attachment: + - name: "{{ inventory_hostname }}-vni0-attach" + virtual_network_interface: + - name: "{{ inventory_hostname }}-vni0" + resource_group: "{{ __sap_vm_provision_task_ibmcloud_resource_group.resource.id }}" + subnet: "{{ __sap_vm_provision_task_ibmcloud_vpc_subnet.resource.id }}" + security_groups: "{{ __sap_vm_provision_task_ibmcloud_vpc_sg.results | map(attribute='resource.id') }}" + allow_ip_spoofing: "{{ target_provision_host_spec.disable_ip_anti_spoofing }}" # When disable the Anti IP Spoofing = true, then Allow IP Spoofing = true + enable_infrastructure_nat: true # must be true as Virtual Server instances require Infrastructure NAT + protocol_state_filtering_mode: "auto" + auto_delete: true # if VNI created separately, must be false + #network_attachments: + auto_delete_volume: true boot_volume: - name: "{{ inventory_hostname }}-boot-0" @@ -47,6 +66,17 @@ protocol: https response_hop_limit: 5 + placement_group: "{{ ( + (__sap_vm_provision_task_ibmcloud_placement_group.results | selectattr('item','==','hana'))[0].resource.id + if ('hana_primary' in target_provision_host_spec.sap_host_type or 'hana_secondary' in target_provision_host_spec.sap_host_type) and not __sap_vm_provision_task_ibmcloud_placement_group is skipped + 
else + (__sap_vm_provision_task_ibmcloud_placement_group.results | selectattr('item','==','anydb'))[0].resource.id + if ('anydb_primary' in target_provision_host_spec.sap_host_type or 'anydb_secondary' in target_provision_host_spec.sap_host_type) and not __sap_vm_provision_task_ibmcloud_placement_group is skipped + else + (__sap_vm_provision_task_ibmcloud_placement_group.results | selectattr('item','==','nwas'))[0].resource.id + if ('nwas_ascs' in target_provision_host_spec.sap_host_type or 'nwas_ers' in target_provision_host_spec.sap_host_type) and not __sap_vm_provision_task_ibmcloud_placement_group is skipped + ) | default(omit) }}" + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -55,7 +85,7 @@ ansible.builtin.set_fact: storage_disks_map: | {% set disks_map = [] -%} - {% for storage_item in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].storage_definition -%} + {% for storage_item in target_provision_host_spec.storage_definition -%} {% for idx in range(0, storage_item.disk_count | default(1)) -%} {% if (storage_item.filesystem_type is defined) -%} {% if ('swap' in storage_item.filesystem_type and storage_item.swap_path is not defined) @@ -65,7 +95,8 @@ 'definition_key': storage_item.name, 'name': storage_item.name + idx|string, 'size': storage_item.disk_size | default(0), - 'type': storage_item.disk_type | default('general-purpose') + 'type': storage_item.disk_type | default('general-purpose'), + 'iops': storage_item.disk_iops | default(omit) } ]) %} {%- endif %} @@ -83,6 +114,7 @@ name: "{{ inventory_hostname + '-vol-' + vol_item.name | replace('_', '-')}}" profile: "{{ vol_item.type }}" capacity: "{{ vol_item.size }}" + iops: "{{ vol_item.iops | default(omit) }}" ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" loop: "{{ storage_disks_map }}" loop_control: @@ -126,7 +158,7 @@ - name: Create fact for delegate host IP ansible.builtin.set_fact: - provisioned_private_ip: "{{ __sap_vm_provision_task_provision_host_single.resource.primary_network_interface[0].primary_ipv4_address }}" + provisioned_private_ip: "{{ __sap_vm_provision_task_provision_host_single.resource.primary_network_attachment[0].virtual_network_interface[0].primary_ip[0].address | default(__sap_vm_provision_task_provision_host_single.resource.primary_network_attachment[0].primary_ip[0].address) }}" # use default to handle different r/ds data structure - name: Copy facts to delegate host @@ -138,7 +170,7 @@ delegate_sap_vm_provision_bastion_ssh_port: "{{ sap_vm_provision_bastion_ssh_port }}" delegate_sap_vm_provision_ssh_bastion_private_key_file_path: "{{ sap_vm_provision_ssh_bastion_private_key_file_path }}" delegate_sap_vm_provision_ssh_host_private_key_file_path: "{{ sap_vm_provision_ssh_host_private_key_file_path }}" - delegate_private_ip: "{{ __sap_vm_provision_task_provision_host_single.resource.primary_network_interface[0].primary_ipv4_address }}" + delegate_private_ip: "{{ __sap_vm_provision_task_provision_host_single.resource.primary_network_attachment[0].virtual_network_interface[0].primary_ip[0].address | default(__sap_vm_provision_task_provision_host_single.resource.primary_network_attachment[0].primary_ip[0].address) }}" # use default to handle different r/ds data structure delegate_hostname: "{{ inventory_hostname }}" delegate_sap_vm_provision_dns_root_domain_name: "{{ sap_vm_provision_dns_root_domain }}" @@ 
-187,7 +219,7 @@ - name: Append loop value to register ansible.builtin.set_fact: - __sap_vm_provision_task_provision_host_single: "{{ __sap_vm_provision_task_provision_host_single | combine( { 'host_node' : inventory_hostname } , { 'sap_host_type' : lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_host_type } , { 'sap_system_type' : (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_system_type | default('')) } ) }}" + __sap_vm_provision_task_provision_host_single: "{{ __sap_vm_provision_task_provision_host_single | combine( { 'host_node' : inventory_hostname } , { 'sap_host_type' : target_provision_host_spec.sap_host_type } , { 'sap_system_type' : (target_provision_host_spec.sap_system_type | default('')) } ) }}" - name: Append output to merged register ansible.builtin.set_fact: diff --git a/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/execute_setup_ha.yml b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/execute_setup_ha.yml index eeeb283..58e98c9 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/execute_setup_ha.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/execute_setup_ha.yml @@ -11,7 +11,7 @@ source_resource_type: load-balancer target_service_name: dns-svcs ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" - failed_when: not __sap_vm_provision_task_ibmcloud_iam_auth_policy.rc == 0 and not 'access policy with identical attributes already exists' in __sap_vm_provision_task_ibmcloud_iam_auth_policy.stderr + failed_when: not __sap_vm_provision_task_ibmcloud_iam_auth_policy.rc == 0 and not 'already exists' in __sap_vm_provision_task_ibmcloud_iam_auth_policy.stderr # The IBM Cloud Load Balancer is provisioned before Linux Pacemaker and requires a temporary Health Check Probe port to be used with an active OS service listening.
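# A minimal sketch of such a temporary probe listener (illustration only, not the role's actual implementation),
# assuming nc/ncat is available on the target host (listener flags vary between netcat implementations) and
# reusing a health_monitor_port value seen in the pool definitions below (e.g. 55551):
#
# - name: Start temporary TCP listener for Load Balancer health check probe
#   ansible.builtin.shell: nohup nc -l -k 55551 >/dev/null 2>&1 &
#   changed_when: true
#
# Once Linux Pacemaker is configured, a cluster-managed listener on the active node typically answers this
# probe, and the temporary listener is no longer needed.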
@@ -39,15 +39,15 @@ logging: true # For ALB L7, not NLB L4 #profile: network-fixed / dynamic # For NLB L4, not ALB L7 #route_mode: false # For NLB L4, not ALB L7 - # dns: # Unsupported by legacy Ansible Collection, use IBM Cloud CLI as workaround - # instance_crn: "{{ __sap_vm_provision_task_ibmcloud_pdns_service_instance.resource.resource_crn }}" - # zone_id: "{{ (__sap_vm_provision_task_ibmcloud_pdns.resource.dns_zones | selectattr('name', '==', sap_vm_provision_dns_root_domain) | first).zone_id }}" + dns: + - instance_crn: "{{ __sap_vm_provision_task_ibmcloud_pdns_service_instance.resource.resource_crn }}" + zone_id: "{{ (__sap_vm_provision_task_ibmcloud_pdns.resource.dns_zones | selectattr('name', '==', sap_vm_provision_dns_root_domain) | first).zone_id }}" ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" loop: - - "{{ 'lb-sap-ha-hana' if (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0)) else '' }}" - - "{{ 'lb-sap-ha-anydb' if (groups['anydb_secondary'] is defined and (groups['anydb_secondary'] | length>0)) else '' }}" - - "{{ 'lb-sap-ha-nwas-ascs' if (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) else '' }}" - - "{{ 'lb-sap-ha-nwas-ers' if (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) else '' }}" + - "{{ sap_vm_provision_ha_load_balancer_name_hana if (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0)) else '' }}" + - "{{ sap_vm_provision_ha_load_balancer_name_anydb if (groups['anydb_secondary'] is defined and (groups['anydb_secondary'] | length>0)) else '' }}" + - "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs' if (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) else '' }}" + - "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ers' if (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) else '' }}" loop_control: label: "Waiting for {{ item }}" when: not item == '' @@ -65,20 +65,20 @@ # failed_when: not __sap_vm_provision_task_ibmcloud_lb_provision_parallel_async_status.rc == 0 # Workaround to missing Ansible Module functionality in legacy Ansible Collection -- name: IBM Cloud CLI append DNS to Load Balancer - no_log: "{{ __sap_vm_provision_no_log }}" - register: __sap_vm_provision_task_ibmcloud_lb_update_dns - ansible.builtin.shell: | - ibmcloud config --quiet --check-version false && ibmcloud login --apikey="{{ sap_vm_provision_ibmcloud_api_key }}" -g {{ sap_vm_provision_ibmcloud_resource_group_name }} -r {{ sap_vm_provision_ibmcloud_region }} --quiet - ibmcloud plugin install infrastructure-service -f --quiet - ibmcloud is load-balancer-update "{{ item }}" --dns-instance-crn "{{ __sap_vm_provision_task_ibmcloud_pdns_service_instance.resource.resource_crn }}" --dns-zone-id "{{ (__sap_vm_provision_task_ibmcloud_pdns.resource.dns_zones | selectattr('name', '==', sap_vm_provision_dns_root_domain) | first).zone_id }}" - loop: - - "{{ 'lb-sap-ha-hana' if (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0)) else '' }}" - - "{{ 'lb-sap-ha-anydb' if (groups['anydb_secondary'] is defined and (groups['anydb_secondary'] | length>0)) else '' }}" - - "{{ 'lb-sap-ha-nwas-ascs' if (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) else '' }}" - - "{{ 'lb-sap-ha-nwas-ers' if (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) else '' }}" - when: not item == '' - failed_when: not __sap_vm_provision_task_ibmcloud_lb_update_dns.rc == 0 and not 'nothing to update the load balancer with' 
in __sap_vm_provision_task_ibmcloud_lb_update_dns.stderr +# - name: IBM Cloud CLI append DNS to Load Balancer +# no_log: "{{ __sap_vm_provision_no_log }}" +# register: __sap_vm_provision_task_ibmcloud_lb_update_dns +# ansible.builtin.shell: | +# ibmcloud config --quiet --check-version false && ibmcloud login --apikey="{{ sap_vm_provision_ibmcloud_api_key }}" -g {{ sap_vm_provision_ibmcloud_resource_group_name }} -r {{ sap_vm_provision_ibmcloud_region }} --quiet +# ibmcloud plugin install infrastructure-service -f --quiet +# ibmcloud is load-balancer-update "{{ item }}" --dns-instance-crn "{{ __sap_vm_provision_task_ibmcloud_pdns_service_instance.resource.resource_crn }}" --dns-zone-id "{{ (__sap_vm_provision_task_ibmcloud_pdns.resource.dns_zones | selectattr('name', '==', sap_vm_provision_dns_root_domain) | first).zone_id }}" +# loop: +# - "{{ sap_vm_provision_ha_load_balancer_name_hana if (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0)) else '' }}" +# - "{{ sap_vm_provision_ha_load_balancer_name_anydb if (groups['anydb_secondary'] is defined and (groups['anydb_secondary'] | length>0)) else '' }}" +# - "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs' if (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) else '' }}" +# - "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ers' if (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) else '' }}" +# when: not item == '' +# failed_when: not __sap_vm_provision_task_ibmcloud_lb_update_dns.rc == 0 and not 'nothing to update the load balancer with' in __sap_vm_provision_task_ibmcloud_lb_update_dns.stderr - name: Identify IBM Cloud Virtual Servers info no_log: "{{ __sap_vm_provision_no_log }}" @@ -87,6 +87,11 @@ vpc: "{{ __sap_vm_provision_task_ibmcloud_vpc_subnet.resource.vpc }}" ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" +# - name: Identify IBM Cloud Load Balancers info +# no_log: "{{ __sap_vm_provision_no_log }}" +# register: __sap_vm_provision_task_ibmcloud_lb_all_info +# ibm.cloudcollection.ibm_is_lbs_info: +# ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" # Workaround for bug which populates region and ibmcloud_api_key as TF arguments for ibm_is_lbs_info Ansible Module in legacy Ansible Collection - name: IBM Cloud CLI execution to list Load Balancer/s info @@ -99,14 +104,33 @@ - name: Set fact for IBM Cloud Load Balancer/s info ansible.builtin.set_fact: - ibmcloud_lbs_all_info: "{{ __sap_vm_provision_task_ibmcloud_lb_all_info_shell.stdout | split('Space:') | last | trim | from_json }}" + ibmcloud_lbs_all_info: "{{ ('[' + (__sap_vm_provision_task_ibmcloud_lb_all_info_shell.stdout | split('[',1) | last)) | trim | from_json }}" # Required because CLI executed with API Key - name: Set fact for Ansible Tasks debug with redaction ansible.builtin.set_fact: - __sap_vm_provision_task_ibmcloud_lb_update_dns: "{{ __sap_vm_provision_task_ibmcloud_lb_update_dns | regex_replace(sap_vm_provision_ibmcloud_api_key) }}" + # __sap_vm_provision_task_ibmcloud_lb_update_dns: "{{ __sap_vm_provision_task_ibmcloud_lb_update_dns | regex_replace(sap_vm_provision_ibmcloud_api_key) }}" __sap_vm_provision_task_ibmcloud_lb_all_info_shell: "{{ __sap_vm_provision_task_ibmcloud_lb_all_info_shell | regex_replace(sap_vm_provision_ibmcloud_api_key) }}" +- name: SAP System port numbers to listen (as applicable) on IBM Cloud Load Balancer + ansible.builtin.debug: + msg: + - "{{ ('3' + sap_system_hana_db_instance_nr + '13') if sap_system_hana_db_instance_nr is defined else 'IGNORE' 
}} - SAP HANA - System DB SQL" + - "{{ ('3' + sap_system_hana_db_instance_nr + '15') if sap_system_hana_db_instance_nr is defined else 'IGNORE' }} - SAP HANA - MDC Tenant 1 SQL" + - "{{ ('5' + sap_system_hana_db_instance_nr + '13') if sap_system_hana_db_instance_nr is defined else 'IGNORE' }} - SAP HANA - startsrv HTTP" + - "{{ ('5' + sap_system_hana_db_instance_nr + '14') if sap_system_hana_db_instance_nr is defined else 'IGNORE' }} - SAP HANA - startsrv HTTPS" + - "{{ '5912' if groups['anydb_secondary'] is defined else 'IGNORE' }} - SAP AnyDB - IBM Db2 Communication Port" + - "{{ ('32' + sap_system_nwas_abap_ascs_instance_nr) if sap_system_nwas_abap_ascs_instance_nr is defined else 'IGNORE' }} - SAP NetWeaver ASCS - Dispatcher sapdp process" + - "{{ ('36' + sap_system_nwas_abap_ascs_instance_nr) if sap_system_nwas_abap_ascs_instance_nr is defined else 'IGNORE' }} - SAP NetWeaver ASCS - Message Server sapms process" + - "{{ ('81' + sap_system_nwas_abap_ascs_instance_nr) if sap_system_nwas_abap_ascs_instance_nr is defined else 'IGNORE' }} - SAP NetWeaver ASCS - Message Server HTTP sapms process" + - "{{ ('39' + sap_system_nwas_abap_ascs_instance_nr) if sap_system_nwas_abap_ascs_instance_nr is defined else 'IGNORE' }} - SAP NetWeaver ASCS - Enqueue Server sapenq process" + - "{{ ('5' + sap_system_nwas_abap_ascs_instance_nr + '16') if sap_system_nwas_abap_ascs_instance_nr is defined else 'IGNORE' }} - SAP NetWeaver ASCS - Enqueue Replicator Server sapenqrepl process" + - "{{ ('5' + sap_system_nwas_abap_ascs_instance_nr + '13') if sap_system_nwas_abap_ascs_instance_nr is defined else 'IGNORE' }} - SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTP sapctrl process" + - "{{ ('5' + sap_system_nwas_abap_ascs_instance_nr + '14') if sap_system_nwas_abap_ascs_instance_nr is defined else 'IGNORE' }} - SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTPS (Secure) sapctrls" + - "{{ ('39' + sap_system_nwas_abap_ers_instance_nr) if sap_system_nwas_abap_ers_instance_nr is defined else 'IGNORE' }} - SAP NetWeaver ERS - Enqueue Replication Server sapenqr process" + - "{{ ('5' + sap_system_nwas_abap_ers_instance_nr + '13') if sap_system_nwas_abap_ers_instance_nr is defined else 'IGNORE' }} - SAP NetWeaver ERS - SAP Start Service (SAPControl SOAP) HTTP sapctrl process" + - "{{ ('5' + sap_system_nwas_abap_ers_instance_nr + '14') if sap_system_nwas_abap_ers_instance_nr is defined else 'IGNORE' }} - SAP NetWeaver ERS - SAP Start Service (SAPControl SOAP) HTTPS (Secure) sapctrls" + # Create IBM Cloud Load Balancer Back-end Pools @@ -114,8 +138,8 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_hana1 ibm.cloudcollection.ibm_is_lb_pool: - name: lb-sap-ha-hana-pool-sysdb-sql - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-pool-sysdb-sql' }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana))[0].id }}" algorithm: weighted_round_robin protocol: tcp health_delay: 20 @@ -130,8 +154,8 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_hana2 ibm.cloudcollection.ibm_is_lb_pool: - name: lb-sap-ha-hana-pool-mdc1-sql - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-pool-mdc1-sql' }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 
sap_vm_provision_ha_load_balancer_name_hana))[0].id }}" algorithm: weighted_round_robin protocol: tcp health_delay: 20 @@ -146,8 +170,8 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_hana3 ibm.cloudcollection.ibm_is_lb_pool: - name: lb-sap-ha-hana-pool-startsrv-http - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-pool-startsrv-http' }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana))[0].id }}" algorithm: weighted_round_robin protocol: tcp health_delay: 20 @@ -162,8 +186,8 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_hana4 ibm.cloudcollection.ibm_is_lb_pool: - name: lb-sap-ha-hana-pool-startsrv-https - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-pool-startsrv-https' }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana))[0].id }}" algorithm: weighted_round_robin protocol: tcp health_delay: 20 @@ -177,8 +201,8 @@ - name: Create IBM Cloud Load Balancer Back-end Pool for SAP AnyDB - IBM Db2 Communication Port no_log: "{{ __sap_vm_provision_no_log }}" ibm.cloudcollection.ibm_is_lb_pool: - name: lb-sap-ha-anydb-pool-ibmdb2 - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-anydb'))[0].id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_anydb + '-pool-ibmdb2' }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_anydb))[0].id }}" algorithm: weighted_round_robin protocol: tcp health_delay: 20 @@ -193,8 +217,8 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_nwas_ascs1 ibm.cloudcollection.ibm_is_lb_pool: - name: lb-sap-ha-nwas-ascs-pool-dp - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-dp' }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" algorithm: weighted_round_robin protocol: tcp health_delay: 20 @@ -209,8 +233,8 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_nwas_ascs2 ibm.cloudcollection.ibm_is_lb_pool: - name: lb-sap-ha-nwas-ascs-pool-ms - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-ms' }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" algorithm: weighted_round_robin protocol: tcp health_delay: 20 @@ -221,12 +245,12 @@ ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) -- name: Create IBM Cloud Load Balancer Back-end Pool for SAP NetWeaver ASCS - Enqueue Server sapenq process +- name: Create IBM Cloud Load Balancer Back-end Pool for SAP NetWeaver ASCS - Message Server HTTP sapms process no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_nwas_ascs3 ibm.cloudcollection.ibm_is_lb_pool: - name: lb-sap-ha-nwas-ascs-pool-enq - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 
'lb-sap-ha-nwas-ascs'))[0].id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-ms-http' }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" algorithm: weighted_round_robin protocol: tcp health_delay: 20 @@ -237,12 +261,12 @@ ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) -- name: Create IBM Cloud Load Balancer Back-end Pool for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTP sapctrl process +- name: Create IBM Cloud Load Balancer Back-end Pool for SAP NetWeaver ASCS - Enqueue Server sapenq process no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_nwas_ascs4 ibm.cloudcollection.ibm_is_lb_pool: - name: lb-sap-ha-nwas-ascs-pool-sapctrl - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-enq' }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" algorithm: weighted_round_robin protocol: tcp health_delay: 20 @@ -253,12 +277,44 @@ ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) -- name: Create IBM Cloud Load Balancer Back-end Pool for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTPS (Secure) sapctrls +- name: Create IBM Cloud Load Balancer Back-end Pool for SAP NetWeaver ASCS - Enqueue Replicator Server sapenqrepl process no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_nwas_ascs5 ibm.cloudcollection.ibm_is_lb_pool: - name: lb-sap-ha-nwas-ascs-pool-sapctrls - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-enqrepl' }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" + algorithm: weighted_round_robin + protocol: tcp + health_delay: 20 + health_retries: 2 + health_timeout: 10 + health_type: tcp + health_monitor_port: 55551 + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) + +- name: Create IBM Cloud Load Balancer Back-end Pool for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTP sapctrl process + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_lb_pool_nwas_ascs6 + ibm.cloudcollection.ibm_is_lb_pool: + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-sapctrl' }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" + algorithm: weighted_round_robin + protocol: tcp + health_delay: 20 + health_retries: 2 + health_timeout: 10 + health_type: tcp + health_monitor_port: 55551 + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) + +- name: Create IBM Cloud Load Balancer Back-end Pool for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTPS (Secure) sapctrls + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_lb_pool_nwas_ascs7 + ibm.cloudcollection.ibm_is_lb_pool: + name: "{{ 
sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-sapctrls' }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" algorithm: weighted_round_robin protocol: tcp health_delay: 20 @@ -273,8 +329,8 @@ # no_log: "{{ __sap_vm_provision_no_log }}" # register: __sap_vm_provision_task_ibmcloud_lb_pool_nwas_ers1 # ibm.cloudcollection.ibm_is_lb_pool: -# name: lb-sap-ha-nwas-ers-pool-dp -# lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}" +# name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-dp' }}" +# lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].id }}" # algorithm: weighted_round_robin # protocol: tcp # health_delay: 20 @@ -289,8 +345,8 @@ # no_log: "{{ __sap_vm_provision_no_log }}" # register: __sap_vm_provision_task_ibmcloud_lb_pool_nwas_ers2 # ibm.cloudcollection.ibm_is_lb_pool: -# name: lb-sap-ha-nwas-ers-pool-ms -# lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}" +# name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-ms' }}" +# lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].id }}" # algorithm: weighted_round_robin # protocol: tcp # health_delay: 20 @@ -305,8 +361,8 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_nwas_ers3 ibm.cloudcollection.ibm_is_lb_pool: - name: lb-sap-ha-nwas-ers-pool-enqr - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-enqr' }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].id }}" algorithm: weighted_round_robin protocol: tcp health_delay: 20 @@ -321,8 +377,8 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_nwas_ers4 ibm.cloudcollection.ibm_is_lb_pool: - name: lb-sap-ha-nwas-ers-pool-sapctrl - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-sapctrl' }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].id }}" algorithm: weighted_round_robin protocol: tcp health_delay: 20 @@ -337,8 +393,8 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_nwas_ers5 ibm.cloudcollection.ibm_is_lb_pool: - name: lb-sap-ha-nwas-ers-pool-sapctrls - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-sapctrls' }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].id }}" algorithm: weighted_round_robin protocol: tcp health_delay: 20 @@ -361,6 +417,238 @@ loop: "{{ ibmcloud_lbs_all_info | map(attribute='id') }}" +# Create IBM Cloud Load Balancer Front-end Listeners (open port for Virtual IPs) +# Configure prior to Back-end Pool Server Members, as this will increase execution speed +# by avoiding LB verification check/reload once the LB is active with Server Members + +# When IBM Cloud Load Balancer, Application Load Balancer Type: +# - Important to increase the Front-end Listener Idle Connection 
Timeout (sec), + # if the received-and-forwarded request becomes idle (no data sent/received) + # then the idle connection is closed after the idle timeout, which defaults to 50 seconds (the minimum) and can be raised to a maximum of 7200 seconds (2 hours). + # - This can impact SAP SWPM waiting for SAP HANA Data Load to complete, and other long-running actions. + +- name: Create IBM Cloud Load Balancer Front-end Listener for SAP HANA - System DB SQL + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_hana1 + ibm.cloudcollection.ibm_is_lb_listener: + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana))[0].id }}" + default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana + '-pool-sysdb-sql'))[0].id }}" + protocol: tcp + port: "{{ ('3' + sap_system_hana_db_instance_nr + '13') | int }}" + idle_connection_timeout: 600 # 10 minutes + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0)) + failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_hana1.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_hana1.stderr + +- name: Create IBM Cloud Load Balancer Front-end Listener for SAP HANA - MDC Tenant 1 SQL + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_hana2 + ibm.cloudcollection.ibm_is_lb_listener: + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana))[0].id }}" + default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana + '-pool-mdc1-sql'))[0].id }}" + protocol: tcp + port: "{{ ('3' + sap_system_hana_db_instance_nr + '15') | int }}" + idle_connection_timeout: 600 # 10 minutes + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0)) + failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_hana2.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_hana2.stderr + +- name: Create IBM Cloud Load Balancer Front-end Listener for SAP HANA - startsrv HTTP + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_hana3 + ibm.cloudcollection.ibm_is_lb_listener: + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana))[0].id }}" + default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana + '-pool-startsrv-http'))[0].id }}" + protocol: tcp + port: "{{ ('5' + sap_system_hana_db_instance_nr + '13') | int }}" + idle_connection_timeout: 600 # 10 minutes + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0)) + failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_hana3.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_hana3.stderr + +- name: Create IBM Cloud Load Balancer Front-end Listener for SAP 
HANA - startsrv HTTPS + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_hana4 + ibm.cloudcollection.ibm_is_lb_listener: + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana))[0].id }}" + default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana + '-pool-startsrv-https'))[0].id }}" + protocol: tcp + port: "{{ ('5' + sap_system_hana_db_instance_nr + '14') | int }}" + idle_connection_timeout: 600 # 10 minutes + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0)) + failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_hana4.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_hana4.stderr + +- name: Create IBM Cloud Load Balancer Front-end Listener for SAP AnyDB - IBM Db2 Communication Port + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_anydb1 + ibm.cloudcollection.ibm_is_lb_listener: + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_anydb))[0].id }}" + default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_anydb + '-pool-ibmdb2'))[0].id }}" + protocol: tcp + port: 5912 + idle_connection_timeout: 600 # 10 minutes + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + when: (groups['anydb_secondary'] is defined and (groups['anydb_secondary'] | length>0)) + failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_anydb1.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_anydb1.stderr + +- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ASCS - Dispatcher sapdp process + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs1 + ibm.cloudcollection.ibm_is_lb_listener: + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" + default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-dp'))[0].id }}" + protocol: tcp + port: "{{ ('32' + sap_system_nwas_abap_ascs_instance_nr) | int }}" + idle_connection_timeout: 600 # 10 minutes + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) + failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs1.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs1.stderr + +- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ASCS - Message Server sapms process + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs2 + ibm.cloudcollection.ibm_is_lb_listener: + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" + default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | 
json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-ms'))[0].id }}" + protocol: tcp + port: "{{ ('36' + sap_system_nwas_abap_ascs_instance_nr) | int }}" + idle_connection_timeout: 600 # 10 minutes + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) + failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs2.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs2.stderr + +- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ASCS - Message Server HTTP sapms process + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs3 + ibm.cloudcollection.ibm_is_lb_listener: + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" + default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-ms-http'))[0].id }}" + protocol: tcp + port: "{{ ('81' + sap_system_nwas_abap_ascs_instance_nr) | int }}" + idle_connection_timeout: 600 # 10 minutes + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) + failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs3.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs3.stderr + +- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ASCS - Enqueue Server sapenq process + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs4 + ibm.cloudcollection.ibm_is_lb_listener: + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" + default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-enq'))[0].id }}" + protocol: tcp + port: "{{ ('39' + sap_system_nwas_abap_ascs_instance_nr) | int }}" + idle_connection_timeout: 600 # 10 minutes + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) + failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs4.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs4.stderr + +- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ASCS - Enqueue Replicator Server sapenqrepl process + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs5 + ibm.cloudcollection.ibm_is_lb_listener: + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" + default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-enqrepl'))[0].id }}" + protocol: tcp + port: "{{ ('5' + sap_system_nwas_abap_ascs_instance_nr + '16') | int }}" + idle_connection_timeout: 600 # 10 minutes + ibmcloud_api_key: 
"{{ sap_vm_provision_ibmcloud_api_key }}" + when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) + failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs5.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs5.stderr + +- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTP sapctrl process + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs6 + ibm.cloudcollection.ibm_is_lb_listener: + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" + default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-sapctrl'))[0].id }}" + protocol: tcp + port: "{{ ('5' + sap_system_nwas_abap_ascs_instance_nr + '13') | int }}" + idle_connection_timeout: 600 # 10 minutes + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) + failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs6.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs6.stderr + +- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTPS (Secure) sapctrls + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs7 + ibm.cloudcollection.ibm_is_lb_listener: + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" + default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-sapctrls'))[0].id }}" + protocol: tcp + port: "{{ ('5' + sap_system_nwas_abap_ascs_instance_nr + '14') | int }}" + idle_connection_timeout: 600 # 10 minutes + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) + failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs7.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs7.stderr + +# - name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ERS - Dispatcher sapdp process +# no_log: "{{ __sap_vm_provision_no_log }}" +# register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers1 +# ibm.cloudcollection.ibm_is_lb_listener: +# lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].id }}" +# default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-dp'))[0].id }}" +# protocol: tcp +# port: "{{ ('32' + sap_system_nwas_abap_ers_instance_nr) | int }}" +# idle_connection_timeout: 600 # 10 minutes +# ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" +# when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) +# failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers1.rc == 0 and not 'listener_duplicate_port' in 
__sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers1.stderr + +# - name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ERS - Message Server sapms process +# no_log: "{{ __sap_vm_provision_no_log }}" +# register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers2 +# ibm.cloudcollection.ibm_is_lb_listener: +# lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].id }}" +# default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-ms'))[0].id }}" +# protocol: tcp +# port: "{{ ('36' + sap_system_nwas_abap_ers_instance_nr) | int }}" +# idle_connection_timeout: 600 # 10 minutes +# ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" +# when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) +# failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers2.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers2.stderr + +- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ERS - Enqueue Replication Server sapenqr process + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers3 + ibm.cloudcollection.ibm_is_lb_listener: + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].id }}" + default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-enqr'))[0].id }}" + protocol: tcp + port: "{{ ('39' + sap_system_nwas_abap_ers_instance_nr) | int }}" + idle_connection_timeout: 600 # 10 minutes + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) + failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers3.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers3.stderr + +- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ERS - SAP Start Service (SAPControl SOAP) HTTP sapctrl process + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers4 + ibm.cloudcollection.ibm_is_lb_listener: + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].id }}" + default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-sapctrl'))[0].id }}" + protocol: tcp + port: "{{ ('5' + sap_system_nwas_abap_ers_instance_nr + '13') | int }}" + idle_connection_timeout: 600 # 10 minutes + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) + failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers4.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers4.stderr + +- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ERS - SAP Start Service (SAPControl SOAP) HTTPS (Secure) sapctrls + no_log: "{{ __sap_vm_provision_no_log }}" + register: 
__sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers5 + ibm.cloudcollection.ibm_is_lb_listener: + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].id }}" + default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-sapctrls'))[0].id }}" + protocol: tcp + port: "{{ ('5' + sap_system_nwas_abap_ers_instance_nr + '14') | int }}" + idle_connection_timeout: 600 # 10 minutes + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) + failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers5.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers5.stderr + + # Append Server Members to the IBM Cloud Load Balancer Back-end Pools # Primary @@ -368,9 +656,9 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_members_hana1 ibm.cloudcollection.ibm_is_lb_pool_member: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}" - pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-hana-pool-sysdb-sql'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana + '-pool-sysdb-sql'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" port: "{{ ('3' + sap_system_hana_db_instance_nr + '13') | int }}" weight: 100 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -385,9 +673,9 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_members_hana2 ibm.cloudcollection.ibm_is_lb_pool_member: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}" - pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-hana-pool-sysdb-sql'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana + '-pool-sysdb-sql'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" port: "{{ ('3' + sap_system_hana_db_instance_nr + '13') | int }}" weight: 1 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -403,9 +691,9 @@ 
no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_members_hana3 ibm.cloudcollection.ibm_is_lb_pool_member: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}" - pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-hana-pool-mdc1-sql'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana + '-pool-mdc1-sql'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" port: "{{ ('3' + sap_system_hana_db_instance_nr + '15') | int }}" weight: 100 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -420,9 +708,9 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_members_hana4 ibm.cloudcollection.ibm_is_lb_pool_member: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}" - pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-hana-pool-mdc1-sql'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana + '-pool-mdc1-sql'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" port: "{{ ('3' + sap_system_hana_db_instance_nr + '15') | int }}" weight: 1 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -438,9 +726,9 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_members_hana5 ibm.cloudcollection.ibm_is_lb_pool_member: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}" - pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-hana-pool-startsrv-http'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana + '-pool-startsrv-http'))[0].id }}" + target_address: "{{ 
(__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" port: "{{ ('5' + sap_system_hana_db_instance_nr + '13') | int }}" weight: 100 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -455,9 +743,9 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_members_hana6 ibm.cloudcollection.ibm_is_lb_pool_member: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}" - pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-hana-pool-startsrv-http'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana + '-pool-startsrv-http'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" port: "{{ ('5' + sap_system_hana_db_instance_nr + '13') | int }}" weight: 1 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -473,9 +761,9 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_members_hana7 ibm.cloudcollection.ibm_is_lb_pool_member: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}" - pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-hana-pool-startsrv-https'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana + '-pool-startsrv-https'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" port: "{{ ('5' + sap_system_hana_db_instance_nr + '14') | int }}" weight: 100 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -490,9 +778,9 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_members_hana8 ibm.cloudcollection.ibm_is_lb_pool_member: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}" - pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-hana-pool-startsrv-https'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 
sap_vm_provision_ha_load_balancer_name_hana))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana + '-pool-startsrv-https'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" port: "{{ ('5' + sap_system_hana_db_instance_nr + '14') | int }}" weight: 1 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -508,9 +796,9 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_members_anydb1 ibm.cloudcollection.ibm_is_lb_pool_member: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-anydb'))[0].id }}" - pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-anydb-pool-ibmdb2'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_anydb))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_anydb + '-pool-ibmdb2'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" port: 5912 weight: 100 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -525,9 +813,9 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_members_anydb2 ibm.cloudcollection.ibm_is_lb_pool_member: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-anydb'))[0].id }}" - pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-anydb-pool-ibmdb2'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_anydb))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_anydb + '-pool-ibmdb2'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" port: 5912 weight: 1 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -543,9 +831,9 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs1 ibm.cloudcollection.ibm_is_lb_pool_member: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}" - pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-dp'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances 
| selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-dp'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" port: "{{ ('32' + sap_system_nwas_abap_ascs_instance_nr) | int }}" weight: 100 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -560,9 +848,9 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs2 ibm.cloudcollection.ibm_is_lb_pool_member: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}" - pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-dp'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-dp'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" port: "{{ ('32' + sap_system_nwas_abap_ascs_instance_nr) | int }}" weight: 1 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -578,9 +866,9 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs3 ibm.cloudcollection.ibm_is_lb_pool_member: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}" - pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-ms'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-ms'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" port: "{{ ('36' + sap_system_nwas_abap_ascs_instance_nr) | int }}" weight: 100 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -595,9 +883,9 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs4 ibm.cloudcollection.ibm_is_lb_pool_member: - lb: "{{ (ibmcloud_lbs_all_info | 
selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}" - pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-ms'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-ms'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" port: "{{ ('36' + sap_system_nwas_abap_ascs_instance_nr) | int }}" weight: 1 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -609,14 +897,14 @@ # Primary -- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - Enqueue Server sapenq process - Member 1 +- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - Message Server HTTP sapms process - Member 1 no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs5 ibm.cloudcollection.ibm_is_lb_pool_member: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}" - pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-enq'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" - port: "{{ ('39' + sap_system_nwas_abap_ascs_instance_nr) | int }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-ms-http'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" + port: "{{ ('81' + sap_system_nwas_abap_ascs_instance_nr) | int }}" weight: 100 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" loop: "{{ (groups['nwas_ascs'] | default([]) ) }}" @@ -626,14 +914,14 @@ failed_when: not __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs5.rc == 0 and not 'already exists in a pool' in __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs5.stderr # Secondary -- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - Enqueue Server sapenq process - Member 2 (Failover/Secondary) +- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - Message Server HTTP sapms process - Member 2 (Failover/Secondary) no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs6 ibm.cloudcollection.ibm_is_lb_pool_member: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}" - pool: "{{ 
(__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-enq'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" - port: "{{ ('39' + sap_system_nwas_abap_ascs_instance_nr) | int }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-ms-http'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" + port: "{{ ('81' + sap_system_nwas_abap_ascs_instance_nr) | int }}" weight: 1 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" loop: "{{ (groups['nwas_ers'] | default([]) ) }}" @@ -644,14 +932,14 @@ # Primary -- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTP sapctrl process - Member 1 +- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - Enqueue Server sapenq process - Member 1 no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs7 ibm.cloudcollection.ibm_is_lb_pool_member: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}" - pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-sapctrl'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" - port: "{{ ('5' + sap_system_nwas_abap_ascs_instance_nr + '13') | int }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-enq'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" + port: "{{ ('39' + sap_system_nwas_abap_ascs_instance_nr) | int }}" weight: 100 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" loop: "{{ (groups['nwas_ascs'] | default([]) ) }}" @@ -661,14 +949,14 @@ failed_when: not __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs7.rc == 0 and not 'already exists in a pool' in __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs7.stderr # Secondary -- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTP sapctrl process - Member 2 (Failover/Secondary) +- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - Enqueue Server sapenq process - Member 2 (Failover/Secondary) no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs8 ibm.cloudcollection.ibm_is_lb_pool_member: - 
lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}" - pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-sapctrl'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" - port: "{{ ('5' + sap_system_nwas_abap_ascs_instance_nr + '13') | int }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-enq'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" + port: "{{ ('39' + sap_system_nwas_abap_ascs_instance_nr) | int }}" weight: 1 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" loop: "{{ (groups['nwas_ers'] | default([]) ) }}" @@ -679,14 +967,14 @@ # Primary -- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTPS (Secure) sapctrls - Member 1 +- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - Enqueue Replicator Server sapenqrepl process - Member 1 no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs9 ibm.cloudcollection.ibm_is_lb_pool_member: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}" - pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-sapctrls'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" - port: "{{ ('5' + sap_system_nwas_abap_ascs_instance_nr + '14') | int }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-enqrepl'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" + port: "{{ ('5' + sap_system_nwas_abap_ascs_instance_nr + '16') | int }}" weight: 100 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" loop: "{{ (groups['nwas_ascs'] | default([]) ) }}" @@ -696,14 +984,14 @@ failed_when: not __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs9.rc == 0 and not 'already exists in a pool' in __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs9.stderr # Secondary -- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTPS (Secure) sapctrls - Member 2 (Failover/Secondary) +- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - Enqueue Replicator Server sapenqrepl process - Member 2 (Failover/Secondary) 
no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs10 ibm.cloudcollection.ibm_is_lb_pool_member: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}" - pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-sapctrls'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" - port: "{{ ('5' + sap_system_nwas_abap_ascs_instance_nr + '14') | int }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-enqrepl'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" + port: "{{ ('5' + sap_system_nwas_abap_ascs_instance_nr + '16') | int }}" weight: 1 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" loop: "{{ (groups['nwas_ers'] | default([]) ) }}" @@ -713,14 +1001,84 @@ failed_when: not __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs10.rc == 0 and not 'already exists in a pool' in __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs10.stderr +# Primary +- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTP sapctrl process - Member 1 + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs11 + ibm.cloudcollection.ibm_is_lb_pool_member: + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-sapctrl'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" + port: "{{ ('5' + sap_system_nwas_abap_ascs_instance_nr + '13') | int }}" + weight: 100 + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + loop: "{{ (groups['nwas_ascs'] | default([]) ) }}" + loop_control: + loop_var: host_node + when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) + failed_when: not __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs11.rc == 0 and not 'already exists in a pool' in __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs11.stderr + +# Secondary +- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTP sapctrl process - Member 2 (Failover/Secondary) + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs12 + ibm.cloudcollection.ibm_is_lb_pool_member: + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | 
json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-sapctrl'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" + port: "{{ ('5' + sap_system_nwas_abap_ascs_instance_nr + '13') | int }}" + weight: 1 + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + loop: "{{ (groups['nwas_ers'] | default([]) ) }}" + loop_control: + loop_var: host_node + when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) + failed_when: not __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs12.rc == 0 and not 'already exists in a pool' in __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs12.stderr + + +# Primary +- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTPS (Secure) sapctrls - Member 1 + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs13 + ibm.cloudcollection.ibm_is_lb_pool_member: + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-sapctrls'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" + port: "{{ ('5' + sap_system_nwas_abap_ascs_instance_nr + '14') | int }}" + weight: 100 + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + loop: "{{ (groups['nwas_ascs'] | default([]) ) }}" + loop_control: + loop_var: host_node + when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) + failed_when: not __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs13.rc == 0 and not 'already exists in a pool' in __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs13.stderr + +# Secondary +- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTPS (Secure) sapctrls - Member 2 (Failover/Secondary) + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs14 + ibm.cloudcollection.ibm_is_lb_pool_member: + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-sapctrls'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" + port: "{{ ('5' + sap_system_nwas_abap_ascs_instance_nr + '14') | int }}" + weight: 1 + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + loop: "{{ (groups['nwas_ers'] | default([]) ) }}" + loop_control: + loop_var: host_node + when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) + failed_when: not __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs14.rc == 0 and not 'already exists in a pool' in 
__sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ascs14.stderr + + # Primary # - name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ERS - Dispatcher sapdp process - Member 1 # no_log: "{{ __sap_vm_provision_no_log }}" # register: __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ers1 # ibm.cloudcollection.ibm_is_lb_pool_member: -# lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}" -# pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-dp'))[0].id }}" -# target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" +# lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].id }}" +# pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-dp'))[0].id }}" +# target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" # port: "{{ ('32' + sap_system_nwas_abap_ers_instance_nr) | int }}" # weight: 100 # ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -735,9 +1093,9 @@ # no_log: "{{ __sap_vm_provision_no_log }}" # register: __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ers2 # ibm.cloudcollection.ibm_is_lb_pool_member: -# lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}" -# pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-dp'))[0].id }}" -# target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" +# lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].id }}" +# pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-dp'))[0].id }}" +# target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" # port: "{{ ('32' + sap_system_nwas_abap_ers_instance_nr) | int }}" # weight: 1 # ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -753,9 +1111,9 @@ # no_log: "{{ __sap_vm_provision_no_log }}" # register: __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ers3 # ibm.cloudcollection.ibm_is_lb_pool_member: -# lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}" -# pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-ms'))[0].id }}" -# target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" +# lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].id }}" +# pool: 
"{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-ms'))[0].id }}" +# target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" # port: "{{ ('36' + sap_system_nwas_abap_ers_instance_nr) | int }}" # weight: 100 # ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -770,9 +1128,9 @@ # no_log: "{{ __sap_vm_provision_no_log }}" # register: __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ers4 # ibm.cloudcollection.ibm_is_lb_pool_member: -# lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}" -# pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-ms'))[0].id }}" -# target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" +# lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].id }}" +# pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-ms'))[0].id }}" +# target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" # port: "{{ ('36' + sap_system_nwas_abap_ers_instance_nr) | int }}" # weight: 1 # ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -788,9 +1146,9 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ers5 ibm.cloudcollection.ibm_is_lb_pool_member: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}" - pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-enqr'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-enqr'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" port: "{{ ('39' + sap_system_nwas_abap_ers_instance_nr) | int }}" weight: 100 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -805,9 +1163,9 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ers6 ibm.cloudcollection.ibm_is_lb_pool_member: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}" - pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 
'lb-sap-ha-nwas-ers-pool-enqr'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-enqr'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" port: "{{ ('39' + sap_system_nwas_abap_ers_instance_nr) | int }}" weight: 1 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -823,9 +1181,9 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ers7 ibm.cloudcollection.ibm_is_lb_pool_member: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}" - pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-sapctrl'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-sapctrl'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" port: "{{ ('5' + sap_system_nwas_abap_ers_instance_nr + '13') | int }}" weight: 100 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -840,9 +1198,9 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ers8 ibm.cloudcollection.ibm_is_lb_pool_member: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}" - pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-sapctrl'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-sapctrl'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" port: "{{ ('5' + sap_system_nwas_abap_ers_instance_nr + '13') | int }}" weight: 1 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -858,9 +1216,9 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: 
__sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ers9 ibm.cloudcollection.ibm_is_lb_pool_member: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}" - pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-sapctrls'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-sapctrls'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" port: "{{ ('5' + sap_system_nwas_abap_ers_instance_nr + '14') | int }}" weight: 100 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -875,9 +1233,9 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ers10 ibm.cloudcollection.ibm_is_lb_pool_member: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}" - pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-sapctrls'))[0].id }}" - target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}" + lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].id }}" + pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-sapctrls'))[0].id }}" + target_address: "{{ (__sap_vm_provision_task_ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_attachment[0].primary_ip[0].address }}" port: "{{ ('5' + sap_system_nwas_abap_ers_instance_nr + '14') | int }}" weight: 1 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -888,189 +1246,6 @@ failed_when: not __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ers10.rc == 0 and not 'already exists in a pool' in __sap_vm_provision_task_ibmcloud_lb_pool_members_nwas_ers10.stderr -# Create IBM Cloud Load Balancer Front-end Listeners (open port for Virtual IPs) - -- name: Create IBM Cloud Load Balancer Front-end Listener for SAP HANA - System DB SQL - no_log: "{{ __sap_vm_provision_no_log }}" - register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_hana1 - ibm.cloudcollection.ibm_is_lb_listener: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}" - default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-hana-pool-sysdb-sql'))[0].id }}" - protocol: tcp - port: "{{ ('3' + sap_system_hana_db_instance_nr + '13') | int }}" - ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" - when: (groups['hana_secondary'] is defined and 
(groups['hana_secondary'] | length>0)) - failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_hana1.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_hana1.stderr - -- name: Create IBM Cloud Load Balancer Front-end Listener for SAP HANA - MDC Tenant 1 SQL - no_log: "{{ __sap_vm_provision_no_log }}" - register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_hana2 - ibm.cloudcollection.ibm_is_lb_listener: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}" - default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-hana-pool-mdc1-sql'))[0].id }}" - protocol: tcp - port: "{{ ('3' + sap_system_hana_db_instance_nr + '15') | int }}" - ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" - when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0)) - failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_hana2.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_hana2.stderr - -- name: Create IBM Cloud Load Balancer Front-end Listener for SAP HANA - startsrv HTTP - no_log: "{{ __sap_vm_provision_no_log }}" - register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_hana3 - ibm.cloudcollection.ibm_is_lb_listener: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}" - default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-hana-pool-startsrv-http'))[0].id }}" - protocol: tcp - port: "{{ ('5' + sap_system_hana_db_instance_nr + '13') | int }}" - ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" - when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0)) - failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_hana3.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_hana3.stderr - -- name: Create IBM Cloud Load Balancer Front-end Listener for SAP HANA - startsrv HTTPS - no_log: "{{ __sap_vm_provision_no_log }}" - register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_hana4 - ibm.cloudcollection.ibm_is_lb_listener: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}" - default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-hana-pool-startsrv-https'))[0].id }}" - protocol: tcp - port: "{{ ('5' + sap_system_hana_db_instance_nr + '14') | int }}" - ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" - when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0)) - failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_hana4.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_hana4.stderr - -- name: Create IBM Cloud Load Balancer Front-end Listener for SAP AnyDB - IBM Db2 Communication Port - no_log: "{{ __sap_vm_provision_no_log }}" - register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_anydb1 - ibm.cloudcollection.ibm_is_lb_listener: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-anydb'))[0].id }}" - default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | 
flatten | selectattr('name', '==', 'lb-sap-ha-anydb-pool-ibmdb2'))[0].id }}" - protocol: tcp - port: 5912 - ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" - when: (groups['anydb_secondary'] is defined and (groups['anydb_secondary'] | length>0)) - failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_anydb1.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_anydb1.stderr - -- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ASCS - Dispatcher sapdp process - no_log: "{{ __sap_vm_provision_no_log }}" - register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs1 - ibm.cloudcollection.ibm_is_lb_listener: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}" - default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-dp'))[0].id }}" - protocol: tcp - port: "{{ ('32' + sap_system_nwas_abap_ascs_instance_nr) | int }}" - ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" - when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) - failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs1.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs1.stderr - -- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ASCS - Message Server sapms process - no_log: "{{ __sap_vm_provision_no_log }}" - register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs2 - ibm.cloudcollection.ibm_is_lb_listener: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}" - default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-ms'))[0].id }}" - protocol: tcp - port: "{{ ('36' + sap_system_nwas_abap_ascs_instance_nr) | int }}" - ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" - when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) - failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs2.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs2.stderr - -- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ASCS - Enqueue Server sapenq process - no_log: "{{ __sap_vm_provision_no_log }}" - register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs3 - ibm.cloudcollection.ibm_is_lb_listener: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}" - default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-enq'))[0].id }}" - protocol: tcp - port: "{{ ('39' + sap_system_nwas_abap_ascs_instance_nr) | int }}" - ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" - when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) - failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs3.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs3.stderr - -- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTP sapctrl process - no_log: "{{ __sap_vm_provision_no_log }}" - register: 
__sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs4 - ibm.cloudcollection.ibm_is_lb_listener: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}" - default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-sapctrl'))[0].id }}" - protocol: tcp - port: "{{ ('5' + sap_system_nwas_abap_ascs_instance_nr + '13') | int }}" - ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" - when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) - failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs4.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs4.stderr - -- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTPS (Secure) sapctrls - no_log: "{{ __sap_vm_provision_no_log }}" - register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs5 - ibm.cloudcollection.ibm_is_lb_listener: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}" - default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-sapctrls'))[0].id }}" - protocol: tcp - port: "{{ ('5' + sap_system_nwas_abap_ascs_instance_nr + '14') | int }}" - ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" - when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) - failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs5.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ascs5.stderr - -# - name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ERS - Dispatcher sapdp process -# no_log: "{{ __sap_vm_provision_no_log }}" -# register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers1 -# ibm.cloudcollection.ibm_is_lb_listener: -# lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}" -# default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-dp'))[0].id }}" -# protocol: tcp -# port: "{{ ('32' + sap_system_nwas_abap_ers_instance_nr) | int }}" -# ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" -# when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) -# failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers1.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers1.stderr - -# - name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ERS - Message Server sapms process -# no_log: "{{ __sap_vm_provision_no_log }}" -# register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers2 -# ibm.cloudcollection.ibm_is_lb_listener: -# lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}" -# default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-ms'))[0].id }}" -# protocol: tcp -# port: "{{ ('36' + sap_system_nwas_abap_ers_instance_nr) | int }}" -# ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" -# when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | 
length>0)) -# failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers2.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers2.stderr - -- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ERS - Enqueue Replication Server sapenqr process - no_log: "{{ __sap_vm_provision_no_log }}" - register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers3 - ibm.cloudcollection.ibm_is_lb_listener: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}" - default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-enqr'))[0].id }}" - protocol: tcp - port: "{{ ('39' + sap_system_nwas_abap_ers_instance_nr) | int }}" - ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" - when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) - failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers3.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers3.stderr - -- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ERS - SAP Start Service (SAPControl SOAP) HTTP sapctrl process - no_log: "{{ __sap_vm_provision_no_log }}" - register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers4 - ibm.cloudcollection.ibm_is_lb_listener: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}" - default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-sapctrl'))[0].id }}" - protocol: tcp - port: "{{ ('5' + sap_system_nwas_abap_ers_instance_nr + '13') | int }}" - ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" - when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) - failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers4.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers4.stderr - -- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ERS - SAP Start Service (SAPControl SOAP) HTTPS (Secure) sapctrls - no_log: "{{ __sap_vm_provision_no_log }}" - register: __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers5 - ibm.cloudcollection.ibm_is_lb_listener: - lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}" - default_pool: "{{ (__sap_vm_provision_task_ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-sapctrls'))[0].id }}" - protocol: tcp - port: "{{ ('5' + sap_system_nwas_abap_ers_instance_nr + '14') | int }}" - ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" - when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) - failed_when: not __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers5.rc == 0 and not 'listener_duplicate_port' in __sap_vm_provision_task_ibmcloud_lb_frontend_listener_ers5.stderr - - # Set DNS A Record for Virtual IP (use the first of the IBM Cloud Load Balancer instance assigned Private IPs in the VPC Subnet Range) - name: IBM Cloud Private DNS Record for SAP HANA HA Virtual Hostname @@ -1080,7 +1255,7 @@ instance_id: "{{ __sap_vm_provision_task_ibmcloud_pdns_service_instance.resource.guid }}" zone_id: "{{ 
(__sap_vm_provision_task_ibmcloud_pdns.resource.dns_zones | selectattr('name', '==', sap_vm_provision_dns_root_domain) | first).zone_id }}" name: "{{ sap_swpm_db_host }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}" # Host FQDN - rdata: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].private_ips[0].address }}" # IP Address + rdata: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana))[0].private_ips[0].address }}" # IP Address type: A ttl: 7200 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -1098,7 +1273,7 @@ instance_id: "{{ __sap_vm_provision_task_ibmcloud_pdns_service_instance.resource.guid }}" zone_id: "{{ (__sap_vm_provision_task_ibmcloud_pdns.resource.dns_zones | selectattr('name', '==', sap_vm_provision_dns_root_domain) | first).zone_id }}" name: "{{ sap_swpm_db_host }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}" # Host FQDN - rdata: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-anydb'))[0].private_ips[0].address }}" # IP Address + rdata: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_anydb))[0].private_ips[0].address }}" # IP Address type: A ttl: 7200 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -1116,7 +1291,7 @@ instance_id: "{{ __sap_vm_provision_task_ibmcloud_pdns_service_instance.resource.guid }}" zone_id: "{{ (__sap_vm_provision_task_ibmcloud_pdns.resource.dns_zones | selectattr('name', '==', sap_vm_provision_dns_root_domain) | first).zone_id }}" name: "{{ sap_swpm_ascs_instance_hostname }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}" # Host FQDN - rdata: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].private_ips[0].address }}" # IP Address + rdata: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].private_ips[0].address }}" # IP Address type: A ttl: 7200 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -1134,7 +1309,7 @@ instance_id: "{{ __sap_vm_provision_task_ibmcloud_pdns_service_instance.resource.guid }}" zone_id: "{{ (__sap_vm_provision_task_ibmcloud_pdns.resource.dns_zones | selectattr('name', '==', sap_vm_provision_dns_root_domain) | first).zone_id }}" name: "{{ sap_swpm_ers_instance_hostname }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}" # Host FQDN - rdata: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].private_ips[0].address }}" # IP Address + rdata: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].private_ips[0].address }}" # IP Address type: A ttl: 7200 ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" @@ -1146,26 +1321,54 @@ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0) -- name: Set facts for all hosts - use facts from IBM Cloud Load Balancer - SAP HANA +- name: Set facts for all hosts - use facts from localhost - HA/DR - IBM Cloud Load Balancer - SAP HANA Primary node ansible.builtin.set_fact: - sap_ha_pacemaker_cluster_vip_hana_primary_ip_address: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].private_ips[0].address }}" + sap_vm_provision_ha_vip_hana_primary: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana))[0].private_ips[0].address }}" + sap_vm_temp_vip_hana_primary: "{{ (ibmcloud_lbs_all_info | selectattr('name', 
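
# Illustrative sketch (annotation, not part of the patch): the hard-coded Load Balancer names in
# the DNS records above are replaced by the new sap_vm_provision_ha_load_balancer_name_* vars,
# with '-ascs' / '-ers' appended for the two NetWeaver Load Balancers. Assuming the role defaults
# keep the previous names (an assumption, not confirmed here), the values would be:
sap_vm_provision_ha_load_balancer_name_hana: lb-sap-ha-hana
sap_vm_provision_ha_load_balancer_name_anydb: lb-sap-ha-anydb
sap_vm_provision_ha_load_balancer_name_nwas: lb-sap-ha-nwas   # + '-ascs' -> lb-sap-ha-nwas-ascs, + '-ers' -> lb-sap-ha-nwas-ers
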
'==', sap_vm_provision_ha_load_balancer_name_hana))[0].private_ips[0].address }}" + sap_ha_pacemaker_cluster_vip_hana_primary_ip_address: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana))[0].private_ips[0].address }}" when: - sap_ha_pacemaker_cluster_ibmcloud_region is defined - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0) + delegate_to: "{{ item }}" + delegate_facts: true + loop: "{{ groups_merged_list }}" - -- name: Set facts for all hosts - use facts from IBM Cloud Load Balancer - SAP AnyDB +- name: Set facts for all hosts - use facts from localhost - HA/DR - IBM Cloud Load Balancer - SAP AnyDB Primary node ansible.builtin.set_fact: - sap_vm_temp_vip_anydb_primary: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-anydb'))[0].private_ips[0].address }}" + sap_vm_provision_ha_vip_anydb_primary: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_anydb))[0].private_ips[0].address }}" + sap_vm_temp_vip_anydb_primary: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_anydb))[0].private_ips[0].address }}" + sap_ha_install_anydb_ibmdb2_vip_primary_ip_address: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_anydb))[0].private_ips[0].address }}" when: - sap_ha_pacemaker_cluster_ibmcloud_region is defined - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0) + delegate_to: "{{ item }}" + delegate_facts: true + loop: "{{ groups_merged_list }}" - -- name: Set facts for all hosts - use facts from IBM Cloud Load Balancer - SAP NetWeaver +- name: Set facts for all hosts - use facts from localhost - HA/DR - IBM Cloud Load Balancer - SAP NetWeaver ASCS and ERS ansible.builtin.set_fact: - sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].private_ips[0].address }}" - sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].private_ips[0].address }}" + sap_vm_provision_ha_vip_nwas_abap_ascs: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].private_ips[0].address }}" + sap_vm_temp_vip_nwas_abap_ascs: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].private_ips[0].address }}" + sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].private_ips[0].address }}" + sap_vm_provision_ha_vip_nwas_abap_ers: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].private_ips[0].address }}" + sap_vm_temp_vip_nwas_abap_ers: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].private_ips[0].address }}" + sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].private_ips[0].address }}" when: - sap_ha_pacemaker_cluster_ibmcloud_region is defined - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0) + delegate_to: "{{ item }}" + delegate_facts: true + loop: "{{ groups_merged_list }}" + +# - name: Set facts for all hosts - use facts from 
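
# Illustrative sketch (annotation, not part of the patch): the set_fact tasks above are computed
# once from the Load Balancer lookup but pushed to every provisioned host by combining
# delegate_to with delegate_facts over groups_merged_list. A minimal generic form of the pattern:
- name: Example only - distribute a locally computed fact to all hosts in the merged group list
  ansible.builtin.set_fact:
    example_ha_vip: 10.10.10.10   # hypothetical value; the role uses the LB private IP here
  delegate_to: "{{ item }}"
  delegate_facts: true
  loop: "{{ groups_merged_list }}"
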
localhost - HA/DR - IBM Cloud Load Balancer - SAP NetWeaver PAS and AAS +# ansible.builtin.set_fact: +# sap_vm_temp_vip_nwas_abap_pas: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-pas'))[0].private_ips[0].address }}" +# sap_ha_pacemaker_cluster_vip_nwas_abap_pas_ip_address: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-pas'))[0].private_ips[0].address }}" +# sap_vm_temp_vip_nwas_abap_aas: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-aas'))[0].private_ips[0].address }}" +# sap_ha_pacemaker_cluster_vip_nwas_abap_aas_ip_address: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-aas'))[0].private_ips[0].address }}" +# when: +# - sap_ha_pacemaker_cluster_ibmcloud_region is defined +# - groups["nwas_aas"] is defined and (groups["nwas_aas"]|length>0) +# delegate_to: "{{ item }}" +# delegate_facts: true +# loop: "{{ groups_merged_list }}" diff --git a/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/post_deployment_execute.yml b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/post_deployment_execute.yml index e665532..1a570d5 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/post_deployment_execute.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/post_deployment_execute.yml @@ -54,15 +54,16 @@ # Workaround for bug which populates region and ibmcloud_api_key as TF arguments for ibm_is_lbs_info Ansible Module in legacy Ansible Collection - name: IBM Cloud CLI execution to list Load Balancer/s info + no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_all_info_shell ansible.builtin.shell: | - ibmcloud config --quiet --check-version false && ibmcloud login -g {{ sap_vm_provision_ibmcloud_resource_group_name }} -r {{ sap_vm_provision_ibmcloud_region }} --quiet + ibmcloud config --quiet --check-version false && ibmcloud login --apikey="{{ sap_vm_provision_ibmcloud_api_key }}" -g {{ sap_vm_provision_ibmcloud_resource_group_name }} -r {{ sap_vm_provision_ibmcloud_region }} --quiet #ibmcloud plugin install infrastructure-service -f --quiet ibmcloud is load-balancers --quiet --output json - name: Set fact for IBM Cloud Load Balancer/s info ansible.builtin.set_fact: - ibmcloud_lbs_all_info: "{{ __sap_vm_provision_task_ibmcloud_lb_all_info_shell.stdout | split('Space:') | last | trim | from_json }}" + ibmcloud_lbs_all_info: "{{ ('[' + (__sap_vm_provision_task_ibmcloud_lb_all_info_shell.stdout | split('[',1) | last)) | trim | from_json }}" - name: Set fact for IBM Cloud Load Balancer Back-end Pools to target ansible.builtin.set_fact: @@ -74,7 +75,7 @@ loop: "{{ ibmcloud_lbs_all_info }}" loop_control: label: "{{ item.name }}" - when: "('lb-sap-ha-hana' in item.name) or ('lb-sap-ha-anydb' in item.name) or ('lb-sap-ha-nwas-ascs' in item.name) or ('lb-sap-ha-nwas-ers' in item.name)" + when: "(sap_vm_provision_ha_load_balancer_name_hana in item.name) or (sap_vm_provision_ha_load_balancer_name_anydb in item.name) or (sap_vm_provision_ha_load_balancer_name_nwas + '-ascs' in item.name) or (sap_vm_provision_ha_load_balancer_name_nwas + '-ers' in item.name)" # - name: Identify IBM Cloud Load Balancer Back-end Pools # no_log: "{{ __sap_vm_provision_no_log }}" @@ -85,15 +86,15 @@ # loop: "{{ ibmcloud_lbs_all_info }}" # loop_control: # label: "{{ item.name }}" - # when: "('lb-sap-ha-hana' in item.name) or 
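
# Illustrative sketch (annotation, not part of the patch): the set_fact above recovers the JSON
# array from the ibmcloud CLI output by discarding everything before the first '[' (login and
# resource group banner text) and re-prepending the bracket before from_json. With hypothetical
# CLI output, the same filter chain behaves like this:
- name: Example only - strip CLI banner text and parse the JSON list of Load Balancers
  vars:
    example_cli_stdout: |
      OK
      Targeted resource group example-rg
      [{"name": "lb-sap-ha-hana", "private_ips": [{"address": "10.0.0.10"}]}]
  ansible.builtin.set_fact:
    example_lbs_info: "{{ ('[' + (example_cli_stdout | split('[', 1) | last)) | trim | from_json }}"
# example_lbs_info is then a list, so selectattr('name', '==', ...) works as in the tasks above.
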
('lb-sap-ha-anydb' in item.name) or ('lb-sap-ha-nwas-ascs' in item.name) or ('lb-sap-ha-nwas-ers' in item.name)" + # when: "(sap_vm_provision_ha_load_balancer_name_hana in item.name) or (sap_vm_provision_ha_load_balancer_name_anydb in item.name) or (sap_vm_provision_ha_load_balancer_name_nwas + '-ascs' in item.name) or (sap_vm_provision_ha_load_balancer_name_nwas + '-ers' in item.name)" - name: Update IBM Cloud Load Balancer Back-end Pool Health Check Port to Linux Pacemaker controlled listening port for SAP HANA - System DB SQL no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_hana1 ibm.cloudcollection.ibm_is_lb_pool: - id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-hana-pool-sysdb-sql') | first).id }}" - name: lb-sap-ha-hana-pool-sysdb-sql + id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana + '-pool-sysdb-sql') | first).id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-pool-sysdb-sql' }}" # lb: # Do not use, will force create new resource algorithm: weighted_round_robin protocol: tcp @@ -109,8 +110,8 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_hana2 ibm.cloudcollection.ibm_is_lb_pool: - id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-hana-pool-mdc1-sql') | first).id }}" - name: lb-sap-ha-hana-pool-mdc1-sql + id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana + '-pool-mdc1-sql') | first).id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-pool-mdc1-sql' }}" # lb: # Do not use, will force create new resource algorithm: weighted_round_robin protocol: tcp @@ -126,8 +127,8 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_hana3 ibm.cloudcollection.ibm_is_lb_pool: - id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-hana-pool-startsrv-http') | first).id }}" - name: lb-sap-ha-hana-pool-startsrv-http + id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana + '-pool-startsrv-http') | first).id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-pool-startsrv-http' }}" # lb: # Do not use, will force create new resource algorithm: weighted_round_robin protocol: tcp @@ -143,8 +144,8 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_hana4 ibm.cloudcollection.ibm_is_lb_pool: - id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-hana-pool-startsrv-https') 
| first).id }}" - name: lb-sap-ha-hana-pool-startsrv-https + id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana + '-pool-startsrv-https') | first).id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-pool-startsrv-https' }}" # lb: # Do not use, will force create new resource algorithm: weighted_round_robin protocol: tcp @@ -159,8 +160,8 @@ - name: Update IBM Cloud Load Balancer Back-end Pool Health Check Port to Linux Pacemaker controlled listening port for SAP AnyDB - IBM Db2 Communication Port no_log: "{{ __sap_vm_provision_no_log }}" ibm.cloudcollection.ibm_is_lb_pool: - id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-anydb'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-anydb-pool-ibmdb2') | first).id }}" - name: lb-sap-ha-anydb-pool-ibmdb2 + id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_anydb))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_anydb + '-anydb-pool-ibmdb2') | first).id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_anydb + '-ibmdb2' }}" # lb: # Do not use, will force create new resource algorithm: weighted_round_robin protocol: tcp @@ -176,8 +177,8 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_nwas_ascs1 ibm.cloudcollection.ibm_is_lb_pool: - id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-dp') | first).id }}" - name: lb-sap-ha-nwas-ascs-pool-dp + id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-dp') | first).id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-dp' }}" # lb: # Do not use, will force create new resource algorithm: weighted_round_robin protocol: tcp @@ -193,8 +194,8 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_nwas_ascs2 ibm.cloudcollection.ibm_is_lb_pool: - id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-ms') | first).id }}" - name: lb-sap-ha-nwas-ascs-pool-ms + id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-ms') | first).id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-ms' }}" # lb: # Do not use, will force create new resource algorithm: weighted_round_robin protocol: tcp @@ -206,12 +207,12 @@ ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) - - name: Update IBM Cloud Load Balancer Back-end Pool Health Check Port to Linux 
Pacemaker controlled listening port for SAP NetWeaver ASCS - Enqueue Server sapenq process + - name: Update IBM Cloud Load Balancer Back-end Pool Health Check Port to Linux Pacemaker controlled listening port for SAP NetWeaver ASCS - Message Server HTTP sapms process no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_nwas_ascs3 ibm.cloudcollection.ibm_is_lb_pool: - id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-enq') | first).id }}" - name: lb-sap-ha-nwas-ascs-pool-enq + id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-ms') | first).id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-ms-http' }}" # lb: # Do not use, will force create new resource algorithm: weighted_round_robin protocol: tcp @@ -223,12 +224,12 @@ ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) - - name: Update IBM Cloud Load Balancer Back-end Pool Health Check Port to Linux Pacemaker controlled listening port for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTP sapctrl process + - name: Update IBM Cloud Load Balancer Back-end Pool Health Check Port to Linux Pacemaker controlled listening port for SAP NetWeaver ASCS - Enqueue Server sapenq process no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_nwas_ascs4 ibm.cloudcollection.ibm_is_lb_pool: - id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-sapctrl') | first).id }}" - name: lb-sap-ha-nwas-ascs-pool-sapctrl + id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-enq') | first).id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-enq' }}" # lb: # Do not use, will force create new resource algorithm: weighted_round_robin protocol: tcp @@ -240,12 +241,46 @@ ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) - - name: Update IBM Cloud Load Balancer Back-end Pool Health Check Port to Linux Pacemaker controlled listening port for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTPS (Secure) sapctrls + - name: Update IBM Cloud Load Balancer Back-end Pool Health Check Port to Linux Pacemaker controlled listening port for SAP NetWeaver ASCS - Enqueue Replicator Server sapenqrepl process no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_nwas_ascs5 ibm.cloudcollection.ibm_is_lb_pool: - id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-sapctrls') | first).id }}" - name: lb-sap-ha-nwas-ascs-pool-sapctrls + id: "{{ 
(ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-enq') | first).id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-enqrepl' }}" + # lb: # Do not use, will force create new resource + algorithm: weighted_round_robin + protocol: tcp + health_delay: 20 + health_retries: 2 + health_timeout: 10 + health_type: tcp + health_monitor_port: "{{ ibmcloud_lb_pool_healthcheck_nwas_ascs }}" + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) + + - name: Update IBM Cloud Load Balancer Back-end Pool Health Check Port to Linux Pacemaker controlled listening port for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTP sapctrl process + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_lb_pool_nwas_ascs6 + ibm.cloudcollection.ibm_is_lb_pool: + id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-sapctrl') | first).id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-sapctrl' }}" + # lb: # Do not use, will force create new resource + algorithm: weighted_round_robin + protocol: tcp + health_delay: 20 + health_retries: 2 + health_timeout: 10 + health_type: tcp + health_monitor_port: "{{ ibmcloud_lb_pool_healthcheck_nwas_ascs }}" + ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" + when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) + + - name: Update IBM Cloud Load Balancer Back-end Pool Health Check Port to Linux Pacemaker controlled listening port for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTPS (Secure) sapctrls + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmcloud_lb_pool_nwas_ascs7 + ibm.cloudcollection.ibm_is_lb_pool: + id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-sapctrls') | first).id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-pool-sapctrls' }}" # lb: # Do not use, will force create new resource algorithm: weighted_round_robin protocol: tcp @@ -261,8 +296,8 @@ # no_log: "{{ __sap_vm_provision_no_log }}" # register: __sap_vm_provision_task_ibmcloud_lb_pool_nwas_ers1 # ibm.cloudcollection.ibm_is_lb_pool: - # id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-dp') | first).id }}" - # name: lb-sap-ha-nwas-ers-pool-dp + # id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-dp') | first).id }}" + # name: name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-dp' }}" # # lb: # Do not use, will 
force create new resource # algorithm: weighted_round_robin # protocol: tcp @@ -278,8 +313,8 @@ # no_log: "{{ __sap_vm_provision_no_log }}" # register: __sap_vm_provision_task_ibmcloud_lb_pool_nwas_ers2 # ibm.cloudcollection.ibm_is_lb_pool: - # id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-ms') | first).id }}" - # name: lb-sap-ha-nwas-ers-pool-ms + # id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-ms') | first).id }}" + # name: name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-ms' }}" # # lb: # Do not use, will force create new resource # algorithm: weighted_round_robin # protocol: tcp @@ -295,8 +330,8 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_nwas_ers3 ibm.cloudcollection.ibm_is_lb_pool: - id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-enqr') | first).id }}" - name: lb-sap-ha-nwas-ers-pool-enqr + id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-enqr') | first).id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-enqr' }}" # lb: # Do not use, will force create new resource algorithm: weighted_round_robin protocol: tcp @@ -312,8 +347,8 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_nwas_ers4 ibm.cloudcollection.ibm_is_lb_pool: - id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-sapctrl') | first).id }}" - name: lb-sap-ha-nwas-ers-pool-sapctrl + id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-sapctrl') | first).id }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-sapctrl' }}" # lb: # Do not use, will force create new resource algorithm: weighted_round_robin protocol: tcp @@ -329,8 +364,8 @@ no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_ibmcloud_lb_pool_nwas_ers5 ibm.cloudcollection.ibm_is_lb_pool: - id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-sapctrls') | first).id }}" - name: lb-sap-ha-nwas-ers-pool-sapctrls + id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-sapctrls') | first).id }}" + name: "{{ 
sap_vm_provision_ha_load_balancer_name_nwas + '-ers-pool-sapctrls' }}" # lb: # Do not use, will force create new resource algorithm: weighted_round_robin protocol: tcp diff --git a/roles/sap_vm_provision/tasks/platform_ansible/ibmpowervm_vm/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible/ibmpowervm_vm/execute_main.yml index 023dc59..69f7a0a 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible/ibmpowervm_vm/execute_main.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible/ibmpowervm_vm/execute_main.yml @@ -52,6 +52,25 @@ auth: "{{ __sap_vm_provision_task_ibmpowervm_openstack_auth }}" throttle: 1 + - name: Create Collocation Rules when High Availability + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_ibmpowervm_collocation_rule + run_once: true + openstack.cloud.server_group: + name: "{{ sap_vm_provision_ibmpowervm_placement_resource_name }}-{{ item }}" + policy: anti-affinity + #region_name: "RegionOne" + validate_certs: false # Allow Self-Signed Certificate + auth: "{{ __sap_vm_provision_task_ibmpowervm_openstack_auth }}" + loop: + - "{{ 'hana' if 'hana_secondary' in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan] | json_query('*') | map(attribute='sap_host_type') else '' }}" + - "{{ 'anydb' if 'anydb_secondary' in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan] | json_query('*') | map(attribute='sap_host_type') else '' }}" + - "{{ 'nwas' if 'nwas_ers' in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan] | json_query('*') | map(attribute='sap_host_type') else '' }}" + when: + - sap_vm_provision_ibmpowervm_placement_resource_name is defined + - sap_vm_provision_ibmpowervm_placement_strategy_spread + - not item == '' + - name: Provision hosts to IBM PowerVM register: __sap_vm_provision_task_provision_host_all_run ansible.builtin.include_tasks: @@ -80,7 +99,7 @@ - name: Set fact to hold all inventory hosts in all groups ansible.builtin.set_fact: - groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}" + groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['anydb_primary'] | default([]) ] , [ groups['anydb_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}" - name: Set Ansible Vars register: __sap_vm_provision_task_ansible_vars_set diff --git a/roles/sap_vm_provision/tasks/platform_ansible/ibmpowervm_vm/execute_provision.yml b/roles/sap_vm_provision/tasks/platform_ansible/ibmpowervm_vm/execute_provision.yml index 9103db1..637f229 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible/ibmpowervm_vm/execute_provision.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible/ibmpowervm_vm/execute_provision.yml @@ -7,7 +7,7 @@ fail_msg: "FAIL: The length of the hostname is {{ inventory_hostname | length | int }} but must be less or equal to 13 
characters!" # NOTE: Pre-requisite to create IBM PowerVC Storage Templates (OpenStack Cinder Volume Type), which is not possible from Ansible Collection for Openstack -# https://www.ibm.com/docs/en/powervc/1.4.3?topic=apis-supported-volume-type-extra-specs +# www.ibm.com/docs/en/powervc/latest?topic=apis-supported-volume-type-extra-specs ### Show IBM PowerVC Storage list ### openstack --insecure volume service list --service cinder-volume ### Show IBM PowerVC Storage Template list @@ -21,6 +21,10 @@ - sap_vm_provision_calculate_sap_hana_scaleout_active_coordinator is defined - not inventory_hostname in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan].keys() +- name: Set fact for host specifications of the provision target + ansible.builtin.set_fact: + target_provision_host_spec: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)] }}" + - name: Check if VM exists register: __sap_vm_provision_task_provision_host_single_check_exists openstack.cloud.server_info: @@ -52,7 +56,7 @@ - __sap_vm_provision_task_provision_host_single_check_exists.servers | length == 0 block: - # See documented IBM PowerVM Compute Template (OpenStack Flavor) extra specs - https://www.ibm.com/docs/en/powervc-cloud/latest?topic=apis-flavors-extra-specs + # See documented IBM PowerVM Compute Template (OpenStack Flavor) extra specs - www.ibm.com/docs/en/powervc/latest?topic=apis-flavors-extra-specs - name: Create IBM PowerVM Compute Template register: __sap_vm_provision_task_ibmpowervm_compute_template openstack.cloud.compute_flavor: @@ -62,8 +66,8 @@ ## Virtual Machine main resources definition # Assume SMT-8, 1 IBM Power CPU Core therefore divide by 8 = CPU Threads - vcpus: "{{ (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_cpu_threads / lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_cpu_smt) | round(0, 'common') | int }}" # Virtual Processors (i.e. IBM Power CPU Cores), Desired. API must receive an integer - ram: "{{ (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_memory_gib * 1024) | int }}" # Memory (MiB), Desired + vcpus: "{{ (target_provision_host_spec.ibmpowervm_vm_cpu_threads / target_provision_host_spec.ibmpowervm_vm_cpu_smt) | round(0, 'common') | int }}" # Virtual Processors (i.e. IBM Power CPU Cores), Desired. API must receive an integer + ram: "{{ (target_provision_host_spec.ibmpowervm_vm_memory_gib * 1024) | int }}" # Memory (MiB), Desired disk: 0 # Must be set to 0 otherwise conflicts with OS Image template swap: 0 # Must be set to 0 otherwise error "failed with exception: Build of instance xxxx was re-scheduled: list index out of range" is_public: true @@ -71,16 +75,16 @@ # After creation, modifications to extra_specs parameters may not be identified extra_specs: #### Virtual Processors (i.e. 
IBM Power CPU Cores) - for Production systems must be minimum of 4 #### - "powervm:min_vcpu": "{{ [((lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_cpu_threads / lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_cpu_smt) * 0.75) | round(0, 'floor'), 1] | max | int }}" # Virtual Processors (i.e. IBM Power CPU Cores), Minimum. Value of 1 is lowest possible. API must receive an integer - "powervm:max_vcpu": "{{ ((lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_cpu_threads / lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_cpu_smt) * 1.20) | round(0, 'ceil') | int }}" # Virtual Processors (i.e. IBM Power CPU Cores), Maximum. API must receive an integer + "powervm:min_vcpu": "{{ [((target_provision_host_spec.ibmpowervm_vm_cpu_threads / target_provision_host_spec.ibmpowervm_vm_cpu_smt) * 0.75) | round(0, 'floor'), 1] | max | int }}" # Virtual Processors (i.e. IBM Power CPU Cores), Minimum. Value of 1 is lowest possible. API must receive an integer + "powervm:max_vcpu": "{{ ((target_provision_host_spec.ibmpowervm_vm_cpu_threads / target_provision_host_spec.ibmpowervm_vm_cpu_smt) * 1.20) | round(0, 'ceil') | int }}" # Virtual Processors (i.e. IBM Power CPU Cores), Maximum. API must receive an integer #### Dynamic LPAR Entitled Capacity of Virtual Processor units (i.e. IBM Power CPU Cores guaranteed to be available) #### # Processing units set minimum to 80% of the minimum Virtual Processors (i.e. IBM Power CPU Cores) # Processing units set standard to 80% of the Virtual Processors (i.e. IBM Power CPU Cores) # Processing units set maximum to 100% of the maximum Virtual Processors (i.e. 
IBM Power CPU Cores) - "powervm:min_proc_units": "{{ ((((lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_cpu_threads / lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_cpu_smt) * 0.75) | round(1, 'floor')) * 0.8) | round(2, 'floor') | float }}" # Processing units, Minimum - "powervm:proc_units": "{{ ((lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_cpu_threads / lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_cpu_smt) | round(0, 'common')) * 0.8 | round(2, 'common') | float }}" # Processing units, Desired - "powervm:max_proc_units": "{{ ((lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_cpu_threads / lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_cpu_smt) * 1.20) | round(0, 'ceil') | float }}" # Processing units, Maximum + "powervm:min_proc_units": "{{ ((((target_provision_host_spec.ibmpowervm_vm_cpu_threads / target_provision_host_spec.ibmpowervm_vm_cpu_smt) * 0.75) | round(1, 'floor')) * 0.8) | round(2, 'floor') | float }}" # Processing units, Minimum + "powervm:proc_units": "{{ ((target_provision_host_spec.ibmpowervm_vm_cpu_threads / target_provision_host_spec.ibmpowervm_vm_cpu_smt) | round(0, 'common')) * 0.8 | round(2, 'common') | float }}" # Processing units, Desired + "powervm:max_proc_units": "{{ ((target_provision_host_spec.ibmpowervm_vm_cpu_threads / target_provision_host_spec.ibmpowervm_vm_cpu_smt) * 1.20) | round(0, 'ceil') | float }}" # Processing units, Maximum "powervm:dedicated_proc": "false" #"powervm:dedicated_sharing_mode": "share_idle_procs" # When 'dedicated_proc' true, share_idle_procs = "Allow processor sharing when the virtual machine is inactive" @@ -89,8 +93,8 @@ "powervm:shared_proc_pool_name": "{{ sap_vm_provision_ibmpowervm_host_group_shared_procesor_pool_name }}" "powervm:processor_compatibility": "default" - "powervm:min_mem": "{{ (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_memory_gib * 1024) - (0.25 * (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_memory_gib * 1024)) | round(0, 'ceil') | int }}" # Memory, Minimum. API must receive an integer - "powervm:max_mem": "{{ (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_memory_gib * 1024) | round(0, 'ceil') | int }}" # Memory, Maximum. 
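
# Illustrative worked example (annotation, not part of the patch), assuming a hypothetical host
# spec of ibmpowervm_vm_cpu_threads: 32, ibmpowervm_vm_cpu_smt: 8, ibmpowervm_vm_memory_gib: 256:
#   vcpus (desired cores)          = 32 / 8                     = 4
#   powervm:min_vcpu               = max(floor(4 * 0.75), 1)    = 3
#   powervm:max_vcpu               = ceil(4 * 1.20)             = 5
#   powervm:proc_units (desired)   = 4 * 0.8                    = 3.2
#   powervm:min_proc_units         = floor(4 * 0.75) * 0.8      = 2.4
#   powervm:max_proc_units         = ceil(4 * 1.20)             = 5.0
#   powervm:min_mem                = 262144 - (262144 * 0.25)   = 196608 MiB
#   powervm:max_mem                = 256 * 1024                 = 262144 MiB
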
API must receive an integer + "powervm:min_mem": "{{ (target_provision_host_spec.ibmpowervm_vm_memory_gib * 1024) - (0.25 * (target_provision_host_spec.ibmpowervm_vm_memory_gib * 1024)) | round(0, 'ceil') | int }}" # Memory, Minimum. API must receive an integer + "powervm:max_mem": "{{ (target_provision_host_spec.ibmpowervm_vm_memory_gib * 1024) | round(0, 'ceil') | int }}" # Memory, Maximum. API must receive an integer #"powervm:ame_expansion_factor": 0 "powervm:enforce_affinity_check": "true" @@ -164,6 +168,25 @@ hostname: "{{ inventory_hostname }}" #userdata: | # cloud-init userdata + # Anti-Affinity, IBM PowerVM Collocation Rule (OpenStack Server Group ID) to provision VM to different hosts + # www.ibm.com/docs/en/powervc/latest?topic=powervc-terminology + # www.ibm.com/docs/en/powervc/latest?topic=powervc-collocation-rules + # www.ibm.com/support/pages/powervc-anti-affinity-co-location-rules + scheduler_hints: + # same_host: "" # Affinity, Provision VM to specific host + # additional_properties: + # "drivers:multipath": "0" + group: "{{ ( + (__sap_vm_provision_task_ibmpowervm_collocation_rule.results | selectattr('item','==','hana'))[0].name + if ('hana_primary' in target_provision_host_spec.sap_host_type or 'hana_secondary' in target_provision_host_spec.sap_host_type) + else + (__sap_vm_provision_task_ibmpowervm_collocation_rule.results | selectattr('item','==','anydb'))[0].name + if ('anydb_primary' in target_provision_host_spec.sap_host_type or 'anydb_secondary' in target_provision_host_spec.sap_host_type) + else + (__sap_vm_provision_task_ibmpowervm_collocation_rule.results | selectattr('item','==','nwas'))[0].name + if ('nwas_ascs' in target_provision_host_spec.sap_host_type or 'nwas_ers' in target_provision_host_spec.sap_host_type) + ) | default(omit) }}" + validate_certs: false # Allow Self-Signed Certificate auth: "{{ __sap_vm_provision_task_ibmpowervm_openstack_auth }}" @@ -174,6 +197,7 @@ until: __sap_vm_provision_task_provision_host_single.server.status is defined and __sap_vm_provision_task_provision_host_single.server.status == "ACTIVE" retries: 120 delay: 5 + throttle: "{{ (1 | int) if sap_vm_provision_ibmpowervm_placement_strategy_spread else default(omit) }}" ### End of boot disk and VM creation Ansible Task Block @@ -253,7 +277,7 @@ filesystem_volume_map: | {% set volume_map = [] -%} {% set av_vol = available_volumes -%} - {% for storage_item in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].storage_definition -%} + {% for storage_item in target_provision_host_spec.storage_definition -%} {% for idx in range(0, storage_item.disk_count | default(1)) -%} {% if (storage_item.filesystem_type is defined) -%} {% if ('swap' in storage_item.filesystem_type and storage_item.swap_path is not defined) @@ -288,15 +312,33 @@ openstack.cloud.volume: state: present name: "{{ inventory_hostname }}-vol_{{ vol_item.name }}" - # availability_zone: "" - # scheduler_hints: - # same_host: fs5200#IC-Cloud - # additional_properties: - # "drivers:multipath": "0" - size: "{{ vol_item.size }}" + size: "{{ vol_item.size }}" # GB volume_type: "{{ sap_vm_provision_ibmpowervm_storage_template_name }}" is_multiattach: false is_bootable: false + + availability_zone: "{{ sap_vm_provision_ibmpowervm_host_group_name }}" # IBM PowerVM Hypervisor Cluster Host Group Name + #region_name: "RegionOne" + + # Anti-Affinity, IBM PowerVC 
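
# Illustrative sketch (annotation, not part of the patch): the scheduler_hints group expression
# above selects the anti-affinity Server Group created earlier by matching the host's
# sap_host_type against the loop item of the collocation rule task. The same lookup with a
# hypothetical registered result shape:
- name: Example only - resolve the collocation rule name for a HANA host
  vars:
    example_collocation_results:   # hypothetical shape of the registered server_group results
      - { item: hana, name: example-placement-hana }
      - { item: nwas, name: example-placement-nwas }
  ansible.builtin.debug:
    msg: "{{ (example_collocation_results | selectattr('item', '==', 'hana'))[0].name }}"
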
Collocation Rule (OpenStack Server Group ID) to provision VM to different hosts + # www.ibm.com/docs/en/powervc/latest?topic=powervc-terminology + # www.ibm.com/docs/en/powervc/latest?topic=powervc-collocation-rules + # www.ibm.com/support/pages/powervc-anti-affinity-co-location-rules + scheduler_hints: + # same_host: "" # Affinity, Provision VM Virtual Disks to specific host + # additional_properties: + # "drivers:multipath": "0" + group: "{{ ( + (__sap_vm_provision_task_ibmpowervm_collocation_rule.results | selectattr('item','==','hana'))[0].name + if ('hana_primary' in target_provision_host_spec.sap_host_type or 'hana_secondary' in target_provision_host_spec.sap_host_type) + else + (__sap_vm_provision_task_ibmpowervm_collocation_rule.results | selectattr('item','==','anydb'))[0].name + if ('anydb_primary' in target_provision_host_spec.sap_host_type or 'anydb_secondary' in target_provision_host_spec.sap_host_type) + else + (__sap_vm_provision_task_ibmpowervm_collocation_rule.results | selectattr('item','==','nwas'))[0].name + if ('nwas_ascs' in target_provision_host_spec.sap_host_type or 'nwas_ers' in target_provision_host_spec.sap_host_type) + ) | default(omit) }}" + validate_certs: false # Allow Self-Signed Certificate auth: "{{ __sap_vm_provision_task_ibmpowervm_openstack_auth }}" loop: "{{ filesystem_volume_map }}" @@ -361,7 +403,7 @@ - name: Append loop value to register ansible.builtin.set_fact: - __sap_vm_provision_task_provision_host_single: "{{ __sap_vm_provision_task_provision_host_single_info.servers[0] | combine( { 'host_node' : inventory_hostname } , { 'sap_host_type' : lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_host_type } , { 'sap_system_type' : (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_system_type | default('')) } ) }}" + __sap_vm_provision_task_provision_host_single: "{{ __sap_vm_provision_task_provision_host_single_info.servers[0] | combine( { 'host_node' : inventory_hostname } , { 'sap_host_type' : target_provision_host_spec.sap_host_type } , { 'sap_system_type' : (target_provision_host_spec.sap_system_type | default('')) } ) }}" - name: Append output to merged register ansible.builtin.set_fact: diff --git a/roles/sap_vm_provision/tasks/platform_ansible/kubevirt_vm/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible/kubevirt_vm/execute_main.yml index 7563763..513b0e1 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible/kubevirt_vm/execute_main.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible/kubevirt_vm/execute_main.yml @@ -1,4 +1,10 @@ --- +- name: Fail if sap_vm_provision_kubevirt_vm_os_user_password is not set and sap_vm_provision_kubevirt_vm_guest_ssh_auth_mechanism is set to password + ansible.builtin.fail: + msg: Password is not allowed to be empty or undefined (sap_vm_provision_kubevirt_vm_os_user_password). 
+ when: + - sap_vm_provision_kubevirt_vm_guest_ssh_auth_mechanism == "password" + - sap_vm_provision_kubevirt_vm_os_user_password == "" or sap_vm_provision_kubevirt_vm_os_user_password == null - name: Ansible Task block for looped provisioning of KubeVirt Virtual Machines any_errors_fatal: true @@ -11,38 +17,6 @@ ansible.builtin.set_fact: register_provisioned_host_all: [] - - name: Set fact for auth - defaults - ansible.builtin.set_fact: - api_version: "kubevirt.io/v1" - validate_certs: "{{ default(lookup('env', 'K8S_AUTH_VERIFY_SSL')) | default(false) }}" - persist_config: "{{ default(lookup('env', 'K8S_AUTH_PERSIST_CONFIG')) | default(true) }}" - host: "{{ sap_vm_provision_kubevirt_cluster_url | default(lookup('env', 'K8S_AUTH_HOST')) | default(omit) }}" # Target Hypervisor Node - - - name: Set fact for auth - Kubeconfig - no_log: "{{ __sap_vm_provision_no_log }}" - ansible.builtin.set_fact: - kubeconfig: "{{ sap_vm_provision_kubevirt_kubeconfig_path | default(lookup('env', 'K8S_AUTH_KUBECONFIG')) | default(lookup('env', 'KUBECONFIG')) | default(omit) }}" - - - name: Set fact for auth - API Key - no_log: "{{ __sap_vm_provision_no_log }}" - ansible.builtin.set_fact: - api_key: "{{ sap_vm_provision_kubevirt_api_key | default(lookup('env', 'K8S_AUTH_API_KEY')) | default(omit) }}" - when: kubeconfig is defined - - - name: Set fact for auth - Username and Passwords - no_log: "{{ __sap_vm_provision_no_log }}" - ansible.builtin.set_fact: - username: "{{ sap_vm_provision_kubevirt_username | default(lookup('env', 'K8S_AUTH_USERNAME')) | default(omit) }}" - password: "{{ sap_vm_provision_kubevirt_username | default(lookup('env', 'K8S_AUTH_PASSWORD')) | default(omit) }}" - - # - name: Set fact for auth - Alternative - # no_log: "{{ __sap_vm_provision_no_log }}" - # ansible.builtin.set_fact: - # ca_cert: "{{ default(lookup('env', 'K8S_AUTH_SSL_CA_CERT')) | default(omit) }}" - # client_cert: "{{ default(lookup('env', 'K8S_AUTH_CERT_FILE')) | default(omit) }}" - # client_key: "{{ default(lookup('env', 'K8S_AUTH_KEY_FILE')) | default(omit) }}" - # context: "{{ default(lookup('env', 'K8S_AUTH_CONTEXT')) | default(omit) }}" - - name: Provision hosts to KubeVirt register: __sap_vm_provision_task_provision_host_all_run ansible.builtin.include_tasks: @@ -53,16 +27,13 @@ ansible.builtin.add_host: name: "{{ add_item[0].host_node }}" groups: "{{ add_item[0].sap_system_type + '_' if (add_item[0].sap_system_type != '') }}{{ add_item[0].sap_host_type }}" - ansible_host: "{{ add_item[0].reported_devices[0].ips[0].address }}" - ansible_user: "root" - ansible_ssh_private_key_file: "{{ sap_vm_provision_ssh_host_private_key_file_path }}" - ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no - loop: "{{ ansible_play_hosts | map('extract', hostvars, 'register_provisioned_host_all') }}" + ansible_host: "{{ add_item[0].provisioned_private_ip }}" + ansible_user: "{{ sap_vm_provision_kubevirt_vm_os_user }}" + loop: "{{ ansible_play_hosts | map('extract', hostvars, 'register_provisioned_host_all') }}" loop_control: label: "{{ add_item[0].host_node }}" loop_var: add_item - # Cannot override any variables from extravars input, see https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#understanding-variable-precedence # Ensure no default value exists for any prompted variable before execution of Ansible Playbook @@ -75,9 +46,6 @@ ansible.builtin.include_tasks: file: 
common/set_ansible_vars.yml - # - ansible.builtin.debug: - # var: __sap_vm_provision_task_provision_host_all_add.results - rescue: # This requires no_log set on each Ansible Task, and not set on the Ansible Task Block # This requires an Ansible Task Block containing the Ansible Tasks for calling @@ -100,9 +68,25 @@ - not lookup('ansible.builtin.vars', loop_item, default='') is skipped - lookup('ansible.builtin.vars', loop_item, default='') is failed +- name: Write private ssh key to ansible_controller + delegate_to: "{{ sap_vm_provision_kubevirt_vm_ansible_controller }}" + no_log: true + ansible.builtin.copy: + dest: "{{ sap_vm_provision_ssh_host_private_key_file_path }}" + content: "{{ sap_vm_provision_ssh_host_private_key_data }}" + mode: "0600" + when: sap_vm_provision_kubevirt_vm_guest_ssh_auth_mechanism == "private_key_data" - name: Ansible Task block to execute on target inventory hosts + remote_user: "{{ sap_vm_provision_kubevirt_vm_os_user }}" + become: true + become_user: root delegate_to: "{{ inventory_hostname }}" + vars: + ansible_password: "{{ sap_vm_provision_kubevirt_vm_os_user_password }}" + ansible_ssh_private_key_file: "{{ sap_vm_provision_ssh_host_private_key_file_path }}" + ansible_ssh_common_args: "-o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o UserKnownHostsFile=/dev/null -o ForwardX11=no -o ProxyJump={{ __sap_vm_provision_kubevirt_vm_register_execution_host_user }}@{{ sap_vm_provision_execution_host }}" + block: # Required to collect the remote host's facts for further processing @@ -148,3 +132,13 @@ - name: Register Package Repositories ansible.builtin.include_tasks: file: common/register_os.yml + + always: + + - name: Delete private ssh key from ansible_controller + delegate_to: "{{ sap_vm_provision_register_ansible_controller }}" + become: false + ansible.builtin.file: + path: "{{ sap_vm_provision_ssh_host_private_key_file_path }}" + state: absent + when: sap_vm_provision_kubevirt_vm_guest_ssh_auth_mechanism == "private_key_data" diff --git a/roles/sap_vm_provision/tasks/platform_ansible/kubevirt_vm/execute_provision.yml b/roles/sap_vm_provision/tasks/platform_ansible/kubevirt_vm/execute_provision.yml index e068c90..6ee2920 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible/kubevirt_vm/execute_provision.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible/kubevirt_vm/execute_provision.yml @@ -1,6 +1,4 @@ --- -# The tasks in this file are executed in a loop over the defined hosts - - name: Ensure short hostname is not longer than 13 characters (see SAP Note 611361) ansible.builtin.assert: that: (inventory_hostname | length | int) <= (13 | int) @@ -11,28 +9,35 @@ ansible.builtin.set_fact: scaleout_origin_host_spec: "{{ inventory_hostname | regex_replace('^(.+?)\\d*$', '\\1') }}" when: - - sap_vm_provision_calculate_sap_hana_scaleout_active_coordinator is defined - - not inventory_hostname in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan].keys() + - sap_hana_scaleout_active_coordinator is defined + - not item in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan].keys() + +- name: Set fact for VM name + ansible.builtin.set_fact: + __sap_vm_provision_register_vm_name: "{{ inventory_hostname }}" +- name: Set fact for VM config + ansible.builtin.set_fact: + __sap_vm_provision_register_vm_config: "{{ 
(lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][__sap_vm_provision_register_vm_name]) }}" -- name: Set fact for downloaded OS Image +- name: Set fact for download OS Image ansible.builtin.set_fact: - os_image_downloaded: |- + os_image: |- {%- set disks_map = [ { - 'metadata': { 'name': (inventory_hostname + '-boot' | replace('_', '-')) }, + 'metadata': { 'name': (__sap_vm_provision_register_vm_name + '-boot' | replace('_', '-')) }, 'spec' : { 'source' : { 'registry' : { - 'url': sap_vm_provision_kubevirt_vm_host_os_image_url, + 'url': __sap_vm_provision_register_vm_config.os_image.url, 'pullMethod': 'node' }, }, 'storage' : { - 'accessModes': ['ReadWriteOnce'], + 'accessModes': ['ReadWriteMany'], 'resources': { 'requests': { - 'storage': '50Gi' + 'storage': __sap_vm_provision_register_vm_config.os_image.size } } } @@ -40,46 +45,45 @@ } ] -%} {{ disks_map }} + when: __sap_vm_provision_register_vm_config.os_image.url is defined -# - name: Set fact for existing OS Image -# ansible.builtin.set_fact: -# os_image_existing: | -# {%- set disks_map = [ -# { -# 'metadata': { 'name': (inventory_hostname + '-boot' | replace('_', '-')) }, -# 'spec' : { -# 'source' : { -# 'pvc' : { -# 'name': (inventory_hostname + '-boot' | replace('_', '-')), -# 'namespace': sap_vm_provision_kubevirt_target_namespace -# }, -# }, -# 'storage' : { -# 'accessModes': ['ReadWriteOnce'], -# 'resources': { -# 'requests': { -# 'storage': '25Gi' -# } -# } -# } -# } -# } -# ] -%} -# {{ disks_map }} - +- name: Set fact for existing OS Image + ansible.builtin.set_fact: + os_image: | + {%- set disks_map = [ + { + 'metadata': { 'name': (__sap_vm_provision_register_vm_name + '-boot' | replace('_', '-')) }, + 'spec' : { + 'source' : { + 'pvc' : { + 'name': __sap_vm_provision_register_vm_config.os_image.source_pvc_name, + 'namespace': __sap_vm_provision_register_vm_config.os_image.namespace + }, + }, + 'storage' : { + 'accessModes': ['ReadWriteMany'], + 'resources': { + 'requests': { + 'storage': __sap_vm_provision_register_vm_config.os_image.size + } + } + } + } + } + ] -%} + {{ disks_map }} + when: + - __sap_vm_provision_register_vm_config.os_image.source_pvc_name is defined + - __sap_vm_provision_register_vm_config.os_image.namespace is defined - name: Set fact for storage volume template map ansible.builtin.set_fact: storage_disks_map: |- {% set disks_map = [] -%} - {% for storage_item in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].storage_definition -%} - {% for idx in range(0, storage_item.disk_count | default(1)) -%} - {% if (storage_item.filesystem_type is defined) -%} - {% if ('swap' in storage_item.filesystem_type and storage_item.swap_path is not defined) - or ('swap' not in storage_item.filesystem_type and storage_item.nfs_path is not defined) -%} + {% for storage_item in __sap_vm_provision_register_vm_config.storage_definition -%} {% set vol = disks_map.extend([ { - 'metadata': { 'name': (inventory_hostname + '-' + storage_item.name + (idx | string) | replace('_', '-')) }, + 'metadata': { 'name': (__sap_vm_provision_register_vm_name + '-' + storage_item.name | replace('_', '-')) }, 'spec' : { 'source' : { 'blank' : {} @@ -95,18 +99,36 @@ } } }]) %} - {%- endif %} - {%- endif %} - {%- endfor %} {%- endfor %} {{ disks_map }} +- name: Set 
fact for storage volumes attachment list + ansible.builtin.set_fact: + storage_disk_name_list: |- + {% set disks_simple_map = [] -%} + {% for list_item in os_image -%} + {% set vol = disks_simple_map.extend([ + { + 'name': list_item.metadata.name, + 'dataVolume': { 'name': list_item.metadata.name }, + } + ]) %} + {%- endfor %} + {% for list_item in storage_disks_map -%} + {% set vol = disks_simple_map.extend([ + { + 'name': list_item.metadata.name, + 'dataVolume': { 'name': list_item.metadata.name }, + } + ]) %} + {%- endfor %} + {{ disks_simple_map }} - name: Set fact for storage volumes attachment list ansible.builtin.set_fact: storage_disk_name_list: |- {% set disks_simple_map = [] -%} - {% for list_item in os_image_downloaded -%} + {% for list_item in os_image -%} {% set vol = disks_simple_map.extend([ { 'name': list_item.metadata.name, @@ -124,88 +146,104 @@ {%- endfor %} {{ disks_simple_map }} +- name: Set fact for disk list + ansible.builtin.set_fact: + storage_disk_list: |- + {% set disks_list_simple = [] -%} + {% set vol = disks_list_simple.extend([ + { + 'name': __sap_vm_provision_register_vm_name + '-boot' | replace('_', '-'), + 'bootOrder': 1, + 'disk': { + 'bus': 'virtio', + 'io': 'native' + } + }, + { + 'name': 'cloudinit', + 'io': 'native', + 'disk': { + 'bus': 'virtio' + } + }, + ]) %} + {% for list_item in storage_disks_map -%} + {% set vol = disks_list_simple.extend([ + { + 'name': list_item.metadata.name, + 'io': 'native', + 'disk': { + 'bus': 'virtio' + } + } + ]) %} + {%- endfor %} + {{ disks_list_simple }} + - name: Set fact for cloud-init volume ansible.builtin.set_fact: cloud_init_volume: - name: cloudinit - cloudInitNoCloud: - userData: |- - #cloud-config - hostname: "{{ inventory_hostname_short }}" - "{{ 'user: ' + sap_vm_provision_kubevirt_os_user if sap_vm_provision_kubevirt_os_user is defined }}" - "{{ 'password: ' + sap_vm_provision_kubevirt_os_user_password if sap_vm_provision_kubevirt_os_user_password is defined }}" - chpasswd: - expire: false - ssh_authorized_keys: - - "{{ lookup('ansible.builtin.file', sap_vm_provision_ssh_host_public_key_file_path ) }}" - network: - version: 2 - ethernets: - eth0: - dhcp4: true - - -- name: Provision KubeVirt Virtual Machine - no_log: "{{ __sap_vm_provision_no_log }}" - register: __sap_vm_provision_task_provision_host_single - kubevirt.core.kubevirt_vm: - - ## Hypervisor Control Plane definition and credentials - api_version: "{{ api_version | default(omit) }}" - validate_certs: "{{ validate_certs | default(omit) }}" - persist_config: "{{ persist_config | default(omit) }}" - host: "{{ host | default(omit) }}" # Target Hypervisor Node - - kubeconfig: "{{ kubeconfig | default(omit) }}" - api_key: "{{ api_key | default(omit) }}" - username: "{{ username | default(omit) }}" - password: "{{ password | default(omit) }}" + disk: + bus: virtio + cloudInitNoCloud: "{{ __sap_vm_provision_register_vm_config.cloudinit }}" - # ca_cert: "{{ ca_cert | default(omit) }}" - # client_cert: "{{ client_cert | default(omit) }}" - # client_key: "{{ client_key | default(omit) }}" - # context: "{{ context | default(omit) }}" - - ## Virtual Machine target Hypervisor definition - namespace: "{{ sap_vm_provision_kubevirt_target_namespace }}" # Target namespace - - ## Virtual Machine definition - state: present - running: true - wait: true # ensure Virtual Machine in ready state before exiting Ansible Task - wait_sleep: 30 # 30 second poll for ready state - wait_timeout: 600 # 10 minute wait for ready state - force: false # Do not replace existing 
Virtual Machine with same name - name: "{{ inventory_hostname }}" - labels: - app: "{{ inventory_hostname }}" +- name: Set fact for network interfaces + ansible.builtin.set_fact: + __sap_vm_provision_register_network_interfaces: |- + {% set netifs = [] -%} + {% for list_item in __sap_vm_provision_register_vm_config.network_definition -%} + {% set ifs = netifs.extend([ + { + list_item.type: {}, + 'model': list_item.model, + 'name': list_item.name, + } + ]) %} + {%- endfor %} + {{ netifs }} - # Virtual Disk volume definitions - data_volume_templates: "{{ storage_disks_map }}" +- name: Set fact for networks definition + ansible.builtin.set_fact: + __sap_vm_provision_register_networks_definition: |- + {% set networks = [] -%} + {% for list_item in __sap_vm_provision_register_vm_config.network_definition -%} + {% set ifs = networks.extend([ + { + 'name': list_item.name, + 'multus': { 'networkName': list_item.networkName} + } + ]) %} + {%- endfor %} + {{ networks }} - # Virtual Machine configuration - #preference: - # name: fedora # OS Image, not used when data volume template and spec contains volume using registry OS Image - #instancetype: - # name: u1.medium # VM Template Size, not used when spec contains cpu and memory configuration - spec: +- name: Set fact for VM deploy config + ansible.builtin.set_fact: + __sap_vm_provision_register_vm_deploy_config: + volumes: "{{ storage_disk_name_list + cloud_init_volume }}" + networks: "{{ __sap_vm_provision_register_networks_definition }}" domain: - ioThreadsPolicy: auto - hostname: "{{ item }}" + # shared | auto, auto prevents live migration + ioThreadsPolicy: shared + hostname: "{{ __sap_vm_provision_register_vm_name }}" evictionStrategy: LiveMigrate terminationGracePeriodSeconds: 1800 # 30 minutes after stop request before VM is force terminated - resources: requests: - memory: "{{ (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].kubevirt_vm_memory_gib) + 16 }}Gi" # Static 16GB DRAM overhead for container runtime + memory: "{{ __sap_vm_provision_register_vm_config.kubevirt_vm_memory_gib + sap_vm_provision_kubevirt_vm_container_memory_overhead }}Gi" # memory + overhead for container runtime) - devices: {} + devices: + downwardMetrics: {} + networkInterfaceMultiqueue: true + blockMultiQueue: true + autoattachMemBalloon: false + disks: "{{ storage_disk_list }}" + interfaces: "{{ __sap_vm_provision_register_network_interfaces }}" cpu: - cores: "{{ (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].kubevirt_vm_cpu_threads) / kubevirt_vm_cpu_smt }}" - threads: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].kubevirt_vm_cpu_threads }}" + cores: "{{ __sap_vm_provision_register_vm_config.kubevirt_vm_cpu_cores }}" + threads: "{{ __sap_vm_provision_register_vm_config.kubevirt_vm_cpu_smt }}" dedicatedCpuPlacement: true - isolateEmulatorThread: true model: host-passthrough numa: guestMappingPassthrough: {} @@ -218,62 +256,57 @@ policy: require memory: - guest: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + 
'_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].kubevirt_vm_memory_gib }}Gi" + guest: "{{ __sap_vm_provision_register_vm_config.kubevirt_vm_memory_gib }}Gi" hugepages: pageSize: 1Gi - networks: - - name: bridge-network-definition - multus: - networkName: iface-bridge-sriov - - name: storage-network-definition - multus: - networkName: iface-storage-sriov - - name: multi-network-definition - multus: - networkName: iface-multi-sriov +- name: Provision KubeVirt Virtual Machine + kubevirt.core.kubevirt_vm: + api_version: "{{ api_version | default(omit) }}" + persist_config: "{{ persist_config | default(omit) }}" - volumes: "{{ storage_disk_name_list + cloud_init_volume }}" + ## Virtual Machine target Hypervisor definition + namespace: "{{ sap_vm_provision_kubevirt_vm_target_namespace }}" # Target namespace + ## Virtual Machine definition + state: present + running: true + wait: true # ensure Virtual Machine in ready state before exiting Ansible Task + wait_sleep: 30 # 30 second poll for ready state + wait_timeout: 600 # 10 minute wait for ready state + force: "{{ sap_vm_provision_overwrite_vm | default(false) }}" # Do not replace existing Virtual Machine with same name + name: "{{ __sap_vm_provision_register_vm_name }}" + labels: + app: "{{ __sap_vm_provision_register_vm_name }}" + # Virtual Disk volume definitions + data_volume_templates: "{{ storage_disks_map + os_image }}" + spec: "{{ __sap_vm_provision_register_vm_deploy_config }}" - name: Check VM status - no_log: "{{ __sap_vm_provision_no_log }}" - register: __sap_vm_provision_task_provision_host_single_info + register: register_provisioned_host_single_info kubevirt.core.kubevirt_vm_info: - name: "{{ inventory_hostname }}" - namespace: "{{ sap_vm_provision_kubevirt_target_namespace }}" - + name: "{{ __sap_vm_provision_register_vm_name }}" + namespace: "{{ sap_vm_provision_kubevirt_vm_target_namespace }}" + +- name: Get VMI details + kubernetes.core.k8s_info: + api_version: kubevirt.io/v1 + kind: VirtualMachineInstance + namespace: "{{ sap_vm_provision_kubevirt_vm_target_namespace }}" + name: "{{ __sap_vm_provision_register_vm_name }}" + register: vmi_info + until: vmi_info.resources[0].status.interfaces[0].ipAddress is defined + retries: 10 + delay: 30 - name: Create fact for delegate host IP ansible.builtin.set_fact: - provisioned_private_ip: "{{ __sap_vm_provision_task_provision_host_single_info.spec.UNKNOWN_VARIABLE_FOR_PRIVATE_IP_HERE }}" - - -- name: Collect only facts about hardware - register: __sap_vm_provision_task_ansible_facts_host_disks_info - ansible.builtin.setup: - gather_subset: - - hardware - remote_user: root - become: true - become_user: root - delegate_to: "{{ provisioned_private_ip }}" - delegate_facts: false - vars: - ansible_ssh_private_key_file: "{{ sap_vm_provision_ssh_host_private_key_file_path }}" - -#- name: Output disks -# ansible.builtin.debug: -# var: hostvars[inventory_hostname].ansible_devices.keys() | list - -#- name: Debug Ansible Facts devices used list -# ansible.builtin.debug: -# msg: "{{ __sap_vm_provision_task_ansible_facts_host_disks_info.ansible_facts.ansible_device_links.ids.keys() | list }}" - + provisioned_private_ip: "{{ vmi_info.resources[0].status.interfaces[0].ipAddress }}" +# How should this datastructure look like? when just using the provisioned_private_ip there is no need to use combine (would need two dicts). 
Using VMI info for now - name: Append loop value to register ansible.builtin.set_fact: - __sap_vm_provision_task_provision_host_single: "{{ __sap_vm_provision_task_provision_host_single_info.spec.UKNOWN_VARIABLE_HERE | combine( { 'host_node' : inventory_hostname } , { 'sap_host_type' : lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_host_type } , { 'sap_system_type' : (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_system_type | default('')) } ) }}" + __sap_vm_provision_task_provision_host_single: "{{ vmi_info | combine( { 'host_node' : inventory_hostname } , { 'sap_host_type' : __sap_vm_provision_register_vm_config.sap_host_type } , { 'sap_system_type' : (__sap_vm_provision_register_vm_config.sap_system_type | default('')) }, {'provisioned_private_ip': provisioned_private_ip } ) }}" - name: Append output to merged register ansible.builtin.set_fact: diff --git a/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/execute_main.yml index 6f5a96a..721cf70 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/execute_main.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/execute_main.yml @@ -12,10 +12,6 @@ # AZURE_SECRET: "{{ sap_vm_provision_msazure_app_client_secret }}" block: - - name: Set fact to hold loop variables from include_tasks - ansible.builtin.set_fact: - register_provisioned_host_all: [] - # Ansible Module name parameter, requires resource_group parameter # We cannot assume Resource Group if the SSH Public Key is managed by Administrators # Therefore use without any parameter to retrieve list of all SSH Public Keys and filter in Ansible @@ -50,6 +46,36 @@ ansible.builtin.set_fact: __sap_vm_provision_task_msazure_private_dns_auto_register_records: "{{ (__sap_vm_provision_task_msazure_private_dns_virtual_network_links.virtualnetworklinks | selectattr('virtual_network.id', 'search', sap_vm_provision_msazure_vnet_name))[0].registration_enabled }}" + # Alternative to MS Azure Availability Set, is MS Azure VM Scale Set using azure.azcollection.azure_rm_virtualmachinescaleset + - name: Create Placement Sets when High Availability + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_msazure_availability_set + run_once: true + azure.azcollection.azure_rm_availabilityset: + resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}" + location: "{{ sap_vm_provision_msazure_location_region }}" + name: "{{ sap_vm_provision_aws_placement_resource_name }}-{{ item }}" + # VM instances (HA Pairs) in the Availability Set spread across up to 3 Fault Domains (different racks) + platform_fault_domain_count: 3 + sku: Aligned # do not use Classic/ASM + # Azure credentials + subscription_id: "{{ sap_vm_provision_msazure_subscription_id }}" + tenant: "{{ sap_vm_provision_msazure_tenant_id }}" + client_id: "{{ sap_vm_provision_msazure_app_client_id }}" + secret: "{{ sap_vm_provision_msazure_app_client_secret }}" + loop: + - "{{ 'hana' if 'hana_secondary' in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + 
'_host_specifications_dictionary')[sap_vm_provision_host_specification_plan] | json_query('*') | map(attribute='sap_host_type') else '' }}" + - "{{ 'anydb' if 'anydb_secondary' in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan] | json_query('*') | map(attribute='sap_host_type') else '' }}" + - "{{ 'nwas' if 'nwas_ers' in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan] | json_query('*') | map(attribute='sap_host_type') else '' }}" + when: + - sap_vm_provision_aws_placement_resource_name is defined + - sap_vm_provision_aws_placement_strategy_spread + - not item == '' + + - name: Set fact to hold loop variables from include_tasks + ansible.builtin.set_fact: + register_provisioned_host_all: [] + - name: Provision hosts to MS Azure register: __sap_vm_provision_task_provision_host_all_run ansible.builtin.include_tasks: @@ -82,7 +108,7 @@ - name: Set fact to hold all inventory hosts in all groups ansible.builtin.set_fact: - groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}" + groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['anydb_primary'] | default([]) ] , [ groups['anydb_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}" - name: Set Ansible Vars register: __sap_vm_provision_task_ansible_vars_set @@ -178,6 +204,10 @@ ansible.builtin.include_tasks: file: common/set_ansible_vars_storage.yml + - name: Register Package Repositories for OS Images with Bring-Your-Own-Subscription (BYOS) + ansible.builtin.include_tasks: + file: common/register_os.yml + - name: Ansible Task block to execute on target inventory hosts - High Availability delegate_to: "{{ inventory_hostname }}" diff --git a/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/execute_provision.yml b/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/execute_provision.yml index bf25c1d..d8a2b64 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/execute_provision.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/execute_provision.yml @@ -14,6 +14,10 @@ - sap_vm_provision_calculate_sap_hana_scaleout_active_coordinator is defined - not inventory_hostname in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan].keys() +- name: Set fact for host specifications of the provision target + ansible.builtin.set_fact: + target_provision_host_spec: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)] }}" + - name: Verify if network interface for MS Azure VM already exists (i.e. 
re-run) no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_provision_host_single_vnic_info @@ -41,7 +45,17 @@ primary: true #private_ip_allocation_method: "Static" # When static, must define the specific IP Address enable_accelerated_networking: true - enable_ip_forwarding: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].disable_ip_anti_spoofing }}" # When disable the Anti IP Spoofing = true, then Enable IP Forwarding = true + enable_ip_forwarding: "{{ target_provision_host_spec.disable_ip_anti_spoofing }}" # When disable the Anti IP Spoofing = true, then Enable IP Forwarding = true + availability_set: "{{ ( + (__sap_vm_provision_task_msazure_availability_set.results | selectattr('item','==','hana'))[0].state.name + if ('hana_primary' in target_provision_host_spec.sap_host_type or 'hana_secondary' in target_provision_host_spec.sap_host_type) + else + (__sap_vm_provision_task_msazure_availability_set.results | selectattr('item','==','anydb'))[0].state.name + if ('anydb_primary' in target_provision_host_spec.sap_host_type or 'anydb_secondary' in target_provision_host_spec.sap_host_type) + else + (__sap_vm_provision_task_msazure_availability_set.results | selectattr('item','==','nwas'))[0].state.name + if ('nwas_ascs' in target_provision_host_spec.sap_host_type or 'nwas_ers' in target_provision_host_spec.sap_host_type) + ) | default(omit) }}" # Azure credentials subscription_id: "{{ sap_vm_provision_msazure_subscription_id }}" tenant: "{{ sap_vm_provision_msazure_tenant_id }}" @@ -61,7 +75,7 @@ ssh_public_keys: - path: /home/azureadmin/.ssh/authorized_keys key_data: "{{ __sap_vm_provision_task_msazure_key_pair_name_ssh_host_public_key_value }}" - vm_size: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].virtual_machine_profile }}" + vm_size: "{{ target_provision_host_spec.virtual_machine_profile }}" image: publisher: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_os_image_dictionary')[sap_vm_provision_msazure_vm_host_os_image].publisher }}" offer: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_os_image_dictionary')[sap_vm_provision_msazure_vm_host_os_image].offer }}" @@ -121,11 +135,7 @@ delegate_sap_vm_provision_ssh_bastion_private_key_file_path: "{{ sap_vm_provision_ssh_bastion_private_key_file_path }}" delegate_sap_vm_provision_ssh_host_private_key_file_path: "{{ sap_vm_provision_ssh_host_private_key_file_path }}" -- name: Collect only facts about hardware - register: __sap_vm_provision_task_ansible_facts_host_disks_info - ansible.builtin.setup: - gather_subset: - - hardware +- name: Block to collect only facts about hardware remote_user: azureadmin become: true become_user: root @@ -134,6 +144,22 @@ vars: ansible_ssh_private_key_file: "{{ delegate_sap_vm_provision_ssh_host_private_key_file_path }}" ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no -o ProxyCommand='ssh -W %h:%p {{ delegate_sap_vm_provision_bastion_user }}@{{ delegate_sap_vm_provision_bastion_public_ip }} -p {{ 
delegate_sap_vm_provision_bastion_ssh_port }} -i {{ delegate_sap_vm_provision_ssh_bastion_private_key_file_path }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
+ block:
+
+ # Required because state: present on Ansible Module azure_rm_virtualmachine does not wait long enough until the VM has booted
+ # wait_for_connection is used instead to ensure the connection is available before proceeding.
+ - name: Wait until SSH connection is available
+ ansible.builtin.wait_for_connection:
+ timeout: 300
+
+ - name: Collect only facts about hardware
+ register: __sap_vm_provision_task_ansible_facts_host_disks_info
+ ansible.builtin.setup:
+ gather_subset:
+ - hardware
+ retries: 60
+ delay: 10
+
 #- name: Output disks # ansible.builtin.debug: @@ -171,7 +197,7 @@ filesystem_volume_map: | {% set volume_map = [] -%} {% set av_vol = available_volumes -%} - {% for storage_item in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].storage_definition -%} + {% for storage_item in target_provision_host_spec.storage_definition -%} {% for idx in range(0, storage_item.disk_count | default(1)) -%} {% if (storage_item.filesystem_type is defined) -%} {% if ('swap' in storage_item.filesystem_type and storage_item.swap_path is not defined) @@ -270,8 +296,8 @@ ansible.builtin.replace: path: /root/.ssh/authorized_keys backup: true - regexp: '(^.*ssh-rsa)' - replace: 'ssh-rsa' + regexp: '(^.*ssh-)' # Allow ssh-rsa , ssh-ed25519 etc. + replace: 'ssh-' - name: Permit root login register: __sap_vm_provision_task_os_sshd_config @@ -292,7 +318,7 @@ - name: Append loop value to register ansible.builtin.set_fact: - __sap_vm_provision_task_provision_host_single: "{{ __sap_vm_provision_task_provision_host_single | combine( { 'host_node' : inventory_hostname } , { 'sap_host_type' : lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_host_type } , { 'sap_system_type' : (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_system_type | default('')) } ) }}" + __sap_vm_provision_task_provision_host_single: "{{ __sap_vm_provision_task_provision_host_single | combine( { 'host_node' : inventory_hostname } , { 'sap_host_type' : target_provision_host_spec.sap_host_type } , { 'sap_system_type' : (target_provision_host_spec.sap_system_type | default('')) } ) }}" - name: Append output to merged register ansible.builtin.set_fact: diff --git a/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/execute_setup_ha.yml b/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/execute_setup_ha.yml index 89d9c8a..511fd05 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/execute_setup_ha.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/execute_setup_ha.yml @@ -19,7 +19,7 @@ # resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}" # route_table_name: "{{ __sap_vm_provision_task_msazure_vnet_subnet_rt_info.route_tables[0].id }}" # name: "{{ sap_swpm_db_host }}-rt" -# address_prefix: "{{
sap_vm_provision_ha_vip_hana_primary }}" # next_hop_type: "virtual_appliance" # next_hop_ip_address: "{{ hostvars[host_node].ansible_host }}" # # Azure credentials @@ -42,7 +42,7 @@ relative_name: "{{ sap_swpm_db_host }}" record_type: A records: - - entry: "{{ (sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default('192.168.1.90/32')) | regex_replace('/.*', '') }}" + - entry: "{{ sap_vm_provision_ha_vip_hana_primary | regex_replace('/.*', '') }}" # Azure credentials subscription_id: "{{ sap_vm_provision_msazure_subscription_id }}" tenant: "{{ sap_vm_provision_msazure_tenant_id }}" @@ -55,6 +55,49 @@ - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0) +# - name: Ansible MS Azure Route Table append route for SAP AnyDB HA +# no_log: "{{ __sap_vm_provision_no_log }}" +# register: __sap_vm_provision_task_msazure_vnet_subnet_rt_route_sap_anydb +# azure.azcollection.azure_rm_route: +# resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}" +# route_table_name: "{{ __sap_vm_provision_task_msazure_vnet_subnet_rt_info.route_tables[0].id }}" +# name: "{{ sap_swpm_db_host }}-rt" +# address_prefix: "{{ sap_vm_provision_ha_vip_anydb_primary }}" +# next_hop_type: "virtual_appliance" +# next_hop_ip_address: "{{ hostvars[host_node].ansible_host }}" +# # Azure credentials +# subscription_id: "{{ sap_vm_provision_msazure_subscription_id }}" +# tenant: "{{ sap_vm_provision_msazure_tenant_id }}" +# client_id: "{{ sap_vm_provision_msazure_app_client_id }}" +# secret: "{{ sap_vm_provision_msazure_app_client_secret }}" +# loop: "{{ (groups['anydb_primary'] | default([])) }}" +# loop_control: +# loop_var: host_node +# when: +# - groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0) + +- name: Ansible MS Azure Private DNS Records for SAP AnyDB HA Virtual Hostname + no_log: "{{ __sap_vm_provision_no_log }}" + azure.azcollection.azure_rm_privatednsrecordset: + # DNS may exist in separate Resource Group. 
Use empty string var (or default false if undefined) to evaluate to false boolean, and use Python or logic operator + resource_group: "{{ (sap_vm_provision_msazure_private_dns_resource_group_name | default(false)) or sap_vm_provision_msazure_resource_group_name }}" + zone_name: "{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}" + relative_name: "{{ sap_swpm_db_host }}" + record_type: A + records: + - entry: "{{ sap_vm_provision_ha_vip_anydb_primary | regex_replace('/.*', '') }}" + # Azure credentials + subscription_id: "{{ sap_vm_provision_msazure_subscription_id }}" + tenant: "{{ sap_vm_provision_msazure_tenant_id }}" + client_id: "{{ sap_vm_provision_msazure_app_client_id }}" + secret: "{{ sap_vm_provision_msazure_app_client_secret }}" + loop: "{{ (groups['anydb_primary'] | default([])) }}" + loop_control: + loop_var: host_node + when: + - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0) + + # - name: Ansible MS Azure Route Table append route for SAP NetWeaver ASCS HA # no_log: "{{ __sap_vm_provision_no_log }}" # register: __sap_vm_provision_task_msazure_vnet_subnet_rt_route_sap_netweaver_ascs @@ -62,7 +105,7 @@ # resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}" # route_table_name: "{{ __sap_vm_provision_task_msazure_vnet_subnet_rt_info.route_tables[0].id }}" # name: "{{ sap_swpm_ascs_instance_hostname }}-rt" -# address_prefix: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default('192.168.2.10/32') }}" +# address_prefix: "{{ sap_vm_provision_ha_vip_nwas_abap_ascs }}" # next_hop_type: "virtual_appliance" # next_hop_ip_address: "{{ hostvars[host_node].ansible_host }}" # # Azure credentials @@ -85,7 +128,7 @@ relative_name: "{{ sap_swpm_ascs_instance_hostname }}" record_type: A records: - - entry: "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default('192.168.2.10/32')) | regex_replace('/.*', '') }}" + - entry: "{{ sap_vm_provision_ha_vip_nwas_abap_ascs | regex_replace('/.*', '') }}" # Azure credentials subscription_id: "{{ sap_vm_provision_msazure_subscription_id }}" tenant: "{{ sap_vm_provision_msazure_tenant_id }}" @@ -105,7 +148,7 @@ # resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}" # route_table_name: "{{ __sap_vm_provision_task_msazure_vnet_subnet_rt_info.route_tables[0].id }}" # name: "{{ sap_swpm_ers_instance_hostname }}-rt" -# address_prefix: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default('192.168.2.11/32') }}" +# address_prefix: "{{ sap_vm_provision_ha_vip_nwas_abap_ers }}" # next_hop_type: "virtual_appliance" # next_hop_ip_address: "{{ hostvars[host_node].ansible_host }}" # # Azure credentials @@ -128,7 +171,7 @@ relative_name: "{{ sap_swpm_ers_instance_hostname }}" record_type: A records: - - entry: "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default('192.168.2.11/32')) | regex_replace('/.*', '') }}" + - entry: "{{ sap_vm_provision_ha_vip_nwas_abap_ers | regex_replace('/.*', '') }}" # Azure credentials subscription_id: "{{ sap_vm_provision_msazure_subscription_id }}" tenant: "{{ sap_vm_provision_msazure_tenant_id }}" @@ -150,7 +193,7 @@ # resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}" # route_table_name: "{{ __sap_vm_provision_task_msazure_vnet_subnet_rt_info.route_tables[0].id }}" # name: "{{ sap_swpm_pas_instance_hostname }}-rt" -# address_prefix: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_pas_ip_address | default('192.168.2.12/32') }}" +# address_prefix: "{{ sap_vm_provision_ha_vip_nwas_abap_pas }}" # 
next_hop_type: "virtual_appliance" # next_hop_ip_address: "{{ hostvars[host_node].ansible_host }}" # # Azure credentials @@ -173,7 +216,7 @@ # relative_name: "{{ sap_swpm_pas_instance_hostname }}" # record_type: A # records: -# - entry: "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_pas_ip_address | default('192.168.2.12/32')) | regex_replace('/.*', '') }}" +# - entry: "{{ sap_vm_provision_ha_vip_nwas_abap_pas | regex_replace('/.*', '') }}" # # Azure credentials # subscription_id: "{{ sap_vm_provision_msazure_subscription_id }}" # tenant: "{{ sap_vm_provision_msazure_tenant_id }}" @@ -193,7 +236,7 @@ # resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}" # route_table_name: "{{ __sap_vm_provision_task_msazure_vnet_subnet_rt_info.route_tables[0].id }}" # name: "{{ sap_swpm_aas_instance_hostname }}-rt" -# address_prefix: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_aas_ip_address | default('192.168.2.13/32') }}" +# address_prefix: "{{ sap_vm_provision_ha_vip_nwas_abap_aas }}" # next_hop_type: "virtual_appliance" # next_hop_ip_address: "{{ hostvars[host_node].ansible_host }}" # # Azure credentials @@ -216,7 +259,7 @@ # relative_name: "{{ sap_swpm_aas_instance_hostname }}" # record_type: A # records: -# - entry: "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_aas_ip_address | default('192.168.2.13/32')) | regex_replace('/.*', '') }}" +# - entry: "{{ sap_vm_provision_ha_vip_nwas_abap_aas | regex_replace('/.*', '') }}" # # Azure credentials # subscription_id: "{{ sap_vm_provision_msazure_subscription_id }}" # tenant: "{{ sap_vm_provision_msazure_tenant_id }}" @@ -229,11 +272,18 @@ # - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0) +- name: MS Azure IAM Role - Prepare IAM Role name + ansible.builtin.set_fact: + __sap_vm_provision_msazure_ha_iam_role: + "{{ sap_vm_provision_msazure_ha_iam_role + if sap_vm_provision_msazure_ha_iam_role is defined and sap_vm_provision_msazure_ha_iam_role | length > 0 + else 'Linux Fence Agent Role' }}" + - name: MS Azure IAM Role - Definition no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_msazure_iam_role_fencing azure.azcollection.azure_rm_roledefinition: - name: "Linux Fence Agent Role" + name: "{{ __sap_vm_provision_msazure_ha_iam_role }}" description: "Allows to power-off and start virtual machines" #scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myresourceGroup assignable_scopes: @@ -251,6 +301,39 @@ tenant: "{{ sap_vm_provision_msazure_tenant_id }}" client_id: "{{ sap_vm_provision_msazure_app_client_id }}" secret: "{{ sap_vm_provision_msazure_app_client_secret }}" + state: present + # Custom Role can exist within different Subscriptions under same tenant + # Described in: https://github.com/Azure/azure-powershell/issues/4365#issuecomment-351171763 + # Error is ignored and validated in next step + ignore_errors: true + + # Second attempt to create Role with last segment of Subscription ID +- name: MS Azure IAM Role - Definition Subscription specific + when: + - __sap_vm_provision_task_msazure_iam_role_fencing is defined + - __sap_vm_provision_task_msazure_iam_role_fencing.failed + no_log: "{{ __sap_vm_provision_no_log }}" + register: __sap_vm_provision_task_msazure_iam_role_fencing_sub + azure.azcollection.azure_rm_roledefinition: + name: "{{ __sap_vm_provision_msazure_ha_iam_role }} {{ sap_vm_provision_msazure_subscription_id.split('-')[-1] }}" + description: "Allows to power-off and start virtual machines {{ sap_vm_provision_msazure_subscription_id.split('-')[-1] }}" + 
assignable_scopes: + - "/subscriptions/{{ sap_vm_provision_msazure_subscription_id }}" + permissions: + - actions: + - "Microsoft.Compute/*/read" + - "Microsoft.Compute/virtualMachines/powerOff/action" + - "Microsoft.Compute/virtualMachines/start/action" + # - data_actions: + # - not_actions: + # - not_data_actions: + # Azure credentials + subscription_id: "{{ sap_vm_provision_msazure_subscription_id }}" + tenant: "{{ sap_vm_provision_msazure_tenant_id }}" + client_id: "{{ sap_vm_provision_msazure_app_client_id }}" + secret: "{{ sap_vm_provision_msazure_app_client_secret }}" + state: present + - name: MS Azure - GenericRestClient call to Virtual Machine API to identify Managed Service Identity (MSI) no_log: "{{ __sap_vm_provision_no_log }}" @@ -275,7 +358,8 @@ azure.azcollection.azure_rm_roleassignment: #auth_source: msi role_definition_id: - "{{ __sap_vm_provision_task_msazure_iam_role_fencing.id }}" + "{{ __sap_vm_provision_task_msazure_iam_role_fencing.id if __sap_vm_provision_task_msazure_iam_role_fencing.id is defined + else __sap_vm_provision_task_msazure_iam_role_fencing_sub.id }}" scope: "/subscriptions/{{ sap_vm_provision_msazure_subscription_id }}" assignee_object_id: "{{ host_node.response[0].identity.principalId | default(none) }}" # Azure credentials @@ -346,7 +430,7 @@ lb_frontend_virtual_ips1: "{{ lb_frontend_virtual_ips1 | default([]) + [__ip_element] }}" vars: __ip_element: - name: "lb-vip-hana{{ vip_index_nr }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-vip' + (vip_index_nr | string) }}" private_ip_address: "{{ vip_item | regex_replace('/.*', '') }}" private_ip_allocation_method: "Static" subnet: "{{ __sap_vm_provision_task_msazure_vnet_subnet_info.subnets[0].id }}" @@ -355,7 +439,7 @@ - vip_item | length > 0 - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0) loop: - - "{{ sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default() }}" + - "{{ sap_vm_provision_ha_vip_hana_primary }}" loop_control: index_var: vip_index_nr loop_var: vip_item @@ -365,7 +449,7 @@ lb_frontend_virtual_ips1: "{{ lb_frontend_virtual_ips1 | default([]) + [__ip_element] }}" vars: __ip_element: - name: "lb-vip-anydb{{ vip_index_nr }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_anydb + '-vip' + (vip_index_nr | string) }}" private_ip_address: "{{ vip_item | regex_replace('/.*', '') }}" private_ip_allocation_method: "Static" subnet: "{{ __sap_vm_provision_task_msazure_vnet_subnet_info.subnets[0].id }}" @@ -374,7 +458,7 @@ - vip_item | length > 0 - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0) loop: - - "{{ sap_vm_temp_vip_anydb_primary | default() }}" + - "{{ sap_vm_provision_ha_vip_anydb_primary }}" loop_control: index_var: vip_index_nr loop_var: vip_item @@ -384,7 +468,7 @@ lb_frontend_virtual_ips2: "{{ lb_frontend_virtual_ips2 | default([]) + [__ip_element] }}" vars: __ip_element: - name: "lb-vip-nwas{{ vip_index_nr }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-vip' + (vip_index_nr | string) }}" private_ip_address: "{{ vip_item | regex_replace('/.*', '') }}" private_ip_allocation_method: "Static" subnet: "{{ __sap_vm_provision_task_msazure_vnet_subnet_info.subnets[0].id }}" @@ -393,8 +477,8 @@ - vip_item | length > 0 - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0) loop: - - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default() }}" - - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default() }}" + - "{{ sap_vm_provision_ha_vip_nwas_abap_ascs }}" + 
- "{{ sap_vm_provision_ha_vip_nwas_abap_ers }}" loop_control: index_var: vip_index_nr loop_var: vip_item @@ -405,7 +489,7 @@ lb_probes1: "{{ lb_probes1 | default([]) + [__probe_element] }}" vars: __probe_element: - name: "lb-probe-hc-vip-hana{{ sapinstance_index_nr }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-probe-hc-vip' + (sapinstance_index_nr | string) }}" protocol: Tcp port: "55550" # "{{ ('626' + sapinstance_item | string) | int }}" interval: 5 @@ -424,7 +508,7 @@ lb_probes1: "{{ lb_probes1 | default([]) + [__probe_element] }}" vars: __probe_element: - name: "lb-probe-hc-vip-anydb" + name: "{{ sap_vm_provision_ha_load_balancer_name_anydb + '-probe-hc-vip' }}" protocol: Tcp port: "55550" # 62700 interval: 5 @@ -437,9 +521,9 @@ lb_probes2: "{{ lb_probes2 | default([]) + [__probe_element] }}" vars: __probe_element: - name: "lb-probe-hc-vip-nwas{{ sapinstance_index_nr }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-probe-hc-vip' + (sapinstance_index_nr | string) }}" protocol: Tcp - port: "{{ ('5555' + (sapinstance_index_nr + 1)) | string | int }}" # "{{ ('626' + sapinstance_item | string) | int }}" + port: "{{ ('5555' + (sapinstance_index_nr + 1) | string) | int }}" # "{{ ('626' + sapinstance_item | string) | int }}" interval: 5 fail_count: 2 when: @@ -458,13 +542,13 @@ lb_rules1: "{{ lb_rules1 | default([]) + [__rule_element] }}" vars: __rule_element: - name: "lb-rule-hana{{ rule_index_nr }}" - frontend_ip_configuration: "lb-vip-hana{{ rule_index_nr }}" - backend_address_pool: lb-backend-pool-hana + name: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-rule' + (rule_index_nr | string) }}" + frontend_ip_configuration: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-vip' + (rule_index_nr | string) }}" + backend_address_pool: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-backend-pool' }}" protocol: All frontend_port: 0 # High Availability Ports (AnyPort), only on Internal Standard Load Balancer backend_port: 0 # High Availability Ports (AnyPort), only on Internal Standard Load Balancer - probe: "lb-probe-hc-vip-hana{{ rule_index_nr }}" + probe: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-probe-hc-vip' + (rule_index_nr | string) }}" load_distribution: Default # Session persistence = None idle_timeout: 30 # 30 minutes enable_floating_ip: true # enable Frontend IP as a Floating IP (aka. 
Direct Server Return), if disabled then only 1 LB Rule allowed @@ -472,7 +556,7 @@ - rule_item | length > 0 - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0) loop: - - "{{ sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default() }}" + - "{{ sap_vm_provision_ha_vip_hana_primary }}" loop_control: index_var: rule_index_nr loop_var: rule_item @@ -482,13 +566,13 @@ lb_rules1: "{{ lb_rules1 | default([]) + [__rule_element] }}" vars: __rule_element: - name: "lb-rule-anydb{{ rule_index_nr }}" - frontend_ip_configuration: "lb-vip-anydb{{ rule_index_nr }}" - backend_address_pool: lb-backend-pool-anydb + name: "{{ sap_vm_provision_ha_load_balancer_name_anydb + '-rule' + (rule_index_nr | string) }}" + frontend_ip_configuration: "{{ sap_vm_provision_ha_load_balancer_name_anydb + '-vip' + (rule_index_nr | string) }}" + backend_address_pool: "{{ sap_vm_provision_ha_load_balancer_name_anydb + '-backend-pool' }}" protocol: All frontend_port: 0 # High Availability Ports (AnyPort), only on Internal Standard Load Balancer backend_port: 0 # High Availability Ports (AnyPort), only on Internal Standard Load Balancer - probe: "lb-probe-hc-vip-anydb" + probe: "{{ sap_vm_provision_ha_load_balancer_name_anydb + '-probe-hc-vip' }}" load_distribution: Default # Session persistence = None idle_timeout: 30 # 30 minutes enable_floating_ip: true # enable Frontend IP as a Floating IP (aka. Direct Server Return), if disabled then only 1 LB Rule allowed @@ -496,7 +580,7 @@ - rule_item | length > 0 - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0) loop: - - "{{ sap_vm_temp_vip_anydb_primary | default() }}" + - "{{ sap_vm_provision_ha_vip_anydb_primary }}" loop_control: index_var: rule_index_nr loop_var: rule_item @@ -506,13 +590,13 @@ lb_rules2: "{{ lb_rules2 | default([]) + [__rule_element] }}" vars: __rule_element: - name: "lb-rule-nwas{{ rule_index_nr }}" - frontend_ip_configuration: "lb-vip-nwas{{ rule_index_nr }}" - backend_address_pool: lb-backend-pool-nwas-ascs + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-rule' + (rule_index_nr | string) }}" + frontend_ip_configuration: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-vip' + (rule_index_nr | string) }}" + backend_address_pool: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-backend-pool' }}" protocol: All frontend_port: 0 # High Availability Ports (AnyPort), only on Internal Standard Load Balancer backend_port: 0 # High Availability Ports (AnyPort), only on Internal Standard Load Balancer - probe: "lb-probe-hc-vip-nwas{{ rule_index_nr }}" + probe: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-probe-hc-vip' + (rule_index_nr | string) }}" load_distribution: Default # Session persistence = None idle_timeout: 30 # 30 minutes enable_floating_ip: true # enable Frontend IP as a Floating IP (aka. 
Direct Server Return), if disabled then only 1 LB Rule allowed @@ -520,8 +604,8 @@ - rule_item | length > 0 - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0) loop: - - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default() }}" - - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default() }}" + - "{{ sap_vm_provision_ha_vip_nwas_abap_ascs }}" + - "{{ sap_vm_provision_ha_vip_nwas_abap_ers }}" loop_control: index_var: rule_index_nr loop_var: rule_item @@ -531,11 +615,11 @@ register: __sap_vm_provision_task_msazure_lb1a_info azure.azcollection.azure_rm_loadbalancer: resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}" - name: "lb-sap-hana-ha" # "lb-sap-ha" + name: "{{ sap_vm_provision_ha_load_balancer_name_hana }}" sku: "Standard" # AnyPort (HA Port) Protocol rule is not allowed for basic SKU load balancer, use standard SKU load balancer instead frontend_ip_configurations: "{{ (lb_frontend_virtual_ips1 | default([])) }}" # "{{ (lb_frontend_virtual_ips1 | default([])) + (lb_frontend_virtual_ips2 | default([])) }}" backend_address_pools: - - name: lb-backend-pool-hana + - name: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-backend-pool' }}" probes: "{{ (lb_probes1 | default([])) }}" # "{{ (lb_probes1 | default([])) + (lb_probes2 | default([])) }}" load_balancing_rules: "{{ (lb_rules1 | default([])) }}" # "{{ (lb_rules1 | default([])) + (lb_rules2 | default([])) }}" # Azure credentials @@ -550,11 +634,11 @@ register: __sap_vm_provision_task_msazure_lb1b_info azure.azcollection.azure_rm_loadbalancer: resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}" - name: "lb-sap-anydb-ha" # "lb-sap-ha" + name: "{{ sap_vm_provision_ha_load_balancer_name_anydb }}" sku: "Standard" # AnyPort (HA Port) Protocol rule is not allowed for basic SKU load balancer, use standard SKU load balancer instead frontend_ip_configurations: "{{ (lb_frontend_virtual_ips1 | default([])) }}" # "{{ (lb_frontend_virtual_ips1 | default([])) + (lb_frontend_virtual_ips2 | default([])) }}" backend_address_pools: - - name: lb-backend-pool-anydb + - name: "{{ sap_vm_provision_ha_load_balancer_name_anydb + '-backend-pool' }}" probes: "{{ (lb_probes1 | default([])) }}" # "{{ (lb_probes1 | default([])) + (lb_probes2 | default([])) }}" load_balancing_rules: "{{ (lb_rules1 | default([])) }}" # "{{ (lb_rules1 | default([])) + (lb_rules2 | default([])) }}" # Azure credentials @@ -564,21 +648,16 @@ secret: "{{ sap_vm_provision_msazure_app_client_secret }}" when: (groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0)) - - name: MS Azure Load Balancer (network L4) - Define Ansible Variable of Load Balancer for Database Server - ansible.builtin.set_fact: - __sap_vm_provision_task_msazure_lb1_info: "{{ __sap_vm_provision_task_msazure_lb1a_info if (groups['hana_secondary'] is defined and (groups['hana_secondary']|length>0)) else __sap_vm_provision_task_msazure_lb1b_info if (groups['anydb_secondary'] is defined and (groups['anydb_secondary']|length>0)) }}" - when: (groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0)) - - name: MS Azure Load Balancer (network L4) - Create NLB for SAP NetWeaver with Virtual IP and Health Probe configuration no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_msazure_lb2_info azure.azcollection.azure_rm_loadbalancer: resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}" - 
name: "lb-sap-nwas-ha" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas }}" sku: "Standard" # AnyPort (HA Port) Protocol rule is not allowed for basic SKU load balancer, use standard SKU load balancer instead frontend_ip_configurations: "{{ (lb_frontend_virtual_ips2 | default([])) }}" backend_address_pools: - - name: lb-backend-pool-nwas-ascs + - name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-backend-pool' }}" probes: "{{ (lb_probes2 | default([])) }}" load_balancing_rules: "{{ (lb_rules2 | default([])) }}" # Azure credentials @@ -588,19 +667,24 @@ secret: "{{ sap_vm_provision_msazure_app_client_secret }}" when: (groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)) + - name: MS Azure Load Balancer (network L4) - Define Ansible Variable of Load Balancer for Database Server + ansible.builtin.set_fact: + __sap_vm_provision_task_msazure_lb1_info: "{{ __sap_vm_provision_task_msazure_lb1a_info if (groups['hana_secondary'] is defined and (groups['hana_secondary']|length>0)) else __sap_vm_provision_task_msazure_lb1b_info if (groups['anydb_secondary'] is defined and (groups['anydb_secondary']|length>0)) }}" + when: (groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0)) + - name: Set fact to hold loop variables from include_tasks when SAP HANA HA ansible.builtin.set_fact: - lb_ha_sap_hana: "{{ __sap_vm_provision_task_msazure_lb1_info.state.backend_address_pools | selectattr('name', '==', 'lb-backend-pool-hana') | map(attribute='id') | first }}" + lb_ha_sap_hana: "{{ __sap_vm_provision_task_msazure_lb1_info.state.backend_address_pools | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_hana + '-backend-pool') | map(attribute='id') | first }}" when: (groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)) - name: Set fact to hold loop variables from include_tasks when SAP AnyDB HA ansible.builtin.set_fact: - lb_ha_sap_anydb: "{{ __sap_vm_provision_task_msazure_lb1_info.state.backend_address_pools | selectattr('name', '==', 'lb-backend-pool-anydb') | map(attribute='id') | first }}" - when: (groups["anyb_secondary"] is defined and (groups["anydb_secondary"]|length>0)) + lb_ha_sap_anydb: "{{ __sap_vm_provision_task_msazure_lb1_info.state.backend_address_pools | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_anydb + '-backend-pool') | map(attribute='id') | first }}" + when: (groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0)) - name: Set fact to hold loop variables from include_tasks when SAP NetWeaver HA ansible.builtin.set_fact: - lb_ha_sap_nwas: "{{ __sap_vm_provision_task_msazure_lb2_info.state.backend_address_pools | selectattr('name', '==', 'lb-backend-pool-nwas-ascs') | map(attribute='id') | first }}" + lb_ha_sap_nwas: "{{ __sap_vm_provision_task_msazure_lb2_info.state.backend_address_pools | selectattr('name', '==', sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-backend-pool') | map(attribute='id') | first }}" when: (groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)) - name: Update network interfaces for MS Azure VM - for SAP HANA HA with load balancing diff --git a/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/post_deployment_execute.yml b/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/post_deployment_execute.yml index 36845b8..dd4347e 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/post_deployment_execute.yml +++ 
b/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/post_deployment_execute.yml @@ -64,7 +64,7 @@ lb_frontend_virtual_ips1: "{{ lb_frontend_virtual_ips1 | default([]) + [__ip_element] }}" vars: __ip_element: - name: "lb-vip-hana{{ vip_index_nr }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-vip' + (vip_index_nr | string) }}" private_ip_address: "{{ vip_item | regex_replace('/.*', '') }}" private_ip_allocation_method: "Static" subnet: "{{ __sap_vm_provision_task_msazure_vnet_subnet_info.subnets[0].id }}" @@ -73,7 +73,7 @@ - vip_item | length > 0 - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0) loop: - - "{{ sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default() }}" + - "{{ sap_vm_provision_ha_vip_hana_primary }}" loop_control: index_var: vip_index_nr loop_var: vip_item @@ -83,7 +83,7 @@ lb_frontend_virtual_ips1: "{{ lb_frontend_virtual_ips1 | default([]) + [__ip_element] }}" vars: __ip_element: - name: "lb-vip-anydb{{ vip_index_nr }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_anydb + '-vip' + (vip_index_nr | string) }}" private_ip_address: "{{ vip_item | regex_replace('/.*', '') }}" private_ip_allocation_method: "Static" subnet: "{{ __sap_vm_provision_task_msazure_vnet_subnet_info.subnets[0].id }}" @@ -92,7 +92,7 @@ - vip_item | length > 0 - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0) loop: - - "{{ sap_vm_temp_vip_anydb_primary | default() }}" + - "{{ sap_vm_provision_ha_vip_anydb_primary }}" loop_control: index_var: vip_index_nr loop_var: vip_item @@ -102,7 +102,7 @@ lb_frontend_virtual_ips2: "{{ lb_frontend_virtual_ips2 | default([]) + [__ip_element] }}" vars: __ip_element: - name: "lb-vip-nwas{{ vip_index_nr }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-vip' + (vip_index_nr | string) }}" private_ip_address: "{{ vip_item | regex_replace('/.*', '') }}" private_ip_allocation_method: "Static" subnet: "{{ __sap_vm_provision_task_msazure_vnet_subnet_info.subnets[0].id }}" @@ -111,8 +111,8 @@ - vip_item | length > 0 - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0) loop: - - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default() }}" - - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default() }}" + - "{{ sap_vm_provision_ha_vip_nwas_abap_ascs }}" + - "{{ sap_vm_provision_ha_vip_nwas_abap_ers }}" loop_control: index_var: vip_index_nr loop_var: vip_item @@ -123,7 +123,7 @@ lb_probes1: "{{ lb_probes1 | default([]) + [__probe_element] }}" vars: __probe_element: - name: "lb-probe-hc-vip-hana{{ healthcheck_index_nr }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-probe-hc-vip' + (healthcheck_index_nr | string) }}" protocol: Tcp port: "{{ healthcheck_item }}" interval: 5 @@ -142,7 +142,7 @@ lb_probes1: "{{ lb_probes1 | default([]) + [__probe_element] }}" vars: __probe_element: - name: "lb-probe-hc-vip-anydb" + name: "{{ sap_vm_provision_ha_load_balancer_name_anydb + '-probe-hc-vip' }}" protocol: Tcp port: "62700" interval: 5 @@ -155,7 +155,7 @@ lb_probes2: "{{ lb_probes2 | default([]) + [__probe_element] }}" vars: __probe_element: - name: "lb-probe-hc-vip-nwas{{ healthcheck_index_nr }}" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-probe-hc-vip' + (healthcheck_index_nr | string) }}" protocol: Tcp port: "{{ healthcheck_item }}" interval: 5 @@ -176,13 +176,13 @@ lb_rules1: "{{ lb_rules1 | default([]) + [__rule_element] }}" vars: __rule_element: - name: "lb-rule-hana{{ rule_index_nr }}" - 
frontend_ip_configuration: "lb-vip-hana{{ rule_index_nr }}" - backend_address_pool: lb-backend-pool-hana + name: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-rule' + (rule_index_nr | string) }}" + frontend_ip_configuration: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-vip' + (rule_index_nr | string) }}" + backend_address_pool: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-backend-pool' }}" protocol: All frontend_port: 0 # High Availability Ports (AnyPort), only on Internal Standard Load Balancer backend_port: 0 # High Availability Ports (AnyPort), only on Internal Standard Load Balancer - probe: "lb-probe-hc-vip-hana{{ rule_index_nr }}" + probe: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-probe-hc-vip' + (rule_index_nr | string) }}" load_distribution: Default # Session persistence = None idle_timeout: 30 # 30 minutes enable_floating_ip: true # enable Frontend IP as a Floating IP (aka. Direct Server Return), if disabled then only 1 LB Rule allowed @@ -190,7 +190,7 @@ - rule_item | length > 0 - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0) loop: - - "{{ sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default() }}" + - "{{ sap_vm_provision_ha_vip_hana_primary }}" loop_control: index_var: rule_index_nr loop_var: rule_item @@ -200,13 +200,13 @@ lb_rules1: "{{ lb_rules1 | default([]) + [__rule_element] }}" vars: __rule_element: - name: "lb-rule-anydb{{ rule_index_nr }}" - frontend_ip_configuration: "lb-vip-anydb{{ rule_index_nr }}" - backend_address_pool: lb-backend-pool-anydb + name: "{{ sap_vm_provision_ha_load_balancer_name_anydb + '-rule' + (rule_index_nr | string) }}" + frontend_ip_configuration: "{{ sap_vm_provision_ha_load_balancer_name_anydb + '-vip' + (rule_index_nr | string) }}" + backend_address_pool: "{{ sap_vm_provision_ha_load_balancer_name_anydb + '-backend-pool' }}" protocol: All frontend_port: 0 # High Availability Ports (AnyPort), only on Internal Standard Load Balancer backend_port: 0 # High Availability Ports (AnyPort), only on Internal Standard Load Balancer - probe: "lb-probe-hc-vip-anydb" + probe: "{{ sap_vm_provision_ha_load_balancer_name_anydb + '-probe-hc-vip' }}" load_distribution: Default # Session persistence = None idle_timeout: 30 # 30 minutes enable_floating_ip: true # enable Frontend IP as a Floating IP (aka. 
Direct Server Return), if disabled then only 1 LB Rule allowed @@ -214,7 +214,7 @@ - rule_item | length > 0 - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0) loop: - - "{{ sap_vm_temp_vip_anydb_primary | default() }}" + - "{{ sap_vm_provision_ha_vip_anydb_primary }}" loop_control: index_var: rule_index_nr loop_var: rule_item @@ -224,13 +224,13 @@ lb_rules2: "{{ lb_rules2 | default([]) + [__rule_element] }}" vars: __rule_element: - name: "lb-rule-nwas{{ rule_index_nr }}" - frontend_ip_configuration: "lb-vip-nwas{{ rule_index_nr }}" - backend_address_pool: lb-backend-pool-nwas-ascs + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-rule' + (rule_index_nr | string) }}" + frontend_ip_configuration: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-vip' + (rule_index_nr | string) }}" + backend_address_pool: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-backend-pool' }}" protocol: All frontend_port: 0 # High Availability Ports (AnyPort), only on Internal Standard Load Balancer backend_port: 0 # High Availability Ports (AnyPort), only on Internal Standard Load Balancer - probe: "lb-probe-hc-vip-nwas{{ rule_index_nr }}" + probe: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-probe-hc-vip' + (rule_index_nr | string) }}" load_distribution: Default # Session persistence = None idle_timeout: 30 # 30 minutes enable_floating_ip: true # enable Frontend IP as a Floating IP (aka. Direct Server Return), if disabled then only 1 LB Rule allowed @@ -238,8 +238,8 @@ - rule_item | length > 0 - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0) loop: - - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default() }}" - - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default() }}" + - "{{ sap_vm_provision_ha_vip_nwas_abap_ascs }}" + - "{{ sap_vm_provision_ha_vip_nwas_abap_ers }}" loop_control: index_var: rule_index_nr loop_var: rule_item @@ -250,11 +250,11 @@ register: __sap_vm_provision_task_msazure_lb1_info azure.azcollection.azure_rm_loadbalancer: resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}" - name: "lb-sap-hana-ha" # "lb-sap-ha" + name: "{{ sap_vm_provision_ha_load_balancer_name_hana }}" sku: "Standard" # AnyPort (HA Port) Protocol rule is not allowed for basic SKU load balancer, use standard SKU load balancer instead frontend_ip_configurations: "{{ (lb_frontend_virtual_ips1 | default([])) }}" # "{{ (lb_frontend_virtual_ips1 | default([])) + (lb_frontend_virtual_ips2 | default([])) }}" backend_address_pools: - - name: lb-backend-pool-hana + - name: "{{ sap_vm_provision_ha_load_balancer_name_hana + '-backend-pool' }}" probes: "{{ (lb_probes1 | default([])) }}" # "{{ (lb_probes1 | default([])) + (lb_probes2 | default([])) }}" load_balancing_rules: "{{ (lb_rules1 | default([])) }}" # "{{ (lb_rules1 | default([])) + (lb_rules2 | default([])) }}" # Azure credentials @@ -269,11 +269,11 @@ register: __sap_vm_provision_task_msazure_lb1_info azure.azcollection.azure_rm_loadbalancer: resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}" - name: "lb-sap-anydb-ha" # "lb-sap-ha" + name: "{{ sap_vm_provision_ha_load_balancer_name_anydb }}" sku: "Standard" # AnyPort (HA Port) Protocol rule is not allowed for basic SKU load balancer, use standard SKU load balancer instead frontend_ip_configurations: "{{ (lb_frontend_virtual_ips1 | default([])) }}" # "{{ (lb_frontend_virtual_ips1 | default([])) + (lb_frontend_virtual_ips2 | default([])) }}" backend_address_pools: - - name: lb-backend-pool-anydb 
+ - name: "{{ sap_vm_provision_ha_load_balancer_name_anydb + '-backend-pool' }}" probes: "{{ (lb_probes1 | default([])) }}" # "{{ (lb_probes1 | default([])) + (lb_probes2 | default([])) }}" load_balancing_rules: "{{ (lb_rules1 | default([])) }}" # "{{ (lb_rules1 | default([])) + (lb_rules2 | default([])) }}" # Azure credentials @@ -288,11 +288,11 @@ register: __sap_vm_provision_task_msazure_lb2_info azure.azcollection.azure_rm_loadbalancer: resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}" - name: "lb-sap-nwas-ha" + name: "{{ sap_vm_provision_ha_load_balancer_name_nwas }}" sku: "Standard" # AnyPort (HA Port) Protocol rule is not allowed for basic SKU load balancer, use standard SKU load balancer instead frontend_ip_configurations: "{{ (lb_frontend_virtual_ips2 | default([])) }}" backend_address_pools: - - name: lb-backend-pool-nwas-ascs + - name: "{{ sap_vm_provision_ha_load_balancer_name_nwas + '-ascs-backend-pool' }}" probes: "{{ (lb_probes2 | default([])) }}" load_balancing_rules: "{{ (lb_rules2 | default([])) }}" # Azure credentials diff --git a/roles/sap_vm_provision/tasks/platform_ansible/ovirt_vm/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible/ovirt_vm/execute_main.yml index adf33e8..028a8c9 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible/ovirt_vm/execute_main.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible/ovirt_vm/execute_main.yml @@ -50,7 +50,7 @@ - name: Set fact to hold all inventory hosts in all groups ansible.builtin.set_fact: - groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}" + groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['anydb_primary'] | default([]) ] , [ groups['anydb_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}" - name: Set Ansible Vars register: __sap_vm_provision_task_ansible_vars_set diff --git a/roles/sap_vm_provision/tasks/platform_ansible/ovirt_vm/execute_provision.yml b/roles/sap_vm_provision/tasks/platform_ansible/ovirt_vm/execute_provision.yml index a5efbb1..14746f7 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible/ovirt_vm/execute_provision.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible/ovirt_vm/execute_provision.yml @@ -14,6 +14,10 @@ - sap_vm_provision_calculate_sap_hana_scaleout_active_coordinator is defined - not inventory_hostname in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan].keys() +- name: Set fact for host specifications of the provision target + ansible.builtin.set_fact: + target_provision_host_spec: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)] }}" + - name: Check if VM exists register: __sap_vm_provision_task_provision_host_single_check_exists ovirt.ovirt.ovirt_vm_info: @@ -89,11 +93,11 @@ comment: "{{ inventory_hostname }} created by Ansible Playbook for SAP" ## Virtual Machine main resources definition - 
cpu_sockets: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].ovirt_vm_cpu_threads }}" + cpu_sockets: "{{ target_provision_host_spec.ovirt_vm_cpu_threads }}" # Size suffix uses IEC 60027-2 standard (for example 1GiB, 1024MiB) - memory: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].ovirt_vm_memory_gib }}GiB" + memory: "{{ target_provision_host_spec.ovirt_vm_memory_gib }}GiB" # Make sure guaranteed memory is defined to avoid error when calculated larger than set in VM template. - memory_guaranteed: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].ovirt_vm_memory_gib }}GiB" + memory_guaranteed: "{{ target_provision_host_spec.ovirt_vm_memory_gib }}GiB" ## Virtual Machine settings configuration # Do not use Memory (RAM) ballooning, avoid over-commit of Memory @@ -106,11 +110,11 @@ soundcard_enabled: false high_availability: false operating_system: "{{ sap_vm_provision_ovirt_vm_operating_system }}" - placement_policy: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].ovirt_vm_placement_policy | default('pinned') }}" + placement_policy: "{{ target_provision_host_spec.ovirt_vm_placement_policy | default('pinned') }}" stateless: false timezone: "{{ sap_vm_provision_ovirt_vm_timezone }}" # Virtual Machine Type: high_performance, server, desktop - type: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].ovirt_vm_type | default('high_performance') }}" + type: "{{ target_provision_host_spec.ovirt_vm_type | default('high_performance') }}" ## Virtual Machine Storage configuration disk_format: "{{ sap_vm_provision_ovirt_vm_disk_type }}" # RHV default is 'cow' = thin provisioning @@ -243,7 +247,7 @@ filesystem_volume_map: | {% set volume_map = [] -%} {% set av_vol = available_volumes -%} - {% for storage_item in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].storage_definition -%} + {% for storage_item in target_provision_host_spec.storage_definition -%} {% for idx in range(0, storage_item.disk_count | default(1)) -%} {% if (storage_item.filesystem_type is defined) -%} {% if ('swap' in storage_item.filesystem_type and storage_item.swap_path is not defined) @@ -296,7 +300,7 @@ - name: Append loop value to register ansible.builtin.set_fact: - __sap_vm_provision_task_provision_host_single: "{{ __sap_vm_provision_task_provision_host_single_info.ovirt_vms[0] | combine( { 'host_node' : inventory_hostname } , { 'sap_host_type' : lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + 
'_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_host_type } , { 'sap_system_type' : (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_system_type | default('')) } ) }}" + __sap_vm_provision_task_provision_host_single: "{{ __sap_vm_provision_task_provision_host_single_info.ovirt_vms[0] | combine( { 'host_node' : inventory_hostname } , { 'sap_host_type' : target_provision_host_spec.sap_host_type } , { 'sap_system_type' : (target_provision_host_spec.sap_system_type | default('')) } ) }}" - name: Append output to merged register ansible.builtin.set_fact: diff --git a/roles/sap_vm_provision/tasks/platform_ansible/vmware_vm/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible/vmware_vm/execute_main.yml index c340434..24d504d 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible/vmware_vm/execute_main.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible/vmware_vm/execute_main.yml @@ -45,7 +45,7 @@ - name: Set fact to hold all inventory hosts in all groups ansible.builtin.set_fact: - groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}" + groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['anydb_primary'] | default([]) ] , [ groups['anydb_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}" - name: Set Ansible Vars register: __sap_vm_provision_task_ansible_vars_set diff --git a/roles/sap_vm_provision/tasks/platform_ansible/vmware_vm/execute_provision.yml b/roles/sap_vm_provision/tasks/platform_ansible/vmware_vm/execute_provision.yml index 4f8d31e..12bbbc5 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible/vmware_vm/execute_provision.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible/vmware_vm/execute_provision.yml @@ -19,6 +19,10 @@ - sap_vm_provision_calculate_sap_hana_scaleout_active_coordinator is defined - not inventory_hostname in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan].keys() +- name: Set fact for host specifications of the provision target + ansible.builtin.set_fact: + target_provision_host_spec: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)] }}" + - name: Identify VM Folder no_log: "{{ __sap_vm_provision_no_log }}" register: __sap_vm_provision_task_vmware_vm_folder @@ -154,10 +158,10 @@ ## May cause conflict with powered_on parameter hardware_customization: cpu_update: - num_cpus: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].vmware_vm_cpu_threads }}" - 
num_cores_per_socket: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].vmware_vm_cpu_smt }}" + num_cpus: "{{ target_provision_host_spec.vmware_vm_cpu_threads }}" + num_cores_per_socket: "{{ target_provision_host_spec.vmware_vm_cpu_smt }}" memory_update: - memory: "{{ (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].vmware_vm_memory_gib | int) * 1024 }}" # MiB + memory: "{{ (target_provision_host_spec.vmware_vm_memory_gib | int) * 1024 }}" # MiB # nics: ## Virtual Machine Storage configuration @@ -410,7 +414,7 @@ filesystem_volume_map: | {% set volume_map = [] -%} {% set av_vol = available_volumes -%} - {% for storage_item in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].storage_definition -%} + {% for storage_item in target_provision_host_spec.storage_definition -%} {% for idx in range(0, storage_item.disk_count | default(1)) -%} {% if (storage_item.filesystem_type is defined) -%} {% if ('swap' in storage_item.filesystem_type and storage_item.swap_path is not defined) @@ -468,7 +472,7 @@ - name: Append loop value to register ansible.builtin.set_fact: - __sap_vm_provision_task_provision_host_single: "{{ __sap_vm_provision_task_provision_host_single_info | combine( { 'host_node' : inventory_hostname } , { '__sap_vm_provision_task_vmware_vm_network_info' : __sap_vm_provision_task_vmware_vm_network_info } , { 'sap_host_type' : lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_host_type } , { 'sap_system_type' : (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_system_type | default('')) } ) }}" + __sap_vm_provision_task_provision_host_single: "{{ __sap_vm_provision_task_provision_host_single_info | combine( { 'host_node' : inventory_hostname } , { '__sap_vm_provision_task_vmware_vm_network_info' : __sap_vm_provision_task_vmware_vm_network_info } , { 'sap_host_type' : target_provision_host_spec.sap_host_type } , { 'sap_system_type' : (target_provision_host_spec.sap_system_type | default('')) } ) }}" - name: Append output to merged register ansible.builtin.set_fact: diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/aws_ec2_vs/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/aws_ec2_vs/execute_main.yml index 05df174..040e4d3 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/aws_ec2_vs/execute_main.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/aws_ec2_vs/execute_main.yml @@ -4,10 +4,12 @@ block: # Do not use ansible.builtin.copy as this will cause error 'not writable' on localhost (even if user has permissions) - - name: Copy Terraform Template files to temporary directory in current Ansible Playbook directory + # Should not cause overwrite of Terraform 
State files, but should overwrite populated Terraform Template files if they are updated + - name: Copy Terraform Template files to provided working directory path + run_once: true ansible.builtin.shell: | mkdir -p {{ sap_vm_provision_terraform_work_dir_path }} - cp -r {{ role_path }}/tasks/platform_ansible_to_terraform/{{ sap_vm_provision_iac_platform }}/tf_template/* {{ sap_vm_provision_terraform_work_dir_path }} + cp -R {{ role_path }}/tasks/platform_ansible_to_terraform/{{ sap_vm_provision_iac_platform }}/tf_template/* {{ sap_vm_provision_terraform_work_dir_path }} - name: Terraform Template for SAP - AWS register: terraform_template1_result @@ -15,6 +17,7 @@ AWS_ACCESS_KEY_ID: "{{ sap_vm_provision_aws_access_key }}" AWS_SECRET_ACCESS_KEY: "{{ sap_vm_provision_aws_secret_access_key }}" AWS_REGION: "{{ sap_vm_provision_aws_region }}" + run_once: true cloud.terraform.terraform: project_path: "{{ sap_vm_provision_terraform_work_dir_path }}" state: "{{ sap_vm_provision_terraform_state }}" @@ -34,12 +37,13 @@ map_host_specifications: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary') }}" sap_vm_provision_host_specification_plan: "{{ sap_vm_provision_host_specification_plan }}" sap_vm_provision_aws_ec2_vs_host_os_image: "{{ sap_vm_provision_aws_ec2_vs_host_os_image }}" - sap_software_download_directory: "{{ sap_software_download_directory }}" + sap_install_media_detect_source_directory: "{{ sap_install_media_detect_source_directory }}" sap_hana_install_instance_nr: "{{ sap_hana_install_instance_nr | default('') }}" sap_nwas_abap_ascs_instance_no: "{{ sap_swpm_ascs_instance_nr | default('') }}" sap_nwas_abap_pas_instance_no: "{{ sap_swpm_pas_instance_nr | default('') }}" - name: Terraform Template output + run_once: true ansible.builtin.debug: var: terraform_template1_result @@ -60,6 +64,7 @@ - name: Read outputs from project path when: sap_vm_provision_terraform_state == "present" + run_once: true cloud.terraform.terraform_output: project_path: "{{ sap_vm_provision_terraform_work_dir_path }}" register: terraform_output_project_path diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/aws_ec2_vs/tf_template/tf_template_input_vars.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/aws_ec2_vs/tf_template/tf_template_input_vars.tf index adb8562..61a0065 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/aws_ec2_vs/tf_template/tf_template_input_vars.tf +++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/aws_ec2_vs/tf_template/tf_template_input_vars.tf @@ -64,48 +64,51 @@ variable "sap_vm_provision_bastion_ssh_port" { variable "map_os_image_regex" { + description = "Map of operating systems OS Image regex, to identify latest OS Image for the OS major.minor version" + type = map(any) default = { rhel-8-1 = "*RHEL-8.1*_HVM*x86_64*" - - rhel-8-2 = "*RHEL-8.2*_HVM*x86_64*" - + # rhel-8-2 = "*RHEL-8.2*_HVM*x86_64*" // removed rhel-8-4 = "*RHEL-8.4*_HVM*x86_64*" - rhel-8-6 = "*RHEL-8.6*_HVM*x86_64*" - - rhel-7-7-sap-ha = "*RHEL-SAP-7.7*" - - rhel-7-9-sap-ha = "*RHEL-SAP-7.9*" - + rhel-8-8 = "*RHEL-8.8*_HVM*x86_64*" + rhel-8-10 = "*RHEL-8.10*_HVM*x86_64*" + rhel-9-0 = "*RHEL-9.0*_HVM*x86_64*" + rhel-9-1 = "*RHEL-9.1*_HVM*x86_64*" + rhel-9-2 = "*RHEL-9.2*_HVM*x86_64*" + rhel-9-3 = "*RHEL-9.3*_HVM*x86_64*" + rhel-9-4 = "*RHEL-9.4*_HVM*x86_64*" + + # rhel-7-7-sap-ha = "*RHEL-SAP-7.7*" // removed + # rhel-7-9-sap-ha = "*RHEL-SAP-7.9*" // removed rhel-8-1-sap-ha = 
"*RHEL-SAP-8.1.0*" - rhel-8-2-sap-ha = "*RHEL-SAP-8.2.0*" - rhel-8-4-sap-ha = "*RHEL-SAP-8.4.0*" - rhel-8-6-sap-ha = "*RHEL-SAP-8.6.0*" + rhel-8-8-sap-ha = "*RHEL-SAP-8.8.0*" + rhel-8-10-sap-ha = "*RHEL-SAP-8.10.0*" - sles-15-2 = "*suse-sles-15-sp2-v202*-hvm-ssd-x86_64*" - - sles-15-3 = "*suse-sles-15-sp3-v202*-hvm-ssd-x86_64*" - - sles-15-4 = "*suse-sles-15-sp4-v202*-hvm-ssd-x86_64*" + sles-12-5 = "*suse-sles-12-sp5-v202*-hvm-ssd-x86_64*" + # sles-15-2 = "*suse-sles-15-sp2-v202*-hvm-ssd-x86_64*" // removed + # sles-15-3 = "*suse-sles-15-sp3-v202*-hvm-ssd-x86_64*" // removed + # sles-15-4 = "*suse-sles-15-sp4-v202*-hvm-ssd-x86_64*" // removed + sles-15-5 = "*suse-sles-15-sp5-v202*-hvm-ssd-x86_64*" + sles-15-6 = "*suse-sles-15-sp6-v202*-hvm-ssd-x86_64*" sles-12-5-sap-ha = "*suse-sles-sap-12-sp5-v202*-hvm-ssd-x86_64*" - sles-15-1-sap-ha = "*suse-sles-sap-15-sp1-v202*-hvm-ssd-x86_64*" - sles-15-2-sap-ha = "*suse-sles-sap-15-sp2-v202*-hvm-ssd-x86_64*" - sles-15-3-sap-ha = "*suse-sles-sap-15-sp3-v202*-hvm-ssd-x86_64*" - sles-15-4-sap-ha = "*suse-sles-sap-15-sp4-v202*-hvm-ssd-x86_64*" + sles-15-5-sap-ha = "*suse-sles-sap-15-sp5-v202*-hvm-ssd-x86_64*" + sles-15-6-sap-ha = "*suse-sles-sap-15-sp6-v202*-hvm-ssd-x86_64*" } + } variable "sap_vm_provision_host_specification_plan" { @@ -116,12 +119,12 @@ variable "sap_vm_provision_aws_ec2_vs_host_os_image" { description = "Host OS Image. This variable uses the locals mapping with regex of OS Images, and will alter host provisioning." } -variable "sap_software_download_directory" { +variable "sap_install_media_detect_source_directory" { description = "Mount point for downloads of SAP Software" validation { error_message = "Directory must start with forward slash." - condition = can(regex("^/", var.sap_software_download_directory)) + condition = can(regex("^/", var.sap_install_media_detect_source_directory)) } } diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/gcp_ce_vm/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/gcp_ce_vm/execute_main.yml index 59f1275..8af889a 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/gcp_ce_vm/execute_main.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/gcp_ce_vm/execute_main.yml @@ -4,13 +4,16 @@ block: # Do not use ansible.builtin.copy as this will cause error 'not writable' on localhost (even if user has permissions) - - name: Copy Terraform Template files to temporary directory in current Ansible Playbook directory + # Should not cause overwrite of Terraform State files, but should overwrite populated Terraform Template files if they are updated + - name: Copy Terraform Template files to provided working directory path + run_once: true ansible.builtin.shell: | mkdir -p {{ sap_vm_provision_terraform_work_dir_path }} - cp -r {{ role_path }}/tasks/platform_ansible_to_terraform/{{ sap_vm_provision_iac_platform }}/tf_template/* {{ sap_vm_provision_terraform_work_dir_path }} + cp -R {{ role_path }}/tasks/platform_ansible_to_terraform/{{ sap_vm_provision_iac_platform }}/tf_template/* {{ sap_vm_provision_terraform_work_dir_path }} - name: Terraform Template for SAP - Google Cloud register: terraform_template1_result + run_once: true cloud.terraform.terraform: project_path: "{{ sap_vm_provision_terraform_work_dir_path }}" state: "{{ sap_vm_provision_terraform_state }}" @@ -30,12 +33,13 @@ map_host_specifications: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary') 
}}" sap_vm_provision_host_specification_plan: "{{ sap_vm_provision_host_specification_plan }}" sap_vm_provision_gcp_ce_vm_host_os_image: "{{ sap_vm_provision_gcp_ce_vm_host_os_image }}" - sap_software_download_directory: "{{ sap_software_download_directory }}" + sap_install_media_detect_source_directory: "{{ sap_install_media_detect_source_directory }}" sap_hana_install_instance_nr: "{{ sap_hana_install_instance_nr | default('') }}" sap_nwas_abap_ascs_instance_no: "{{ sap_swpm_ascs_instance_nr | default('') }}" sap_nwas_abap_pas_instance_no: "{{ sap_swpm_pas_instance_nr | default('') }}" - name: Terraform Template output + run_once: true ansible.builtin.debug: var: terraform_template1_result @@ -56,6 +60,7 @@ - name: Read outputs from project path when: sap_vm_provision_terraform_state == "present" + run_once: true cloud.terraform.terraform_output: project_path: "{{ sap_vm_provision_terraform_work_dir_path }}" register: terraform_output_project_path diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/gcp_ce_vm/tf_template/tf_template_input_vars.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/gcp_ce_vm/tf_template/tf_template_input_vars.tf index e1344b1..04f71b3 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/gcp_ce_vm/tf_template/tf_template_input_vars.tf +++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/gcp_ce_vm/tf_template/tf_template_input_vars.tf @@ -70,12 +70,12 @@ variable "sap_vm_provision_gcp_ce_vm_host_os_image" { description = "Host OS Image. This variable uses the locals mapping with regex of OS Images, and will alter host provisioning." } -variable "sap_software_download_directory" { +variable "sap_install_media_detect_source_directory" { description = "Mount point for downloads of SAP Software" validation { error_message = "Directory must start with forward slash." 
- condition = can(regex("^/", var.sap_software_download_directory)) + condition = can(regex("^/", var.sap_install_media_detect_source_directory)) } } @@ -122,70 +122,25 @@ variable "map_os_image_regex" { default = { - rhel-8-latest = { - project = "rhel-cloud" - family = "rhel-8" - }, - - rhel-7-7-sap-ha = { - project = "rhel-sap-cloud" - family = "rhel-7-7-sap-ha" - }, - - rhel-7-9-sap-ha = { - project = "rhel-sap-cloud" - family = "rhel-7-9-sap-ha" - }, - - rhel-8-1-sap-ha = { - project = "rhel-sap-cloud" - family = "rhel-8-1-sap-ha" - }, - - rhel-8-2-sap-ha = { - project = "rhel-sap-cloud" - family = "rhel-8-2-sap-ha" - }, - - rhel-8-4-sap-ha = { - project = "rhel-sap-cloud" - family = "rhel-8-4-sap-ha" - }, - - rhel-8-6-sap-ha = { - project = "rhel-sap-cloud" - family = "rhel-8-6-sap-ha" - }, - - sles-15-latest = { - project = "suse-cloud" - family = "sles-15" - }, - - sles-12-sp5-sap = { - project = "suse-sap-cloud" - family = "sles-12-sp5-sap" - }, - - sles-15-sp1-sap = { - project = "suse-sap-cloud" - family = "sles-15-sp1-sap" - }, - - sles-15-sp2-sap = { - project = "suse-sap-cloud" - family = "sles-15-sp2-sap" - }, - - sles-15-sp3-sap = { - project = "suse-sap-cloud" - family = "sles-15-sp3-sap" - }, - - sles-15-sp4-sap = { - project = "suse-sap-cloud" - family = "sles-15-sp4-sap" - }, + rhel-8-latest = { project = "rhel-cloud" , family = "rhel-8" }, + rhel-9-latest = { project = "rhel-cloud" , family = "rhel-9" }, + + sles-12-latest = { project = "suse-cloud" , family = "sles-12" }, + sles-15-latest = { project = "suse-cloud" , family = "sles-15" }, + + # rhel-7-7-sap-ha = { project = "rhel-sap-cloud" , family = "rhel-7-7-sap-ha" }, // removed + rhel-7-9-sap-ha = { project = "rhel-sap-cloud" , family = "rhel-7-9-sap-ha" }, + # rhel-8-1-sap-ha = { project = "rhel-sap-cloud" , family = "rhel-8-1-sap-ha" }, // removed + rhel-8-2-sap-ha = { project = "rhel-sap-cloud" , family = "rhel-8-2-sap-ha" }, + rhel-8-4-sap-ha = { project = "rhel-sap-cloud" , family = "rhel-8-4-sap-ha" }, + rhel-8-6-sap-ha = { project = "rhel-sap-cloud" , family = "rhel-8-6-sap-ha" }, + + sles-12-sp5-sap = { project = "suse-sap-cloud" , family = "sles-12-sp5-sap" }, + # sles-15-sp1-sap = { project = "suse-sap-cloud" , family = "sles-15-sp1-sap" }, // removed + sles-15-sp2-sap = { project = "suse-sap-cloud" , family = "sles-15-sp2-sap" }, + sles-15-sp3-sap = { project = "suse-sap-cloud" , family = "sles-15-sp3-sap" }, + sles-15-sp4-sap = { project = "suse-sap-cloud" , family = "sles-15-sp4-sap" }, + sles-15-sp5-sap = { project = "suse-sap-cloud" , family = "sles-15-sp5-sap" } } diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/execute_main.yml index f6c519a..f1c7e7b 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/execute_main.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/execute_main.yml @@ -4,13 +4,16 @@ block: # Do not use ansible.builtin.copy as this will cause error 'not writable' on localhost (even if user has permissions) - - name: Copy Terraform Template files to temporary directory in current Ansible Playbook directory + # Should not cause overwrite of Terraform State files, but should overwrite populated Terraform Template files if they are updated + - name: Copy Terraform Template files to provided working directory path + run_once: true ansible.builtin.shell: | mkdir -p {{ 
sap_vm_provision_terraform_work_dir_path }} - cp -r {{ role_path }}/tasks/platform_ansible_to_terraform/{{ sap_vm_provision_iac_platform }}/tf_template/* {{ sap_vm_provision_terraform_work_dir_path }} + cp -R {{ role_path }}/tasks/platform_ansible_to_terraform/{{ sap_vm_provision_iac_platform }}/tf_template/* {{ sap_vm_provision_terraform_work_dir_path }} - - name: Terraform Template for SAP - IBM Cloud + - name: Terraform Template for SAP - IBM Power Virtual Server with IBM Cloud register: terraform_template1_result + run_once: true cloud.terraform.terraform: project_path: "{{ sap_vm_provision_terraform_work_dir_path }}" state: "{{ sap_vm_provision_terraform_state }}" @@ -19,8 +22,8 @@ variables: ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}" ibmcloud_resource_group: "{{ sap_vm_provision_ibmcloud_resource_group_name }}" - ibmcloud_vpc_availability_zone: "{{ sap_vm_provision_ibmcloud_availability_zone }}" ibmcloud_vpc_subnet_name: "{{ sap_vm_provision_ibmcloud_vpc_subnet_name }}" + ibmcloud_powervs_location: "{{ sap_vm_provision_ibmcloud_powervs_location }}" sap_vm_provision_resource_prefix: "{{ sap_vm_provision_resource_prefix }}" sap_vm_provision_dns_root_domain: "{{ sap_vm_provision_dns_root_domain }}" sap_vm_provision_bastion_os_image: "{{ sap_vm_provision_bastion_os_image }}" @@ -29,13 +32,14 @@ map_os_image_regex: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_os_image_dictionary') }}" map_host_specifications: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary') }}" sap_vm_provision_host_specification_plan: "{{ sap_vm_provision_host_specification_plan }}" - sap_vm_provision_ibmcloud_vs_host_os_image: "{{ sap_vm_provision_ibmcloud_vs_host_os_image }}" - sap_software_download_directory: "{{ sap_software_download_directory }}" + sap_vm_provision_ibmcloud_powervs_host_os_image: "{{ sap_vm_provision_ibmcloud_powervs_host_os_image }}" + sap_install_media_detect_source_directory: "{{ sap_install_media_detect_source_directory }}" sap_hana_install_instance_nr: "{{ sap_hana_install_instance_nr | default('') }}" sap_nwas_abap_ascs_instance_no: "{{ sap_swpm_ascs_instance_nr | default('') }}" sap_nwas_abap_pas_instance_no: "{{ sap_swpm_pas_instance_nr | default('') }}" - name: Terraform Template output + run_once: true ansible.builtin.debug: var: terraform_template1_result @@ -56,6 +60,7 @@ - name: Read outputs from project path when: sap_vm_provision_terraform_state == "present" + run_once: true cloud.terraform.terraform_output: project_path: "{{ sap_vm_provision_terraform_work_dir_path }}" register: terraform_output_project_path diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/tf_template/tf_template.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/tf_template/tf_template.tf index 9d77d69..eefe6da 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/tf_template/tf_template.tf +++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/tf_template/tf_template.tf @@ -15,37 +15,20 @@ terraform { # Terraform Provider declaration provider "ibm" { - alias = "standard" - - # Define Provider inputs manually - # ibmcloud_api_key = "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" - # Define Provider inputs from given Terraform Variables ibmcloud_api_key = var.ibmcloud_api_key - - # If using IBM Cloud Automation Manager, the Provider declaration values are 
populated automatically - # from the Cloud Connection credentials (by using Environment Variables) - - # If using IBM Cloud Schematics, the Provider declaration values are populated automatically - region = local.ibmcloud_region - zone = lower(var.ibmcloud_powervs_location) // Required for IBM Power VS only - } - +# Terraform Provider (with Alias) declaration - for IBM Power Infrastructure environment via IBM Cloud provider "ibm" { - - alias = "powervs_secure" - + alias = "powervs_secure_enclave" + # Define Provider inputs from given Terraform Variables ibmcloud_api_key = var.ibmcloud_api_key - - region = local.ibmcloud_powervs_region - - zone = lower(var.ibmcloud_powervs_location) // Required for IBM Power VS only - + region = local.ibmcloud_powervs_region // IBM Power VS Region + zone = lower(var.ibmcloud_powervs_location) // IBM Power VS Location } @@ -163,7 +146,11 @@ module "run_account_bootstrap_powervs_workspace_module" { source = "github.com/sap-linuxlab/terraform.modules_for_sap//ibmcloud_powervs/account_bootstrap_powervs_workspace?ref=main" - providers = { ibm = ibm.standard } + # Define TF Module child provider name = TF Template parent provider name + providers = { + ibm.main = ibm.standard , + ibm.powervs_secure_enclave = ibm.powervs_secure_enclave + } module_var_resource_group_id = module.run_account_init_module.output_resource_group_id module_var_resource_prefix = var.sap_vm_provision_resource_prefix @@ -182,13 +169,18 @@ module "run_account_bootstrap_powervs_networks_module" { source = "github.com/sap-linuxlab/terraform.modules_for_sap//ibmcloud_powervs/account_bootstrap_powervs_networks?ref=main" - providers = { ibm = ibm.powervs_secure } + # Define TF Module child provider name = TF Template parent provider name + providers = { + ibm.main = ibm.standard , + ibm.powervs_secure_enclave = ibm.powervs_secure_enclave + } - module_var_resource_group_id = module.run_account_init_module.output_resource_group_id - module_var_resource_prefix = var.sap_vm_provision_resource_prefix - module_var_ibmcloud_power_zone = lower(var.ibmcloud_powervs_location) - module_var_ibmcloud_powervs_workspace_guid = module.run_account_bootstrap_powervs_workspace_module.output_power_group_guid - module_var_ibmcloud_vpc_crn = module.run_account_bootstrap_powervs_workspace_module.output_power_target_vpc_crn + module_var_resource_group_id = module.run_account_init_module.output_resource_group_id + module_var_resource_prefix = var.sap_vm_provision_resource_prefix + module_var_ibmcloud_power_zone = lower(var.ibmcloud_powervs_location) + module_var_ibmcloud_powervs_workspace_guid = module.run_account_bootstrap_powervs_workspace_module.output_power_guid + module_var_ibmcloud_vpc_crn = module.run_account_bootstrap_powervs_workspace_module.output_power_target_vpc_crn + module_var_ibmcloud_tgw_instance_name = module.run_account_bootstrap_module.output_tgw_name } @@ -204,10 +196,10 @@ module "run_powervs_interconnect_sg_update_module" { providers = { ibm = ibm.standard } - module_var_bastion_security_group_id = module.run_bastion_inject_module.output_bastion_security_group_id - module_var_host_security_group_id = module.run_account_bootstrap_module.output_host_security_group_id + module_var_bastion_security_group_id = module.run_bastion_inject_module.output_bastion_security_group_id + module_var_host_security_group_id = module.run_account_bootstrap_module.output_host_security_group_id - module_var_power_group_network_private_subnet = 
module.run_account_bootstrap_powervs_networks_module.output_power_group_network_private_subnet + module_var_power_network_private_subnet = module.run_account_bootstrap_powervs_networks_module.output_power_network_private_subnet } @@ -245,7 +237,7 @@ module "run_powervs_interconnect_proxy_provision_module" { module_var_host_private_ssh_key = module.run_account_bootstrap_module.output_host_private_ssh_key module_var_host_security_group_id = module.run_account_bootstrap_module.output_host_security_group_id - module_var_proxy_os_image = var.map_os_image_regex[var.sap_vm_provision_bastion_os_image] + module_var_proxy_os_image = var.map_os_image_regex_bastion[var.sap_vm_provision_bastion_os_image] module_var_dns_root_domain_name = var.sap_vm_provision_dns_root_domain module_var_dns_services_instance = module.run_account_bootstrap_module.output_host_dns_services_instance @@ -269,14 +261,18 @@ module "run_host_provision_module" { source = "github.com/sap-linuxlab/terraform.modules_for_sap//ibmcloud_powervs/host_provision?ref=main" - providers = { ibm = ibm.powervs_secure } + # Define TF Module child provider name = TF Template parent provider name + providers = { + ibm.main = ibm.standard , + ibm.powervs_secure_enclave = ibm.powervs_secure_enclave + } module_var_resource_group_id = module.run_account_init_module.output_resource_group_id module_var_resource_prefix = var.sap_vm_provision_resource_prefix module_var_resource_tags = var.resource_tags - module_var_ibm_power_group_guid = module.run_account_bootstrap_powervs_workspace_module.output_power_group_guid - module_var_power_group_networks = module.run_account_bootstrap_powervs_networks_module.output_power_group_networks + module_var_ibm_power_guid = module.run_account_bootstrap_powervs_workspace_module.output_power_guid + module_var_power_networks = module.run_account_bootstrap_powervs_networks_module.output_power_networks module_var_ibmcloud_vpc_subnet_name = local.ibmcloud_vpc_subnet_create_boolean ? module.run_account_init_module.output_vpc_subnet_name : var.ibmcloud_vpc_subnet_name @@ -288,12 +284,20 @@ module "run_host_provision_module" { module_var_host_public_ssh_key = module.run_account_bootstrap_module.output_host_public_ssh_key module_var_host_private_ssh_key = module.run_account_bootstrap_module.output_host_private_ssh_key - module_var_host_os_image = var.map_os_image_regex[var.sap_vm_provision_ibmcloud_vs_host_os_image] + module_var_host_os_image = var.map_os_image_regex[var.sap_vm_provision_ibmcloud_powervs_host_os_image] module_var_dns_root_domain_name = var.sap_vm_provision_dns_root_domain module_var_dns_services_instance = module.run_account_bootstrap_module.output_host_dns_services_instance - module_var_dns_proxy_ip = module.run_powervs_interconnect_proxy_provision_module.output_proxy_private_ip + module_var_dns_custom_resolver_ip = module.run_powervs_interconnect_proxy_provision_module.output_dns_custom_resolver_ip + + module_var_web_proxy_enable = true + module_var_web_proxy_url = "http://${module.run_powervs_interconnect_proxy_provision_module.output_proxy_private_ip}:${module.run_powervs_interconnect_proxy_provision_module.output_proxy_port_squid}" + module_var_web_proxy_exclusion = "localhost,127.0.0.1,${var.sap_vm_provision_dns_root_domain}" // Web Proxy exclusion list for hosts running on IBM Power (e.g. 
localhost,127.0.0.1,custom.root.domain) + + module_var_os_vendor_enable = false # After Terraform has provisioned hosts, this will be done by Ansible + module_var_os_vendor_account_user = "" + module_var_os_vendor_account_user_passcode = "" # Set Terraform Module Variables using for_each loop on a map Terraform Variable with nested objects @@ -303,17 +307,9 @@ module "run_host_provision_module" { module_var_virtual_server_hostname = each.key - module_var_virtual_server_profile = var.map_host_specifications[var.sap_vm_provision_host_specification_plan][each.key].virtual_server_profile + module_var_hardware_machine_type = var.map_host_specifications[var.sap_vm_provision_host_specification_plan][each.key].ibmcloud_powervs_hardware_machine_type + module_var_virtual_server_profile = var.map_host_specifications[var.sap_vm_provision_host_specification_plan][each.key].virtual_machine_profile module_var_storage_definition = [ for storage_item in var.map_host_specifications[var.sap_vm_provision_host_specification_plan][each.key]["storage_definition"] : storage_item if contains(keys(storage_item),"disk_size") && try(storage_item.swap_path,"") == "" ] - module_var_web_proxy_enable = false - module_var_os_vendor_enable = false - - module_var_web_proxy_url = "" - module_var_web_proxy_exclusion = "" - - module_var_os_vendor_account_user = "" - module_var_os_vendor_account_user_passcode = "" - } diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/tf_template/tf_template_input_vars.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/tf_template/tf_template_input_vars.tf index e655c31..e343c2d 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/tf_template/tf_template_input_vars.tf +++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/tf_template/tf_template_input_vars.tf @@ -30,22 +30,25 @@ variable "map_ibm_powervs_to_vpc_az" { type = map(any) default = { - + dal10 = "us-south-1" dal12 = "us-south-2" - us-south = "us-south-3" // naming of IBM Power VS location 'us-south' was previous naming convention, would otherwise be 'DAL13' - us-east = "us-east-1" // naming of IBM Power VS location 'us-east' was previous naming convention, would otherwise be 'WDC04' - # wdc06 = "us-east-2" // No Cloud Connection available at this location + us-south = "us-south-3" // naming of IBM Power VS location 'us-south' was previous naming convention, would otherwise be 'dal13' + us-east = "us-east-1" // naming of IBM Power VS location 'us-east' was previous naming convention, would otherwise be 'wdc04' + wdc06 = "us-east-2" + wdc07 = "us-east-3" sao01 = "br-sao-1" + sao02 = "br-sao-2" tor01 = "ca-tor-1" - eu-de-1 = "eu-de-2" // naming of IBM Power VS location 'eu-de-1' was previous naming convention, would otherwise be 'FRA04' - eu-de-2 = "eu-de-3" // naming of IBM Power VS location 'eu-de-2' was previous naming convention, would otherwise be 'FRA05' + eu-de-1 = "eu-de-2" // naming of IBM Power VS location 'eu-de-1' was previous naming convention, would otherwise be 'fra04' + eu-de-2 = "eu-de-3" // naming of IBM Power VS location 'eu-de-2' was previous naming convention, would otherwise be 'fra05' lon04 = "eu-gb-1" lon06 = "eu-gb-3" + mad02 = "eu-es-1" + mad04 = "eu-es-2" syd04 = "au-syd-2" syd05 = "au-syd-3" tok04 = "jp-tok-2" osa21 = "jp-osa-1" - } } @@ -60,22 +63,25 @@ variable "map_ibm_powervs_location_to_powervs_region" { type = map(any) default = { - + dal10 = "us-south" dal12 = "us-south" 
us-south = "us-south" us-east = "us-east" - # wdc06 = "us-east" // no Cloud Connection available at this location + wdc06 = "us-east" + wdc07 = "us-east" sao01 = "sao" + sao02 = "sao" tor01 = "tor" eu-de-1 = "eu-de" eu-de-2 = "eu-de" lon04 = "lon" lon06 = "lon" + mad02 = "mad" + mad04 = "mad" syd04 = "syd" syd05 = "syd" tok04 = "tok" osa21 = "osa" - } } @@ -138,16 +144,16 @@ variable "sap_vm_provision_host_specification_plan" { description = "Host specification plans are xsmall_256gb. This variable uses the locals mapping with a nested list of host specifications, and will alter host provisioning." } -variable "sap_vm_provision_ibmcloud_vs_host_os_image" { +variable "sap_vm_provision_ibmcloud_powervs_host_os_image" { description = "Host OS Image. This variable uses the locals mapping with regex of OS Images, and will alter host provisioning." } -variable "sap_software_download_directory" { +variable "sap_install_media_detect_source_directory" { description = "Mount point for downloads of SAP Software" validation { error_message = "Directory must start with forward slash." - condition = can(regex("^/", var.sap_software_download_directory)) + condition = can(regex("^/", var.sap_install_media_detect_source_directory)) } } @@ -193,35 +199,13 @@ variable "map_os_image_regex_bastion" { default = { - rhel-7-6-sap-ha = ".*redhat.*7-6.*amd64.*hana.*" - - rhel-8-1-sap-ha = ".*redhat.*8-1.*amd64.*hana.*" - - rhel-8-2-sap-ha = ".*redhat.*8-2.*amd64.*hana.*" - - rhel-8-4-sap-ha = ".*redhat.*8-4.*amd64.*hana.*" - - rhel-7-6-sap-applications = ".*redhat.*7-6.*amd64.*applications.*" - - rhel-8-1-sap-applications = ".*redhat.*8-1.*amd64.*applications.*" - - rhel-8-2-sap-applications = ".*redhat.*8-2.*amd64.*applications.*" - - rhel-8-4-sap-applications = ".*redhat.*8-4.*amd64.*applications.*" - - rhel-8-4 = ".*redhat.*8-4.*minimal.*amd64.*" - - sles-12-4-sap-ha = ".*sles.*12-4.*amd64.*hana.*" + rhel-8-8 = ".*redhat.*8-8.*minimal.*amd64.*" + rhel-8-10 = ".*redhat.*8-10.*minimal.*amd64.*" + rhel-9-2 = ".*redhat.*9-2.*minimal.*amd64.*" + rhel-9-4 = ".*redhat.*9-4.*minimal.*amd64.*" - sles-15-1-sap-ha = ".*sles.*15-1.*amd64.*hana.*" - - sles-15-2-sap-ha = ".*sles.*15-2.*amd64.*hana.*" - - sles-12-4-sap-applications = ".*sles.*12-4.*amd64.*applications.*" - - sles-15-1-sap-applications = ".*sles.*15-1.*amd64.*applications.*" - - sles-15-2-sap-applications = ".*sles.*15-2.*amd64.*applications.*" + sles-15-5 = ".*sles.*15-5.*amd64-[0-9]" + sles-15-6 = ".*sles.*15-6.*amd64-[0-9]" } @@ -236,25 +220,24 @@ variable "map_os_image_regex" { default = { - rhel-8-4 = ".*RHEL.*8.*4" - - rhel-8-6 = ".*RHEL.*8.*6" - - rhel-9-2 = ".*RHEL.*9.*2" - - sles-15-3 = ".*SLES.*15.*3" - - sles-15-4 = ".*SLES.*15.*4" - - rhel-8-4-sap-ha = ".*RHEL.*8.*4.*SAP$" # ensure string suffix using $ - - rhel-8-6-sap-ha = ".*RHEL.*8.*6.*SAP$" # ensure string suffix using $ - - sles-15-2-sap = ".*SLES.*15.*2.*SAP$" # ensure string suffix using $ - - sles-15-3-sap = ".*SLES.*15.*3.*SAP$" # ensure string suffix using $ - - sles-15-4-sap = ".*SLES.*15.*4.*SAP$" # ensure string suffix using $ + # OS Image keys for IBM Power Virtual Server + rhel-8-4-sap-ha = "RHEL8-SP4-SAP" + rhel-8-4-sap-ha-byol = "RHEL8-SP4-SAP-BYOL" + rhel-8-6-sap-ha = "RHEL8-SP6-SAP" + rhel-8-6-sap-ha-byol = "RHEL8-SP6-SAP-BYOL" + rhel-8-8-sap-ha = "RHEL8-SP8-SAP" + rhel-8-8-sap-ha-byol = "RHEL8-SP8-SAP-BYOL" + rhel-9-2-sap-ha = "RHEL9-SP2-SAP" + rhel-9-2-sap-ha-byol = "RHEL9-SP2-SAP-BYOL" + + sles-15-2-sap-ha = "SLES15-SP2-SAP" + sles-15-2-sap-ha-byol = "SLES15-SP2-SAP-BYOL" + 
sles-15-3-sap-ha = "SLES15-SP3-SAP" + sles-15-3-sap-ha-byol = "SLES15-SP3-SAP-BYOL" + sles-15-4-sap-ha = "SLES15-SP4-SAP" + sles-15-4-sap-ha-byol = "SLES15-SP4-SAP-BYOL" + sles-15-5-sap-ha = "SLES15-SP5-SAP" + sles-15-5-sap-ha-byol = "SLES15-SP5-SAP-BYOL" } @@ -271,6 +254,7 @@ variable "map_host_specifications" { small_256gb = { hana01 = { // Hostname + ibmcloud_powervs_hardware_machine_type = "e1080" virtual_server_profile = "ush1-4x256" // An IBM PowerVS host will be set to Tier 1 or Tier 3 storage type, and cannot use block storage volumes from both storage types // Therefore all block storage volumes are provisioned with Tier 1 (this cannot be changed once provisioned) diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_vs/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_vs/execute_main.yml index f6c519a..4136e27 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_vs/execute_main.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_vs/execute_main.yml @@ -4,13 +4,16 @@ block: # Do not use ansible.builtin.copy as this will cause error 'not writable' on localhost (even if user has permissions) - - name: Copy Terraform Template files to temporary directory in current Ansible Playbook directory + # Should not cause overwrite of Terraform State files, but should overwrite populated Terraform Template files if they are updated + - name: Copy Terraform Template files to provided working directory path + run_once: true ansible.builtin.shell: | mkdir -p {{ sap_vm_provision_terraform_work_dir_path }} - cp -r {{ role_path }}/tasks/platform_ansible_to_terraform/{{ sap_vm_provision_iac_platform }}/tf_template/* {{ sap_vm_provision_terraform_work_dir_path }} + cp -R {{ role_path }}/tasks/platform_ansible_to_terraform/{{ sap_vm_provision_iac_platform }}/tf_template/* {{ sap_vm_provision_terraform_work_dir_path }} - name: Terraform Template for SAP - IBM Cloud register: terraform_template1_result + run_once: true cloud.terraform.terraform: project_path: "{{ sap_vm_provision_terraform_work_dir_path }}" state: "{{ sap_vm_provision_terraform_state }}" @@ -30,12 +33,13 @@ map_host_specifications: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary') }}" sap_vm_provision_host_specification_plan: "{{ sap_vm_provision_host_specification_plan }}" sap_vm_provision_ibmcloud_vs_host_os_image: "{{ sap_vm_provision_ibmcloud_vs_host_os_image }}" - sap_software_download_directory: "{{ sap_software_download_directory }}" + sap_install_media_detect_source_directory: "{{ sap_install_media_detect_source_directory }}" sap_hana_install_instance_nr: "{{ sap_hana_install_instance_nr | default('') }}" sap_nwas_abap_ascs_instance_no: "{{ sap_swpm_ascs_instance_nr | default('') }}" sap_nwas_abap_pas_instance_no: "{{ sap_swpm_pas_instance_nr | default('') }}" - name: Terraform Template output + run_once: true ansible.builtin.debug: var: terraform_template1_result @@ -56,6 +60,7 @@ - name: Read outputs from project path when: sap_vm_provision_terraform_state == "present" + run_once: true cloud.terraform.terraform_output: project_path: "{{ sap_vm_provision_terraform_work_dir_path }}" register: terraform_output_project_path diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_vs/tf_template/tf_template_input_vars.tf 
b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_vs/tf_template/tf_template_input_vars.tf index e5c62b7..d5cd41b 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_vs/tf_template/tf_template_input_vars.tf +++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_vs/tf_template/tf_template_input_vars.tf @@ -99,35 +99,33 @@ variable "map_os_image_regex" { default = { - rhel-7-6-sap-ha = ".*redhat.*7-6.*amd64.*hana.*" - - rhel-8-1-sap-ha = ".*redhat.*8-1.*amd64.*hana.*" - - rhel-8-2-sap-ha = ".*redhat.*8-2.*amd64.*hana.*" - + rhel-8-8 = ".*redhat.*8-8.*minimal.*amd64.*" + rhel-8-10 = ".*redhat.*8-10.*minimal.*amd64.*" + rhel-9-2 = ".*redhat.*9-2.*minimal.*amd64.*" + rhel-9-4 = ".*redhat.*9-4.*minimal.*amd64.*" + sles-15-5 = ".*sles.*15-5.*amd64-[0-9]" + sles-15-6 = ".*sles.*15-6.*amd64-[0-9]" + + # rhel-7-6-sap-ha = ".*redhat.*7-6.*amd64.*hana.*" // retrievable from deprecated list + # rhel-7-9-sap-ha = ".*redhat.*7-9.*amd64.*hana.*" // retrievable from deprecated list + # rhel-8-1-sap-ha = ".*redhat.*8-1.*amd64.*hana.*" // retrievable from deprecated list + # rhel-8-2-sap-ha = ".*redhat.*8-2.*amd64.*hana.*" // retrievable from deprecated list rhel-8-4-sap-ha = ".*redhat.*8-4.*amd64.*hana.*" - - rhel-7-6-sap-applications = ".*redhat.*7-6.*amd64.*applications.*" - - rhel-8-1-sap-applications = ".*redhat.*8-1.*amd64.*applications.*" - - rhel-8-2-sap-applications = ".*redhat.*8-2.*amd64.*applications.*" - - rhel-8-4-sap-applications = ".*redhat.*8-4.*amd64.*applications.*" - - rhel-8-4 = ".*redhat.*8-4.*minimal.*amd64.*" - - sles-12-4-sap-ha = ".*sles.*12-4.*amd64.*hana.*" - - sles-15-1-sap-ha = ".*sles.*15-1.*amd64.*hana.*" - + rhel-8-6-sap-ha = ".*redhat.*8-6.*amd64.*hana.*" + rhel-8-8-sap-ha = ".*redhat.*8-8.*amd64.*hana.*" + rhel-8-10-sap-ha = ".*redhat.*8-10.*amd64.*hana.*" + rhel-9-0-sap-ha = ".*redhat.*9-0.*amd64.*hana.*" + rhel-9-2-sap-ha = ".*redhat.*9-2.*amd64.*hana.*" + rhel-9-4-sap-ha = ".*redhat.*9-4.*amd64.*hana.*" + + # sles-12-4-sap-ha = ".*sles.*12-4.*amd64.*hana.*" // retrievable from deprecated list + # sles-12-5-sap-ha = ".*sles.*12-5.*amd64.*hana.*" // retrievable from deprecated list + # sles-15-1-sap-ha = ".*sles.*15-1.*amd64.*hana.*" // retrievable from deprecated list sles-15-2-sap-ha = ".*sles.*15-2.*amd64.*hana.*" - - sles-12-4-sap-applications = ".*sles.*12-4.*amd64.*applications.*" - - sles-15-1-sap-applications = ".*sles.*15-1.*amd64.*applications.*" - - sles-15-2-sap-applications = ".*sles.*15-2.*amd64.*applications.*" + sles-15-3-sap-ha = ".*sles.*15-3.*amd64.*hana.*" + sles-15-4-sap-ha = ".*sles.*15-4.*amd64.*hana.*" + sles-15-5-sap-ha = ".*sles.*15-5.*amd64.*hana.*" + sles-15-6-sap-ha = ".*sles.*15-6.*amd64.*hana.*" } @@ -141,12 +139,12 @@ variable "sap_vm_provision_ibmcloud_vs_host_os_image" { description = "Host OS Image. This variable uses the locals mapping with regex of OS Images, and will alter host provisioning." } -variable "sap_software_download_directory" { +variable "sap_install_media_detect_source_directory" { description = "Mount point for downloads of SAP Software" validation { error_message = "Directory must start with forward slash." 
- condition = can(regex("^/", var.sap_software_download_directory)) + condition = can(regex("^/", var.sap_install_media_detect_source_directory)) } } diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmpowervm_vm/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmpowervm_vm/execute_main.yml index 2dba5d7..3e95288 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmpowervm_vm/execute_main.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmpowervm_vm/execute_main.yml @@ -12,13 +12,16 @@ block: # Do not use ansible.builtin.copy as this will cause error 'not writable' on localhost (even if user has permissions) - - name: Copy Terraform Template files to temporary directory in current Ansible Playbook directory + # Should not cause overwrite of Terraform State files, but should overwrite populated Terraform Template files if they are updated + - name: Copy Terraform Template files to provided working directory path + run_once: true ansible.builtin.shell: | mkdir -p {{ sap_vm_provision_terraform_work_dir_path }} - cp -r {{ role_path }}/tasks/platform_ansible_to_terraform/{{ sap_vm_provision_iac_platform }}/tf_template/* {{ sap_vm_provision_terraform_work_dir_path }} + cp -R {{ role_path }}/tasks/platform_ansible_to_terraform/{{ sap_vm_provision_iac_platform }}/tf_template/* {{ sap_vm_provision_terraform_work_dir_path }} - name: Terraform Template for SAP - IBM PowerVM register: terraform_template1_result + run_once: true cloud.terraform.terraform: project_path: "{{ sap_vm_provision_terraform_work_dir_path }}" state: "{{ sap_vm_provision_terraform_state }}" @@ -47,12 +50,13 @@ map_host_specifications: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary') }}" sap_vm_provision_host_specification_plan: "{{ sap_vm_provision_host_specification_plan }}" - sap_software_download_directory: "{{ sap_software_download_directory }}" + sap_install_media_detect_source_directory: "{{ sap_install_media_detect_source_directory }}" sap_hana_install_instance_nr: "{{ sap_hana_install_instance_nr | default('') }}" sap_nwas_abap_ascs_instance_no: "{{ sap_swpm_ascs_instance_nr | default('') }}" sap_nwas_abap_pas_instance_no: "{{ sap_swpm_pas_instance_nr | default('') }}" - name: Terraform Template output + run_once: true ansible.builtin.debug: var: terraform_template1_result @@ -73,6 +77,7 @@ - name: Read outputs from project path when: sap_vm_provision_terraform_state == "present" + run_once: true cloud.terraform.terraform_output: project_path: "{{ sap_vm_provision_terraform_work_dir_path }}" register: terraform_output_project_path diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmpowervm_vm/tf_template/tf_template_input_vars.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmpowervm_vm/tf_template/tf_template_input_vars.tf index d77ac49..d3a514c 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmpowervm_vm/tf_template/tf_template_input_vars.tf +++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmpowervm_vm/tf_template/tf_template_input_vars.tf @@ -62,12 +62,12 @@ variable "sap_vm_provision_host_specification_plan" { description = "Host specification plans are xsmall_256gb. This variable uses the locals mapping with a nested list of host specifications, and will alter host provisioning." 
} -variable "sap_software_download_directory" { +variable "sap_install_media_detect_source_directory" { description = "Mount point for downloads of SAP Software" validation { error_message = "Directory must start with forward slash." - condition = can(regex("^/", var.sap_software_download_directory)) + condition = can(regex("^/", var.sap_install_media_detect_source_directory)) } } diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/msazure_vm/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/msazure_vm/execute_main.yml index 88471ae..addf9d9 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/msazure_vm/execute_main.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/msazure_vm/execute_main.yml @@ -4,13 +4,16 @@ block: # Do not use ansible.builtin.copy as this will cause error 'not writable' on localhost (even if user has permissions) - - name: Copy Terraform Template files to temporary directory in current Ansible Playbook directory + # Should not cause overwrite of Terraform State files, but should overwrite populated Terraform Template files if they are updated + - name: Copy Terraform Template files to provided working directory path + run_once: true ansible.builtin.shell: | mkdir -p {{ sap_vm_provision_terraform_work_dir_path }} - cp -r {{ role_path }}/tasks/platform_ansible_to_terraform/{{ sap_vm_provision_iac_platform }}/tf_template/* {{ sap_vm_provision_terraform_work_dir_path }} + cp -R {{ role_path }}/tasks/platform_ansible_to_terraform/{{ sap_vm_provision_iac_platform }}/tf_template/* {{ sap_vm_provision_terraform_work_dir_path }} - name: Terraform Template for SAP - MS Azure register: terraform_template1_result + run_once: true cloud.terraform.terraform: project_path: "{{ sap_vm_provision_terraform_work_dir_path }}" state: "{{ sap_vm_provision_terraform_state }}" @@ -35,12 +38,13 @@ map_host_specifications: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary') }}" sap_vm_provision_host_specification_plan: "{{ sap_vm_provision_host_specification_plan }}" sap_vm_provision_msazure_vm_host_os_image: "{{ sap_vm_provision_msazure_vm_host_os_image }}" - sap_software_download_directory: "{{ sap_software_download_directory }}" + sap_install_media_detect_source_directory: "{{ sap_install_media_detect_source_directory }}" sap_hana_install_instance_nr: "{{ sap_hana_install_instance_nr | default('') }}" sap_nwas_abap_ascs_instance_no: "{{ sap_swpm_ascs_instance_nr | default('') }}" sap_nwas_abap_pas_instance_no: "{{ sap_swpm_pas_instance_nr | default('') }}" - name: Terraform Template output + run_once: true ansible.builtin.debug: var: terraform_template1_result @@ -61,6 +65,7 @@ - name: Read outputs from project path when: sap_vm_provision_terraform_state == "present" + run_once: true cloud.terraform.terraform_output: project_path: "{{ sap_vm_provision_terraform_work_dir_path }}" register: terraform_output_project_path diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/msazure_vm/tf_template/tf_template_input_vars.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/msazure_vm/tf_template/tf_template_input_vars.tf index 908e802..fa6477d 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/msazure_vm/tf_template/tf_template_input_vars.tf +++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/msazure_vm/tf_template/tf_template_input_vars.tf @@ -92,12 +92,12 @@ variable 
"sap_vm_provision_msazure_vm_host_os_image" { description = "Host OS Image. This variable uses the locals mapping with regex of OS Images, and will alter host provisioning." } -variable "sap_software_download_directory" { +variable "sap_install_media_detect_source_directory" { description = "Mount point for downloads of SAP Software" validation { error_message = "Directory must start with forward slash." - condition = can(regex("^/", var.sap_software_download_directory)) + condition = can(regex("^/", var.sap_install_media_detect_source_directory)) } } @@ -142,47 +142,48 @@ variable "map_os_image_regex" { default = { - rhel-8-4 = { - publisher = "RedHat" - offer = "RHEL" - sku = "84-gen2" - }, - - rhel-8-1-sap-ha = { - publisher = "RedHat" - offer = "RHEL-SAP-HA" - sku = "81sapha-gen2" - }, - - rhel-8-2-sap-ha = { - publisher = "RedHat" - offer = "RHEL-SAP-HA" - sku = "82sapha-gen2" - }, - - rhel-8-4-sap-ha = { - publisher = "RedHat" - offer = "RHEL-SAP-HA" - sku = "84sapha-gen2" - }, - - rhel-8-1-sap-applications = { - publisher = "RedHat" - offer = "RHEL-SAP-APPS" - sku = "81sapapps-gen2" - }, - - rhel-8-2-sap-applications = { - publisher = "RedHat" - offer = "RHEL-SAP-APPS" - sku = "82sapapps-gen2" - }, - - rhel-8-4-sap-applications = { - publisher = "RedHat" - offer = "RHEL-SAP-APPS" - sku = "84sapapps-gen2" - } + rhel-8-0 = { publisher = "RedHat" , offer = "RHEL" , sku = "8-gen2" }, + rhel-8-1 = { publisher = "RedHat" , offer = "RHEL" , sku = "81gen2" }, + rhel-8-2 = { publisher = "RedHat" , offer = "RHEL" , sku = "82gen2" }, + rhel-8-3 = { publisher = "RedHat" , offer = "RHEL" , sku = "83-gen2" }, + rhel-8-4 = { publisher = "RedHat" , offer = "RHEL" , sku = "84-gen2" }, + rhel-8-5 = { publisher = "RedHat" , offer = "RHEL" , sku = "85-gen2" }, + rhel-8-6 = { publisher = "RedHat" , offer = "RHEL" , sku = "86-gen2" }, + rhel-8-7 = { publisher = "RedHat" , offer = "RHEL" , sku = "87-gen2" }, + rhel-8-8 = { publisher = "RedHat" , offer = "RHEL" , sku = "88-gen2" }, + rhel-8-9 = { publisher = "RedHat" , offer = "RHEL" , sku = "89-gen2" }, + rhel-8-10 = { publisher = "RedHat" , offer = "RHEL" , sku = "810-gen2" }, + rhel-9-0 = { publisher = "RedHat" , offer = "RHEL" , sku = "90-gen2" }, + rhel-9-1 = { publisher = "RedHat" , offer = "RHEL" , sku = "91-gen2" }, + rhel-9-2 = { publisher = "RedHat" , offer = "RHEL" , sku = "92-gen2" }, + rhel-9-3 = { publisher = "RedHat" , offer = "RHEL" , sku = "93-gen2" }, + rhel-9-4 = { publisher = "RedHat" , offer = "RHEL" , sku = "94-gen2" }, + + rhel-8-1-sap-ha = { publisher = "RedHat" , offer = "RHEL-SAP-HA" , sku = "81sapha-gen2" }, + rhel-8-2-sap-ha = { publisher = "RedHat" , offer = "RHEL-SAP-HA" , sku = "82sapha-gen2" }, + rhel-8-4-sap-ha = { publisher = "RedHat" , offer = "RHEL-SAP-HA" , sku = "84sapha-gen2" }, + rhel-8-6-sap-ha = { publisher = "RedHat" , offer = "RHEL-SAP-HA" , sku = "86sapha-gen2" }, + rhel-8-8-sap-ha = { publisher = "RedHat" , offer = "RHEL-SAP-HA" , sku = "88sapha-gen2" }, + rhel-8-10-sap-ha = { publisher = "RedHat" , offer = "RHEL-SAP-HA" , sku = "810sapha-gen2" }, + rhel-9-0-sap-ha = { publisher = "RedHat" , offer = "RHEL-SAP-HA" , sku = "90sapha-gen2" }, + rhel-9-2-sap-ha = { publisher = "RedHat" , offer = "RHEL-SAP-HA" , sku = "92sapha-gen2" }, + rhel-9-4-sap-ha = { publisher = "RedHat" , offer = "RHEL-SAP-HA" , sku = "94sapha-gen2" }, + + sles-12-5 = { publisher = "SUSE" , offer = "sles-12-sp5" , sku = "gen2" }, + sles-15-1 = { publisher = "SUSE" , offer = "sles-15-sp1" , sku = "gen2" }, + sles-15-2 = { publisher = "SUSE" , 
offer = "sles-15-sp2" , sku = "gen2" }, + sles-15-3 = { publisher = "SUSE" , offer = "sles-15-sp3" , sku = "gen2" }, + sles-15-4 = { publisher = "SUSE" , offer = "sles-15-sp4" , sku = "gen2" }, + sles-15-5 = { publisher = "SUSE" , offer = "sles-15-sp5" , sku = "gen2" }, + sles-15-6 = { publisher = "SUSE" , offer = "sles-15-sp6" , sku = "gen2" }, + + sles-12-5-sap-ha = { publisher = "SUSE" , offer = "sles-sap-12-sp5" , sku = "gen2" }, + sles-15-1-sap-ha = { publisher = "SUSE" , offer = "sles-sap-15-sp1" , sku = "gen2" }, + sles-15-2-sap-ha = { publisher = "SUSE" , offer = "sles-sap-15-sp2" , sku = "gen2" }, + sles-15-3-sap-ha = { publisher = "SUSE" , offer = "sles-sap-15-sp3" , sku = "gen2" }, + sles-15-4-sap-ha = { publisher = "SUSE" , offer = "sles-sap-15-sp4" , sku = "gen2" }, + sles-15-5-sap-ha = { publisher = "SUSE" , offer = "sles-sap-15-sp5" , sku = "gen2" }, + sles-15-6-sap-ha = { publisher = "SUSE" , offer = "sles-sap-15-sp6" , sku = "gen2" } } diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/vmware_vm/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/vmware_vm/execute_main.yml index 1d15706..27a76bd 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/vmware_vm/execute_main.yml +++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/vmware_vm/execute_main.yml @@ -13,13 +13,16 @@ block: # Do not use ansible.builtin.copy as this will cause error 'not writable' on localhost (even if user has permissions) - - name: Copy Terraform Template files to temporary directory in current Ansible Playbook directory + # Should not cause overwrite of Terraform State files, but should overwrite populated Terraform Template files if they are updated + - name: Copy Terraform Template files to provided working directory path + run_once: true ansible.builtin.shell: | mkdir -p {{ sap_vm_provision_terraform_work_dir_path }} - cp -r {{ role_path }}/tasks/platform_ansible_to_terraform/{{ sap_vm_provision_iac_platform }}/tf_template/* {{ sap_vm_provision_terraform_work_dir_path }} + cp -R {{ role_path }}/tasks/platform_ansible_to_terraform/{{ sap_vm_provision_iac_platform }}/tf_template/* {{ sap_vm_provision_terraform_work_dir_path }} - name: Terraform Template for SAP - VMware register: terraform_template1_result + run_once: true cloud.terraform.terraform: project_path: "{{ sap_vm_provision_terraform_work_dir_path }}" state: "{{ sap_vm_provision_terraform_state }}" @@ -48,12 +51,13 @@ sap_vm_provision_ibmcloud_vs_host_os_image: "{{ sap_vm_provision_ibmcloud_vs_host_os_image }}" - sap_software_download_directory: "{{ sap_software_download_directory }}" + sap_install_media_detect_source_directory: "{{ sap_install_media_detect_source_directory }}" sap_hana_install_instance_nr: "{{ sap_hana_install_instance_nr | default('') }}" sap_nwas_abap_ascs_instance_no: "{{ sap_swpm_ascs_instance_nr | default('') }}" sap_nwas_abap_pas_instance_no: "{{ sap_swpm_pas_instance_nr | default('') }}" - name: Terraform Template output + run_once: true ansible.builtin.debug: var: terraform_template1_result @@ -74,6 +78,7 @@ - name: Read outputs from project path when: sap_vm_provision_terraform_state == "present" + run_once: true cloud.terraform.terraform_output: project_path: "{{ sap_vm_provision_terraform_work_dir_path }}" register: terraform_output_project_path diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/vmware_vm/tf_template/tf_template_input_vars.tf 
b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/vmware_vm/tf_template/tf_template_input_vars.tf index f7f7585..4bec6eb 100644 --- a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/vmware_vm/tf_template/tf_template_input_vars.tf +++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/vmware_vm/tf_template/tf_template_input_vars.tf @@ -48,12 +48,12 @@ variable "sap_vm_provision_host_specification_plan" { description = "Host specification plans are xsmall_256gb. This variable uses the locals mapping with a nested list of host specifications, and will alter host provisioning." } -variable "sap_software_download_directory" { +variable "sap_install_media_detect_source_directory" { description = "Mount point for downloads of SAP Software" validation { error_message = "Directory must start with forward slash." - condition = can(regex("^/", var.sap_software_download_directory)) + condition = can(regex("^/", var.sap_install_media_detect_source_directory)) } } diff --git a/roles/sap_vm_temp_vip/INPUT_PARAMETERS.md b/roles/sap_vm_temp_vip/INPUT_PARAMETERS.md new file mode 100644 index 0000000..6ef4192 --- /dev/null +++ b/roles/sap_vm_temp_vip/INPUT_PARAMETERS.md @@ -0,0 +1,66 @@ +## Input Parameters for sap_vm_temp_vip Ansible Role + +### sap_vm_temp_vip_default_ip + +- _Type:_ `string` +- _Default:_ `ansible_default_ipv4.address` + +IP Address of default network interface is obtained from Ansible Facts and it is used for calculation of missing input parameters. + +### sap_vm_temp_vip_default_netmask + +- _Type:_ `string` +- _Default:_ `ansible_default_ipv4.netmask` + +Netmask of default network interface is obtained from Ansible Facts and it is used for calculation of missing input parameters. + +### sap_vm_temp_vip_default_prefix + +- _Type:_ `string` +- _Default:_ `ansible_default_ipv4.prefix` + +Prefix of default network interface is obtained from Ansible Facts and it is used for calculation of missing input parameters. + +### sap_vm_temp_vip_default_broadcast + +- _Type:_ `string` +- _Default:_ `ansible_default_ipv4.broadcast` + +Broadcast of default network interface is obtained from Ansible Facts and it is used for calculation of missing input parameters.
+This parameter is empty on some cloud platforms; if the attempt to calculate the broadcast address fails, the VIP is created without it. + +### sap_vm_temp_vip_default_interface + +- _Type:_ `string` +- _Default:_ `ansible_default_ipv4.interface` or `eth0` + +The name of the default network interface is obtained from Ansible Facts and is used for calculation of missing input parameters.
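Where the interface detected via Ansible Facts is not the one that should carry the VIP, these defaults can be overridden at play level; a minimal, hypothetical sketch (interface and address values are placeholders):

```yaml
- name: Assign temporary VIP using a non-default network interface
  hosts: hana_primary, hana_secondary
  become: true
  vars:
    # Placeholder values; adjust to the target network layout.
    sap_vm_temp_vip_default_interface: eth1
    sap_vm_temp_vip_default_ip: 10.10.10.11
    sap_vm_temp_vip_default_netmask: 255.255.255.0
  tasks:

    - name: Execute Ansible Role sap_vm_temp_vip
      ansible.builtin.include_role:
        name: community.sap_infrastructure.sap_vm_temp_vip
```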
+Override this parameter with the correct network interface if the default interface reported by Ansible Facts is not the desired one. + +### sap_vm_temp_vip_hana_primary +- _Type:_ `string` +- _Default:_ `sap_ha_pacemaker_cluster_vip_hana_primary_ip_address` + +Mandatory for SAP HANA cluster setup.
+The VIP address is by default taken from the `sap_ha_pacemaker_cluster_vip_hana_primary_ip_address` input parameter of the [sap_ha_pacemaker_cluster](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_ha_pacemaker_cluster) role. + +### sap_vm_temp_vip_nwas_abap_ascs +- _Type:_ `string` +- _Default:_ `sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address` + +Mandatory for SAP ASCS/ERS cluster setup.
+The VIP address is by default taken from the `sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address` input parameter of the [sap_ha_pacemaker_cluster](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_ha_pacemaker_cluster) role. + +### sap_vm_temp_vip_nwas_abap_ers +- _Type:_ `string` +- _Default:_ `sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address` + +Mandatory for SAP ASCS/ERS cluster setup.
+The VIP address is by default taken from the `sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address` input parameter of the [sap_ha_pacemaker_cluster](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_ha_pacemaker_cluster) role. + +### sap_vm_temp_vip_anydb_primary +- _Type:_ `string` + +Mandatory for SAP AnyDB cluster setup. + + \ No newline at end of file diff --git a/roles/sap_vm_temp_vip/README.md b/roles/sap_vm_temp_vip/README.md index c72b8b0..bd7e40c 100644 --- a/roles/sap_vm_temp_vip/README.md +++ b/roles/sap_vm_temp_vip/README.md @@ -1,81 +1,77 @@ -`Beta` - + # sap_vm_temp_vip Ansible Role + -Ansible Role for assignment of Temporary Virtual IP (VIP) to OS Network Interface prior to Linux Pacemaker ownership. - -This Ansible Role will (dependent on detected Infrastructure Platform) perform assignment of a Virtual IP Address to the OS Network Interface. - - -## Functionality - -The hosts for SAP Software allocated for High Availability are configured with a temporary Virtual IP for the OS Network Interface; thereby allowing Linux Pacemaker to be installed once the SAP Software installation has concluded (best practice for Linux Pacemaker). When an Infrastructure Platform with specific requirements is detected (e.g. Load Balancers), then bespoke actions are performed. - - -## Scope +## Description + +Ansible role `sap_vm_temp_vip` enables installation of SAP Application and Database instances on High Availability clusters provisioned by the [sap_vm_provision](https://github.com/sap-linuxlab/community.sap_infrastructure/tree/main/roles/sap_vm_provision) role. -Only hosts required for High Availability (such as SAP HANA Primary node, SAP NetWeaver ASCS/ERS) should use this Ansible Role. +Installation of a cluster environment requires temporary assignment of a Virtual IP (VIP) before executing the installation roles [sap_hana_install](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_hana_install) and [sap_swpm](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_swpm). +- This VIP is temporary and will be replaced by the cluster VIP resource once the cluster is configured by the [sap_ha_pacemaker_cluster](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_ha_pacemaker_cluster) role. -Assumptions are made based upon the default High Availability configuration for a given Infrastructure Platform (e.g. using Linux Pacemaker `IPAddr2` resource agent). +This role does not update `/etc/hosts` or DNS records, as these steps are performed by the [sap_vm_provision](https://github.com/sap-linuxlab/community.sap_infrastructure/tree/main/roles/sap_vm_provision) role. + +## Prerequisites + +Environment: +- Assign hosts to the correct groups, which are also used by other roles in this project (see the example inventory below) + - Supported cluster groups: `hana_primary, hana_secondary, anydb_primary, anydb_secondary, nwas_ascs, nwas_ers` -## Requirements - -### Target hosts - -**OS Versions:** -- Red Hat Enterprise Linux 8.2+ -- SUSE Linux Enterprise Server 15 SP3+ - -### Execution/Controller host - -**Dependencies:** -- OS Packages - - Python 3.9.7+ (i.e. CPython distribution) -- Python Packages - - None -- Ansible - - Ansible Core 2.12.0+ - - Ansible Collections: - - None - +Role dependency: +- [sap_vm_provision](https://github.com/sap-linuxlab/community.sap_infrastructure/tree/main/roles/sap_vm_provision), for creating the required resources: DNS, Load Balancers and Health Checks.
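To illustrate the expected group layout, a minimal, hypothetical inventory sketch for an SAP HANA and ASCS/ERS cluster scenario (host names are placeholders) could look like this:

```yaml
# Hypothetical inventory sketch; host names are placeholders.
all:
  children:
    hana_primary:
      hosts:
        hana01:
    hana_secondary:
      hosts:
        hana02:
    nwas_ascs:
      hosts:
        nwascs01:
    nwas_ers:
      hosts:
        nwers01:
```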
+ ## Execution - -### Sample execution - -For further information, see the [sample Ansible Playbooks in `/playbooks`](../playbooks/). - -### Suggested execution sequence - -It is advised this Ansible Role is used only for High Availability and executed prior to execution of: -- sap_hana_install -- sap_swpm - -Prior to execution of this Ansible Role, there are no Ansible Roles suggested to be executed first. - -### Summary of execution flow - -- Identify IPv4 Address with CIDR and Broadcast Address -- If SAP AnyDB or SAP NetWeaver, assign Virtual IP to OS Network Interface. If SAP HANA, skip -- Start temporary listener for SAP HANA, SAP AnyDB or SAP NetWeaver when using Load Balancers _(GCP, IBM Cloud, MS Azure)_ - -### Tags to control execution - -There are no tags used to control the execution of this Ansible Role - + +The role can be executed separately or as part of [ansible.playbooks_for_sap](https://github.com/sap-linuxlab/ansible.playbooks_for_sap) playbooks. + + +### Execution Flow + +1. Assert that required inputs were provided. +2. Collect missing inputs using the provided inputs (for example, calculate the prefix from the netmask if the VIP prefix was not defined). +3. Append VIP to network interface + - SAP HANA Primary host if both groups are present: `hana_primary, hana_secondary` + - SAP AnyDB Primary host if both groups are present: `anydb_primary, anydb_secondary` + - SAP ASCS host if both groups are present: `nwas_ascs, nwas_ers` + - SAP ERS host if both groups are present: `nwas_ascs, nwas_ers` +4. Install `netcat` and start a 12 hour listener process to ensure that Load Balancer Health Checks are answered before the cluster is configured. + - Limited to platforms with Network Load Balancers and `IPAddr2` resource agent: Google Cloud, MS Azure, IBM Cloud. + + +### Example + +```yaml +- name: Ansible Play for Temporary VIP setup on SAP ASCS/ERS hosts + hosts: nwas_ascs, nwas_ers + become: true + any_errors_fatal: true + max_fail_percentage: 0 + tasks: + + - name: Execute Ansible Role sap_vm_temp_vip + ansible.builtin.include_role: + name: community.sap_infrastructure.sap_vm_temp_vip +``` + + + + + + + ## License - + Apache 2.0 + +## Maintainers + +- [Sean Freeman](https://github.com/sean-freeman) +- [Marcel Mamula](https://github.com/marcelmamula) + -## Authors - -Sean Freeman - ---- - -## Ansible Role Input Variables - -Please first check the [/defaults parameters file](./defaults/main.yml).
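If the `sap_ha_pacemaker_cluster_vip_*` variables are not already present in the inventory, the VIP addresses can be supplied directly through the role parameters; an illustrative sketch with placeholder addresses:

```yaml
- name: Ansible Play for Temporary VIP setup on SAP ASCS/ERS hosts
  hosts: nwas_ascs, nwas_ers
  become: true
  vars:
    # Placeholder addresses; by default these are taken from the
    # sap_ha_pacemaker_cluster_vip_* variables when defined.
    sap_vm_temp_vip_nwas_abap_ascs: 192.168.1.91
    sap_vm_temp_vip_nwas_abap_ers: 192.168.1.92
  tasks:

    - name: Execute Ansible Role sap_vm_temp_vip
      ansible.builtin.include_role:
        name: community.sap_infrastructure.sap_vm_temp_vip
```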
+## Role Input Parameters +All input parameters used by this role are described in [INPUT_PARAMETERS.md](https://github.com/sap-linuxlab/community.sap_infrastructure/blob/main/roles/sap_vm_temp_vip/INPUT_PARAMETERS.md) diff --git a/roles/sap_vm_temp_vip/defaults/main.yml b/roles/sap_vm_temp_vip/defaults/main.yml index 8ab3cd8..df7e520 100644 --- a/roles/sap_vm_temp_vip/defaults/main.yml +++ b/roles/sap_vm_temp_vip/defaults/main.yml @@ -1,5 +1,13 @@ --- +# General variables that are calculated from Ansible facts +sap_vm_temp_vip_default_ip: "{{ ansible_default_ipv4.address | default('') }}" +sap_vm_temp_vip_default_netmask: "{{ ansible_default_ipv4.netmask | default('') }}" +sap_vm_temp_vip_default_prefix: "{{ ansible_default_ipv4.prefix | default('') }}" +sap_vm_temp_vip_default_broadcast: "{{ ansible_default_ipv4.broadcast | default('') }}" +sap_vm_temp_vip_default_interface: "{{ ansible_default_ipv4.interface | default('eth0') }}" + +# SAP specific IPs are defined from sap_install.sap_ha_pacemaker_cluster role input variables sap_vm_temp_vip_hana_primary: "{{ sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default('') }}" sap_vm_temp_vip_anydb_primary: "" sap_vm_temp_vip_nwas_abap_ascs: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default('') }}" diff --git a/roles/sap_vm_temp_vip/tasks/get_temp_vip_details.yml b/roles/sap_vm_temp_vip/tasks/get_temp_vip_details.yml new file mode 100644 index 0000000..1ed03ab --- /dev/null +++ b/roles/sap_vm_temp_vip/tasks/get_temp_vip_details.yml @@ -0,0 +1,100 @@ +--- +# Get details of default ip route to detect default network interface +- name: Get network interface from ip route show default 0.0.0.0/0 + ansible.builtin.shell: + cmd: set -o pipefail && ip route show default 0.0.0.0/0 | awk '/default/ {print $5}' + register: __sap_vm_temp_vip_get_route + changed_when: false + failed_when: false + +# Get content of ip address show filtered by primary IP +- name: Get contents of ip address show for {{ sap_vm_temp_vip_default_ip }} + ansible.builtin.shell: + cmd: set -o pipefail && ip -oneline address show {{ __sap_vm_temp_vip_get_route.stdout }} | grep {{ sap_vm_temp_vip_default_ip }} + when: + - __sap_vm_temp_vip_get_route.stdout is defined and __sap_vm_temp_vip_get_route.stdout | length > 0 + register: __sap_vm_temp_vip_get_ips + changed_when: false + failed_when: false + +# Extract prefix from netmask if it is available +# Use localhost (execution host) Python3 instead of relying on target host +- name: Calculate prefix from netmask {{ sap_vm_temp_vip_default_netmask }} + delegate_to: localhost + ansible.builtin.command: + cmd: > + python3 -c "import ipaddress; print(ipaddress.IPv4Network('{{ sap_vm_temp_vip_default_ip }}/{{ sap_vm_temp_vip_default_netmask }}', strict=False).prefixlen)" + when: + - sap_vm_temp_vip_default_prefix == '' + - sap_vm_temp_vip_default_netmask | length > 0 + register: __sap_vm_temp_vip_get_prefix_netmask + changed_when: false + failed_when: false + +# Extract prefix from primary IP on default interface if netmask is not available +# Stdout result is an array instead of a string. [0] is used to select only one in case of multiple results. +# [0] could be replaced by join('') but it would require duplicate record validation.
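As a standalone, worked illustration of the netmask-to-prefix derivation used by this role (not part of the role itself), the same Python call can be exercised on its own; the address and netmask below are example values only and the expected output is 24:

```yaml
- name: Illustrate prefix calculation from a netmask (example values only)
  hosts: localhost
  gather_facts: false
  tasks:

    - name: Derive the CIDR prefix length from netmask 255.255.255.0
      ansible.builtin.command:
        cmd: >
          python3 -c "import ipaddress;
          print(ipaddress.IPv4Network('192.168.1.10/255.255.255.0', strict=False).prefixlen)"
      register: __example_prefix
      changed_when: false

    - name: Show the derived prefix (expected value is 24)
      ansible.builtin.debug:
        msg: "{{ __example_prefix.stdout }}"
```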
+- name: Calculate prefix from IP {{ sap_vm_temp_vip_default_ip }} if sap_vm_temp_vip_default_netmask is empty + ansible.builtin.set_fact: + __sap_vm_temp_vip_get_prefix_ip: + "{{ (__sap_vm_temp_vip_inet[0] | basename) if __sap_vm_temp_vip_inet | length > 0 else __sap_vm_temp_vip_inet }}" + vars: + __sap_vm_temp_vip_inet: "{{ __sap_vm_temp_vip_get_ips.stdout | regex_search('inet ([0-9.]+/[0-9]+)', '\\1') }}" + when: + - sap_vm_temp_vip_default_prefix == '' + - sap_vm_temp_vip_default_netmask == '' + - __sap_vm_temp_vip_get_ips is defined and __sap_vm_temp_vip_get_ips.stdout is defined and __sap_vm_temp_vip_get_ips.stdout | length > 0 + changed_when: false + + +# Combine final prefix variable based on decision below: +# 1. Always use /32 for AWS and GCP, regardless of existing prefix +# 2. Else use prefix calculated from netmask if it is available and sap_vm_temp_vip_default_prefix is empty +# 3. Else use prefix calculated from primary IP if netmask is not available and sap_vm_temp_vip_default_prefix is empty +# 4. Else use sap_vm_temp_vip_default_prefix (regardless of content) to be used to skip steps. +- name: Update netmask prefix variable if it was calculated + ansible.builtin.set_fact: + __sap_vm_temp_vip_prefix: >- + {%- if __sap_vm_temp_vip_force_static_32 -%} + 32 + {%- elif sap_vm_temp_vip_default_prefix | length == 0 + and __sap_vm_temp_vip_get_prefix_netmask.stdout is defined and __sap_vm_temp_vip_get_prefix_netmask.stdout | length > 0 -%} + {{ __sap_vm_temp_vip_get_prefix_netmask.stdout }} + {%- elif sap_vm_temp_vip_default_prefix | length == 0 + and __sap_vm_temp_vip_get_prefix_ip is defined and __sap_vm_temp_vip_get_prefix_ip | length > 0 -%} + {{ __sap_vm_temp_vip_get_prefix_ip }} + {%- else -%} + {{ sap_vm_temp_vip_default_prefix }} + {%- endif -%} + vars: + __sap_vm_temp_vip_force_static_32: + "{{ true if (('amazon' in (ansible_system_vendor | lower) or 'amazon' in (ansible_product_name | lower)) + or (ansible_product_name == 'Google Compute Engine')) else false }}" + + +# Extract broadcast IP from primary IP if it is present and ansible fact ansible_default_ipv4.broadcast is empty +# Stdout result is array instead of string. [0] is used to select only one in case of multiple results. +# [0] could be replaced by join('') but it would require duplicate record validation. +- name: Calculate broadcast IP from IP {{ sap_vm_temp_vip_default_ip }} if sap_vm_temp_vip_default_broadcast is empty + ansible.builtin.set_fact: + __sap_vm_temp_vip_get_broadcast_ip: + "{{ (__sap_vm_temp_vip_brd[0] | basename) if __sap_vm_temp_vip_brd | length > 0 else __sap_vm_temp_vip_brd }}" + vars: + __sap_vm_temp_vip_brd: "{{ __sap_vm_temp_vip_get_ips.stdout | regex_search('brd ([0-9.]+)', '\\1') }}" + when: + - sap_vm_temp_vip_default_broadcast == '' + - __sap_vm_temp_vip_get_ips is defined and __sap_vm_temp_vip_get_ips.stdout is defined and __sap_vm_temp_vip_get_ips.stdout | length > 0 + changed_when: false + +# Combine final broadcast IP based on decision below: +# 1. Use calculated broadcast from primary IP if sap_vm_temp_vip_default_broadcast is empty +# 2. 
Else use sap_vm_temp_vip_default_broadcast (regardless of content) to be used during VIP creation +- name: Update broadcast IP variable if it was calculated + ansible.builtin.set_fact: + __sap_vm_temp_vip_broadcast: >- + {%- if sap_vm_temp_vip_default_broadcast | length == 0 + and __sap_vm_temp_vip_get_broadcast_ip is defined and __sap_vm_temp_vip_get_broadcast_ip | length > 0 -%} + {{ __sap_vm_temp_vip_get_broadcast_ip }} + {%- else -%} + {{ sap_vm_temp_vip_default_broadcast }} + {%- endif -%} diff --git a/roles/sap_vm_temp_vip/tasks/main.yml b/roles/sap_vm_temp_vip/tasks/main.yml index cdcd579..e1d4565 100644 --- a/roles/sap_vm_temp_vip/tasks/main.yml +++ b/roles/sap_vm_temp_vip/tasks/main.yml @@ -1,14 +1,42 @@ --- +# Ansible role to setup temporary Virtual IP (VIP) -- name: Setup temporary Virtual IP (VIP) +- name: Collect Ansible facts for required subsets - Hardware and Network Interfaces + ansible.builtin.setup: + gather_subset: + - hardware + - interfaces + +- name: Assert that sap_vm_temp_vip_default_ip is defined + ansible.builtin.assert: + that: sap_vm_temp_vip_default_ip is defined and sap_vm_temp_vip_default_ip | length > 0 + fail_msg: + - "Unable to get ansible fact ansible_default_ipv4.address or variable sap_vm_temp_vip_default_ip is empty!" + - "Ensure that sap_vm_temp_vip_default_ip is not empty." + + +- name: Block to ensure that only supported groups are allowed + when: group_names | intersect(['hana_primary', 'hana_secondary', 'anydb_primary', 'anydb_secondary', 'nwas_ascs', 'nwas_ers']) block: # - name: Identify OS Primary Network Interface # ansible.builtin.include_tasks: "identify_network_interface.yml" + - name: Attempt to obtain missing network information + ansible.builtin.include_tasks: "get_temp_vip_details.yml" + when: + - not ansible_chassis_asset_tag == 'ibmcloud' # Moved here instead of each task inside of "set_temp_vip.yml" + + - name: Execute temporary set of a Virtual IP (VIP) prior to Linux Pacemaker ownership ansible.builtin.include_tasks: "set_temp_vip.yml" + when: + - not ansible_chassis_asset_tag == 'ibmcloud' # Moved here instead of each task inside of "set_temp_vip.yml" + + # Required when using Load Balancers (i.e. Google Cloud, IBM Cloud, MS Azure) - name: Set Health Check Probe Listener for Virtual IP when Load Balancer ansible.builtin.include_tasks: "set_temp_vip_lb_listener.yml" - when: (ansible_product_name == 'Google Compute Engine') or (ansible_chassis_asset_tag == 'ibmcloud') or (ansible_chassis_vendor == 'Microsoft Corporation' and ansible_product_name == 'Virtual Machine') + when: + - (ansible_product_name == 'Google Compute Engine') or (ansible_chassis_asset_tag == 'ibmcloud') + or (ansible_chassis_vendor == 'Microsoft Corporation' and ansible_product_name == 'Virtual Machine') diff --git a/roles/sap_vm_temp_vip/tasks/set_temp_vip.yml b/roles/sap_vm_temp_vip/tasks/set_temp_vip.yml index 193917a..546b800 100644 --- a/roles/sap_vm_temp_vip/tasks/set_temp_vip.yml +++ b/roles/sap_vm_temp_vip/tasks/set_temp_vip.yml @@ -8,119 +8,126 @@ # for IBM Power IaaS VLAN on IBM Cloud, must be within the VLAN Subnet CIDR Range # for IBM PowerVM, must be within the VLAN Subnet CIDR Range - +## Set Virtual IP's Netmask / CIDR Prefix # Use of Primary IP Address default netmask prefix and/or the broadcast is automatic for Linux Pacemaker -# For AWS, this would be the VPC Subnet Netmask CIDR e.g. 
/24 +# For AWS, this would be static Netmask CIDR /32 (see AWS 'Overlay IP' documentation) +# For GCP, this would be static Netmask CIDR /32, unless using custom OS Image - https://cloud.google.com/vpc/docs/create-use-multiple-interfaces#i_am_having_connectivity_issues_when_using_a_netmask_that_is_not_32 # For MS Azure, this would be the VNet Subnet Netmask CIDR e.g. /24 -# For GCP, this would be static Netmask CIDR /32 unless using custom OS Image - https://cloud.google.com/vpc/docs/create-use-multiple-interfaces#i_am_having_connectivity_issues_when_using_a_netmask_that_is_not_32 -- name: Set fact for Broadcast Address and Prefix of the Primary IP - ansible.builtin.set_fact: - ip_broadcast_address: "{{ ansible_default_ipv4.broadcast | default('') }}" - ip_cidr_prefix: "{{ ansible_default_ipv4.prefix | default('') }}" +## Set Virtual IP - Other related information +# In all cases, use noprefixroute parameter to avoid automatic creation of OS route table entries (i.e. 'ip route'), which occurs if the IP Address is outside of the existing Subnet Range +# TODO: Add rare scenario for PAS/AAS VIP if needed. +# (groups["nwas_pas"] is defined and inventory_hostname in groups["nwas_pas"]) and (groups["nwas_pas"] is defined and (groups["nwas_pas"]|length>0)) -#### HA of HANA Primary/Secondary #### -# Not required before SAP HANA installation or Linux Pacemaker installation, performed so the VIP connectivity can be tested -- name: Append temporary Virtual IP (VIP) to network interface for SAP HANA, will be replaced by Linux Pacemaker IPaddr2 Resource Agent - ansible.builtin.shell: | - if [ "{{ ip_broadcast_address }}" = "" ] && [ "{{ ip_cidr_prefix }}" = "" ] - then - ip address add {{ sap_vm_temp_vip_hana_primary | regex_replace('/.*', '') }}/32 brd + dev eth0 - elif [ "{{ ip_broadcast_address }}" != "" ] - then - ip address add {{ sap_vm_temp_vip_hana_primary | regex_replace('/.*', '') }} brd {{ ip_broadcast_address }} dev eth0 - elif [ "{{ ip_cidr_prefix }}" != "" ] - then - ip address add {{ sap_vm_temp_vip_hana_primary | regex_replace('/.*', '') }}/{{ ip_cidr_prefix }} brd + dev eth0 - fi +# Define VIP address based on target host group which is filtered in main.yml +- name: Set fact for VIP address depending on target host group + ansible.builtin.set_fact: + __sap_vm_temp_vip_address: >- + {% if groups['hana_secondary'] | d([]) | length > 0 and inventory_hostname in groups["hana_primary"] -%} + {{ sap_vm_temp_vip_hana_primary | regex_replace('/.*', '') }} + {%- elif groups['anydb_secondary'] | d([]) | length > 0 and inventory_hostname in groups["anydb_primary"] -%} + {{ sap_vm_temp_vip_anydb_primary | regex_replace('/.*', '') }} + {%- elif groups["nwas_ers"] | d([]) | length > 0 and inventory_hostname in groups["nwas_ascs"] -%} + {{ sap_vm_temp_vip_nwas_abap_ascs | regex_replace('/.*', '') }} + {%- elif groups["nwas_ers"] | d([]) | length > 0 and inventory_hostname in groups["nwas_ers"] -%} + {{ sap_vm_temp_vip_nwas_abap_ers | regex_replace('/.*', '') }} + {%- endif %} + +# Get content of ip address show filtered by VIP +- name: Get contents of ip address show for {{ __sap_vm_temp_vip_address }} + ansible.builtin.shell: + cmd: set -o pipefail && ip -oneline address show | grep {{ __sap_vm_temp_vip_address }} when: - - (groups["hana_secondary"] is defined and inventory_hostname in groups["hana_primary"]) and (groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)) - - not ansible_chassis_asset_tag == 'ibmcloud' + - __sap_vm_temp_vip_address is defined and 
__sap_vm_temp_vip_address | length > 0 + register: __sap_vm_temp_vip_get_vip + changed_when: false ignore_errors: true - -# Not required before SAP HANA installation or Linux Pacemaker installation, performed so the VIP connectivity can be tested -- name: Append temporary Virtual IP (VIP) to network interface for SAP AnyDB, will be replaced by Linux Pacemaker IPaddr2 Resource Agent - ansible.builtin.shell: | - if [ "{{ ip_broadcast_address }}" = "" ] && [ "{{ ip_cidr_prefix }}" = "" ] - then - ip address add {{ sap_vm_temp_vip_anydb_primary | regex_replace('/.*', '') }}/32 brd + dev eth0 - elif [ "{{ ip_broadcast_address }}" != "" ] - then - ip address add {{ sap_vm_temp_vip_anydb_primary | regex_replace('/.*', '') }} brd {{ ip_broadcast_address }} dev eth0 - elif [ "{{ ip_cidr_prefix }}" != "" ] - then - ip address add {{ sap_vm_temp_vip_anydb_primary | regex_replace('/.*', '') }}/{{ ip_cidr_prefix }} brd + dev eth0 - fi + failed_when: false + + +# Show debug information if existing VIP is found based on decision below: +# 1. Inform that found VIP is same as VIP planned to create +# 2. Else inform that VIP found is under different prefix than planned to create +# 3. Else inform that more than one VIP was found +# 4. Else inform that comparison failed because provided prefix was empty +# join('') is used instead of [0] because duplicate records are filtered out +- name: Show information if VIP is already present on network interfaces + ansible.builtin.debug: + msg: >- + {%- if __vip_expected == __vip_found -%} + VIP address {{ __vip_expected }} is already present. VIP creation will be skipped. + {%- elif __vip_expected != __vip_found and __sap_vm_temp_vip_prefix != '' and not __vip_multiple -%} + VIP address {{ __vip_expected }} is already present with different prefix {{ __vip_found }}. VIP creation will be skipped. + {%- elif __vip_multiple -%} + Multiple VIP address entries found. VIP creation will be skipped. + {%- else -%} + VIP address {{ __sap_vm_temp_vip_address }} is already present, but comparison failed because of empty sap_vm_temp_vip_default_prefix. + {%- endif -%} + vars: + __vip_expected: "{{ __sap_vm_temp_vip_address ~ '/' ~ __sap_vm_temp_vip_prefix }}" + __vip_found: "{{ __sap_vm_temp_vip_get_vip.stdout | regex_search('inet ([0-9.]+/[0-9]+)', '\\1') | join('') if not __vip_multiple else '' }}" + __vip_multiple: "{{ true if __sap_vm_temp_vip_get_vip.stdout_lines | length > 1 else false }}" when: - - (groups["anydb_secondary"] is defined and inventory_hostname in groups["anydb_primary"]) and (groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0)) - - not ansible_chassis_asset_tag == 'ibmcloud' - ignore_errors: true - + - __sap_vm_temp_vip_get_vip.stdout is defined and __sap_vm_temp_vip_get_vip.stdout | length > 0 + - __sap_vm_temp_vip_address is defined and __sap_vm_temp_vip_address | length > 0 -#### HA of ASCS/ERS #### -# Required before running SAP SWPM -# Otherwise CSiManagerInterfaces.cpp will provide WARNING "The host with the name XXXXXX defined by SAPINST_USE_HOSTNAME is not a virtual host on the local host." 
-# And if the Virtual Hostname / Virtual IP cannot resolve, it will likely prevent SAP SWPM from completing the installation -- name: Append temporary Virtual IP (VIP) to network interface for SAP NetWeaver ASCS, will be replaced by Linux Pacemaker IPaddr2 Resource Agent - ansible.builtin.shell: | - if [ "{{ ip_broadcast_address }}" = "" ] && [ "{{ ip_cidr_prefix }}" = "" ] - then - ip address add {{ sap_vm_temp_vip_nwas_abap_ascs | regex_replace('/.*', '') }}/32 brd + dev eth0 - elif [ "{{ ip_broadcast_address }}" != "" ] - then - ip address add {{ sap_vm_temp_vip_nwas_abap_ascs | regex_replace('/.*', '') }} brd {{ ip_broadcast_address }} dev eth0 - elif [ "{{ ip_cidr_prefix }}" != "" ] - then - ip address add {{ sap_vm_temp_vip_nwas_abap_ascs | regex_replace('/.*', '') }}/{{ ip_cidr_prefix }} brd + dev eth0 - fi +# Dynamically generate IP creation command depending on values gathered before: +# 1. VIP address is defined based on target host group +# 2. Prefix is defined or generated using netmask or primary IP prefix +# 3. Broadcast IP is used only if it was defined or generated using primary IP broadcast +- name: Generate command for IP creation - Prefix /{{ __sap_vm_temp_vip_prefix }} static IPs + ansible.builtin.set_fact: + __sap_vm_temp_vip_command: >- + {%- if __sap_vm_temp_vip_broadcast | length > 0 -%} + ip address add {{ __sap_vm_temp_vip_address }}/{{ __sap_vm_temp_vip_prefix }} brd {{ __sap_vm_temp_vip_broadcast }} dev {{ sap_vm_temp_vip_default_interface }} noprefixroute + {%- else -%} + ip address add {{ __sap_vm_temp_vip_address }}/{{ __sap_vm_temp_vip_prefix }} brd + dev {{ sap_vm_temp_vip_default_interface }} noprefixroute + {%- endif -%} when: - - (groups["nwas_ers"] is defined and inventory_hostname in groups["nwas_ascs"]) and (groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)) - - not ansible_chassis_asset_tag == 'ibmcloud' - ignore_errors: true - -# Required before running SAP SWPM -# Otherwise CSiManagerInterfaces.cpp will provide WARNING "The host with the name XXXXXX defined by SAPINST_USE_HOSTNAME is not a virtual host on the local host." 
-# And if the Virtual Hostname / Virtual IP cannot resolve, it will likely prevent SAP SWPM from completing the installation -- name: Append temporary Virtual IP (VIP) to network interface for SAP NetWeaver ERS, will be replaced by Linux Pacemaker IPaddr2 Resource Agent - ansible.builtin.shell: | - if [ "{{ ip_broadcast_address }}" = "" ] && [ "{{ ip_cidr_prefix }}" = "" ] - then - ip address add {{ sap_vm_temp_vip_nwas_abap_ers | regex_replace('/.*', '') }}/32 brd + dev eth0 - elif [ "{{ ip_broadcast_address }}" != "" ] - then - ip address add {{ sap_vm_temp_vip_nwas_abap_ers | regex_replace('/.*', '') }} brd {{ ip_broadcast_address }} dev eth0 - elif [ "{{ ip_cidr_prefix }}" != "" ] - then - ip address add {{ sap_vm_temp_vip_nwas_abap_ers | regex_replace('/.*', '') }}/{{ ip_cidr_prefix }} brd + dev eth0 - fi + - __sap_vm_temp_vip_address is defined and __sap_vm_temp_vip_address | length > 0 + - __sap_vm_temp_vip_prefix | length > 0 + - __sap_vm_temp_vip_get_vip.stdout is defined and __sap_vm_temp_vip_get_vip.stdout | length == 0 + + +# Show debug information with input details if command was generated: +- name: Show actions to be executed to create temporary VIP + ansible.builtin.debug: + msg: + - "Ansible Facts:" + - primary_ip_address = {{ sap_vm_temp_vip_default_ip }} + - primary_ip_address_netmask = {{ sap_vm_temp_vip_default_netmask }} + - primary_ip_address_netmask_cidr_prefix = {{ __sap_vm_temp_vip_prefix }} + - primary_ip_broadcast_address = {{ __sap_vm_temp_vip_broadcast }} + - "" + - "Command to be executed:" + - "{{ __sap_vm_temp_vip_command }}" when: - - (groups["nwas_ers"] is defined and inventory_hostname in groups["nwas_ers"]) and (groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)) - - not ansible_chassis_asset_tag == 'ibmcloud' - ignore_errors: true - - -#### HA of PAS/AAS [rare, comment out] #### - -# # Required before running SAP SWPM -# # Otherwise CSiManagerInterfaces.cpp will provide WARNING "The host with the name XXXXXX defined by SAPINST_USE_HOSTNAME is not a virtual host on the local host." -# # And if the Virtual Hostname / Virtual IP cannot resolve, it will likely prevent SAP SWPM from completing the installation -# - name: Append temporary Virtual IP (VIP) to network interface for SAP NetWeaver PAS, will be replaced by Linux Pacemaker IPaddr2 Resource Agent -# ansible.builtin.shell: ip address add {{ sap_vm_temp_vip_nwas_abap_pas | regex_replace('/.*', '') }}/24 brd + dev eth0 -# when: -# - (groups["nwas_pas"] is defined and inventory_hostname in groups["nwas_pas"]) and (groups["nwas_pas"] is defined and (groups["nwas_pas"]|length>0)) -# - not ansible_chassis_asset_tag == 'ibmcloud' -# ignore_errors: true - -# # Required before running SAP SWPM -# # Otherwise CSiManagerInterfaces.cpp will provide WARNING "The host with the name XXXXXX defined by SAPINST_USE_HOSTNAME is not a virtual host on the local host." 
-# # And if the Virtual Hostname / Virtual IP cannot resolve, it will likely prevent SAP SWPM from completing the installation -# - name: Append temporary Virtual IP (VIP) to network interface for SAP NetWeaver AAS, will be replaced by Linux Pacemaker IPaddr2 Resource Agent -# ansible.builtin.shell: ip address add {{ sap_vm_temp_vip_nwas_abap_aas | regex_replace('/.*', '') }}/24 brd + dev eth0 +# Show debug information with input details if command was not generated and some inputs are empty: +- name: Show information if command was unable to be generated + ansible.builtin.debug: + msg: + - "ERROR: Unable to generate command because of missing data." + - "" + - "Please review the facts below to see which are empty or missing:" + - primary_ip_address = {{ sap_vm_temp_vip_default_ip }} + - primary_ip_address_netmask = {{ sap_vm_temp_vip_default_netmask }} + - primary_ip_address_netmask_cidr_prefix = {{ __sap_vm_temp_vip_prefix }} + - primary_ip_broadcast_address = {{ __sap_vm_temp_vip_broadcast }} + when: + - __sap_vm_temp_vip_command is not defined or (__sap_vm_temp_vip_command is defined and __sap_vm_temp_vip_command | length == 0) + - __sap_vm_temp_vip_get_vip.stdout is defined and __sap_vm_temp_vip_get_vip.stdout | length == 0 -# when: -# - (groups["nwas_pas"] is defined and inventory_hostname in groups["nwas_aas"]) and (groups["nwas_pas"] is defined and (groups["nwas_pas"]|length>0)) -# - not ansible_chassis_asset_tag == 'ibmcloud' -# ignore_errors: true +# Execute generated command to add VIP to network interface if command is generated and VIP is not present yet. +- name: Append temporary Virtual IP (VIP) to network interface # noqa command-instead-of-shell + ansible.builtin.shell: + cmd: "{{ __sap_vm_temp_vip_command }}" + when: + - __sap_vm_temp_vip_command is defined and __sap_vm_temp_vip_command | length > 0 + - __sap_vm_temp_vip_get_vip.stdout | length == 0 + register: __sap_vm_temp_vip_command_output diff --git a/roles/sap_vm_temp_vip/tasks/set_temp_vip_lb_listener.yml b/roles/sap_vm_temp_vip/tasks/set_temp_vip_lb_listener.yml index 9252c52..3703a40 100644 --- a/roles/sap_vm_temp_vip/tasks/set_temp_vip_lb_listener.yml +++ b/roles/sap_vm_temp_vip/tasks/set_temp_vip_lb_listener.yml @@ -1,48 +1,63 @@ --- +# Define listening port based on target host group. Same ports are used by sap_vm_provision during NLB creation +# 55550 - SAP HANA and SAP AnyDB +# 55551 - SAP NetWeaver ASCS +# 55552 - SAP NetWeaver ERS +- name: Set fact for temporary listening port + ansible.builtin.set_fact: + __sap_vm_temp_vip_port: >- + {% if groups['hana_secondary'] | d([]) | length > 0 and inventory_hostname in groups["hana_primary"] -%} + 55550 + {%- elif groups['anydb_secondary'] | d([]) | length > 0 and inventory_hostname in groups["anydb_primary"] -%} + 55550 + {%- elif groups["nwas_ers"] | d([]) | length > 0 and inventory_hostname in groups["nwas_ascs"] -%} + 55551 + {%- elif groups["nwas_ers"] | d([]) | length > 0 and inventory_hostname in groups["nwas_ers"] -%} + 55552 + {%- endif %} -- name: Install netcat and lsof utils - ansible.builtin.package: - name: - - nc - - lsof - state: present - - -# Must use while loop to avoid netcat process ending too early -# Required when using Load Balancers (i.e.
Google Cloud, IBM Cloud, MS Azure) -# Temporary listener for SAP HANA or SAP AnyDB used is 55550 -# Temporary listener for SAP NetWeaver ASCS used is 55551 -# Temporary listener for SAP NetWeaver ERS used is 55552; must be different to ASCS Health Check Port to avoid ASCS VIP distributing to ERS host - -- name: Start netcat temporary listen on port 55550 for SAP HANA or SAP AnyDB for 6 hours (until SAP installation complete) to respond to Load Balancer Health Check probe until Linux Pacemaker started - ansible.builtin.shell: | - if ! $(lsof -Pi :55550 -sTCP:LISTEN -t >/dev/null) ; then - nohup timeout 6h bash -c "while true; do nc -vvv -l -k 55550 ; done" /dev/null 2>&1 & - sleep 2 - fi - when: - - (groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0)) - - (groups["hana_secondary"] is defined and inventory_hostname in groups["hana_primary"]) or (groups["anydb_secondary"] is defined and inventory_hostname in groups["anydb_primary"]) - - (ansible_product_name == 'Google Compute Engine') or (ansible_chassis_asset_tag == 'ibmcloud') or (ansible_chassis_vendor == 'Microsoft Corporation' and ansible_product_name == 'Virtual Machine') - -- name: Start netcat temporary listen on port 55551 for SAP NetWeaver ASCS for 6 hours (until SAP installation complete) to respond to Load Balancer Health Check probe until Linux Pacemaker started - ansible.builtin.shell: | - if ! $(lsof -Pi :55551 -sTCP:LISTEN -t >/dev/null) ; then - nohup timeout 6h bash -c "while true; do nc -vvv -l -k 55551 ; done" /dev/null 2>&1 & - sleep 2 - fi + +# Check if the defined port is already active and listening +# ss is used as it is present on both SUSE and Red Hat OS without the need to install lsof or netstat +- name: Check if temporary port is already open + ansible.builtin.command: + cmd: ss -tulnH "sport = :{{ __sap_vm_temp_vip_port }}" + register: __sap_vm_temp_vip_port_check when: - - (groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)) - - (groups["nwas_ers"] is defined and inventory_hostname in groups["nwas_ascs"]) - - (ansible_product_name == 'Google Compute Engine') or (ansible_chassis_asset_tag == 'ibmcloud') or (ansible_chassis_vendor == 'Microsoft Corporation' and ansible_product_name == 'Virtual Machine') - -- name: Start netcat temporary listen on port 55552 for SAP NetWeaver ERS for 6 hours (until SAP installation complete) to respond to Load Balancer Health Check probe until Linux Pacemaker started - ansible.builtin.shell: | - if !
$(lsof -Pi :55552 -sTCP:LISTEN -t >/dev/null) ; then - nohup timeout 6h bash -c "while true; do nc -vvv -l -k 55552 ; done" /dev/null 2>&1 & - sleep 2 - fi + - __sap_vm_temp_vip_port is defined and __sap_vm_temp_vip_port | length > 0 + changed_when: false + + +- name: Block to start temporary netcat processes for Load Balancer Health Checks when: - - (groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)) - - (groups["nwas_ers"] is defined and inventory_hostname in groups["nwas_ers"]) - - (ansible_product_name == 'Google Compute Engine') or (ansible_chassis_asset_tag == 'ibmcloud') or (ansible_chassis_vendor == 'Microsoft Corporation' and ansible_product_name == 'Virtual Machine') + - __sap_vm_temp_vip_address is defined and __sap_vm_temp_vip_address | length > 0 + - __sap_vm_temp_vip_port is defined and __sap_vm_temp_vip_port | length > 0 + - __sap_vm_temp_vip_port_check.stdout is defined and __sap_vm_temp_vip_port_check.stdout | length == 0 + - __sap_vm_temp_vip_prefix | length > 0 # Don't execute if prefix was empty during VIP creation + block: + + # Get content of ip address show filtered by VIP - Additional execution if VIP was previously created + - name: Check if VIP was already attached to network interface + ansible.builtin.shell: + cmd: "set -o pipefail && ip --oneline address show | grep {{ __sap_vm_temp_vip_address }}/{{ __sap_vm_temp_vip_prefix }}" + executable: /bin/bash + register: __sap_vm_temp_vip_check_ip + changed_when: false + ignore_errors: true + failed_when: false + + # Install netcat package that is used to temporarily listen on ports + - name: Install netcat package + ansible.builtin.package: + name: + - "{{ 'netcat' if ansible_os_family == 'Suse' else 'nc' }}" + state: present + when: __sap_vm_temp_vip_check_ip.stdout | length > 0 + + + # Start netcat with timeout of 12 hours to ensure that SAP installation has enough time to complete. + - name: Start temporary 12 hour netcat process for Load Balancer Health Checks during SAP installation + ansible.builtin.shell: | + nohup timeout 12h bash -c "while true; do nc -vvv -l -k {{ __sap_vm_temp_vip_port }} ; done" /dev/null 2>&1 & + sleep 2 + when: __sap_vm_temp_vip_check_ip.stdout | length > 0
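To confirm that the temporary health-check listener actually accepts connections before the Load Balancer probe relies on it, a quick check along these lines can be run against the relevant port; port 55550 is shown here for SAP HANA/AnyDB hosts, carried over from the mapping above, and the play itself is illustrative only:

```yaml
- name: Verify temporary Load Balancer health check listener (illustrative)
  hosts: hana_primary
  become: true
  tasks:

    - name: Wait until the temporary listener answers on port 55550
      ansible.builtin.wait_for:
        host: "{{ ansible_default_ipv4.address }}"
        port: 55550
        state: started
        timeout: 30
```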