diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index e1e121e..f4e2093 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -20,6 +20,7 @@ jobs: matrix: subplatform: - hypershift + - terraform steps: - name: Check out code diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9fd8bf2..4c59231 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/igorshubovych/markdownlint-cli - rev: v0.35.0 + rev: v0.37.0 hooks: - id: markdownlint args: [--disable, MD013, MD002] @@ -9,20 +9,20 @@ repos: hooks: - id: shellcheck - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v4.5.0 hooks: - id: check-json - repo: https://github.com/PyCQA/flake8 - rev: 6.0.0 + rev: 6.1.0 hooks: - id: flake8 args: - - --max-line-length=400 + - --max-line-length=450 - repo: https://github.com/PyCQA/pylint - rev: v3.0.0a6 + rev: v3.0.1 hooks: - id: pylint args: - - --max-line-length=400 + - --max-line-length=450 - --errors-only - --disable=C,R,E0401 diff --git a/libs/arguments.py b/libs/arguments.py index 1621f8a..267b346 100644 --- a/libs/arguments.py +++ b/libs/arguments.py @@ -5,9 +5,9 @@ """ import argparse import configparser -import re import importlib import sys +import re from libs.elasticsearch import ElasticArguments from libs.logging import LoggingArguments @@ -35,7 +35,7 @@ def __init__(self, environment): self.common_parser.add_argument("--cluster-name-seed", action=EnvDefault, env=environment, envvar="ROSA_BURNER_CLUSTER_NAME_SEED", type=str, help="Seed used to generate cluster names. 6 chars max") - self.common_parser.add_argument("--workers", action=EnvDefault, env=environment, envvar="ROSA_BURNER_WORKERS", type=self._verify_workers, default="3", + self.common_parser.add_argument("--workers", action=EnvDefault, env=environment, envvar="ROSA_BURNER_WORKERS", type=str, default="3", help="Number of workers for the hosted cluster (min: 3). If list (comma separated), iteration over the list until reach number of clusters") self.common_parser.add_argument("--workers-wait-time", type=int, default=60, action=EnvDefault, env=environment, envvar="ROSA_BURNER_WORKERS_WAIT_TIME", help="Waiting time in minutes for the workers to be Ready after cluster installation or machinepool creation . If 0, do not wait. 
Default: 60 minutes") diff --git a/libs/platforms/platform.py b/libs/platforms/platform.py index 7337574..c33786c 100644 --- a/libs/platforms/platform.py +++ b/libs/platforms/platform.py @@ -2,11 +2,8 @@ # -*- coding: utf-8 -*- import uuid import sys -import os import yaml import json -import time -import datetime import argparse import configparser @@ -178,80 +175,8 @@ def get_ocm_cluster_info(self, cluster_name): metadata['network_type'] = cluster.get("network", {}).get("type", None) return metadata - def _wait_for_workers( - self, kubeconfig, worker_nodes, wait_time, cluster_name, machinepool_name - ): - self.logging.info( - f"Waiting {wait_time} minutes for {worker_nodes} workers to be ready on {machinepool_name} machinepool on {cluster_name}" - ) - myenv = os.environ.copy() - myenv["KUBECONFIG"] = kubeconfig - result = [machinepool_name] - starting_time = datetime.datetime.utcnow().timestamp() - self.logging.debug( - f"Waiting {wait_time} minutes for nodes to be Ready on cluster {cluster_name} until {datetime.datetime.fromtimestamp(starting_time + wait_time * 60)}" - ) - while datetime.datetime.utcnow().timestamp() < starting_time + wait_time * 60: - # if force_terminate: - # logging.error("Exiting workers waiting on the cluster %s after capturing Ctrl-C" % cluster_name) - # return [] - self.logging.info("Getting node information for cluster %s" % cluster_name) - nodes_code, nodes_out, nodes_err = self.utils.subprocess_exec( - "oc get nodes -o json", - extra_params={"env": myenv, "universal_newlines": True}, - ) - try: - nodes_json = json.loads(nodes_out) - except Exception as err: - self.logging.error( - f"Cannot load command result for cluster {cluster_name}. Waiting 15 seconds for next check..." - ) - self.logging.error(err) - time.sleep(15) - continue - nodes = nodes_json["items"] if "items" in nodes_json else [] - - # First we find nodes which label nodePool match the machinepool name and then we check if type:Ready is on the conditions - ready_nodes = ( - sum( - len( - list( - filter( - lambda x: x.get("type") == "Ready" - and x.get("status") == "True", - node["status"]["conditions"], - ) - ) - ) - for node in nodes - if node.get("metadata", {}) - .get("labels", {}) - .get("hypershift.openshift.io/nodePool") - and machinepool_name - in node["metadata"]["labels"]["hypershift.openshift.io/nodePool"] - ) - if nodes - else 0 - ) - - if ready_nodes == worker_nodes: - self.logging.info( - f"Found {ready_nodes}/{worker_nodes} ready nodes on machinepool {machinepool_name} for cluster {cluster_name}. Stopping wait." - ) - result.append(ready_nodes) - result.append(int(datetime.datetime.utcnow().timestamp())) - return result - else: - self.logging.info( - f"Found {ready_nodes}/{worker_nodes} ready nodes on machinepool {machinepool_name} for cluster {cluster_name}. Waiting 15 seconds for next check..." - ) - time.sleep(15) - self.logging.error( - f"Waiting time expired. 
After {wait_time} minutes there are {ready_nodes}/{worker_nodes} ready nodes on {machinepool_name} machinepool for cluster {cluster_name}" - ) - result.append(ready_nodes) - result.append("") - return result + def get_workers_ready(self, kubeconfig, cluster_name): + pass def create_cluster(self, platform, cluster_name): pass diff --git a/libs/platforms/rosa/hypershift/README.md b/libs/platforms/rosa/hypershift/README.md index 1460f30..8df1c6d 100644 --- a/libs/platforms/rosa/hypershift/README.md +++ b/libs/platforms/rosa/hypershift/README.md @@ -1,7 +1,7 @@ -# Hypershift Platform +# Hypershift Subplatform -Classes related to Hypershift platform +Classes related to Hypershift subplatform As hypershift is a subplatform or ROSA, select rosa as platform parameter and hypershift as subplatform: `rosa-burner --platform rosa --subplatform hypershift` diff --git a/libs/platforms/rosa/hypershift/hypershift.py b/libs/platforms/rosa/hypershift/hypershift.py index dabd07d..b21b26f 100644 --- a/libs/platforms/rosa/hypershift/hypershift.py +++ b/libs/platforms/rosa/hypershift/hypershift.py @@ -2,6 +2,7 @@ # -*- coding: utf-8 -*- import sys import json +import re import os import time import datetime @@ -18,6 +19,18 @@ class Hypershift(Rosa): def __init__(self, arguments, logging, utils, es): super().__init__(arguments, logging, utils, es) + pattern = re.compile(r"^(\d+)(,\s*\d+)*$") + if arguments["workers"].isdigit() and int(arguments["workers"]) % 3 != 0: + self.logging.error(f"Invalid value ({arguments['workers']}) for parameter `--workers`. If digit, it must be divisible by 3") + sys.exit("Exiting...") + elif bool(pattern.match(arguments["workers"])): + for num in arguments["workers"].split(","): + if int(num) < 3 or int(num) % 3 != 0: + self.logging.error(f"Invalid value ({num}) for parameter `--workers`. 
If list, all values must be divisible by 3") + sys.exit("Exiting...") + + self.environment["workers"] = arguments["workers"] + self.environment["service_cluster"] = arguments["service_cluster"] self.environment["create_vpcs"] = arguments["create_vpcs"] @@ -262,106 +274,6 @@ def _destroy_vpcs(self): ) return 1 - def watcher(self): - super().watcher() - self.logging.info(f"Watcher started on {self.environment['platform']}") - self.logging.info(f"Getting status every {self.environment['watcher_delay']}") - self.logging.info(f"Expected Clusters_ {self.environment['cluster_count']}") - self.logging.info( - f"Manually terminate watcher creating the file {self.environment['path']}/terminate_watcher" - ) - file_path = os.path.join(self.environment["path"], "terminate_watcher") - if os.path.exists(file_path): - os.remove(file_path) - while not self.utils.force_terminate: - self.logging.debug(self.environment['clusters']) - if os.path.isfile( - os.path.join(self.environment["path"], "terminate_watcher") - ): - self.logging.warning("Watcher has been manually set to terminate") - break - - ( - cluster_list_code, - cluster_list_out, - cluster_list_err, - ) = self.utils.subprocess_exec( - "rosa list clusters -o json", extra_params={"universal_newlines": True} - ) - current_cluster_count = 0 - installed_clusters = 0 - clusters_with_all_workers = 0 - state = {} - error = [] - try: - rosa_list_clusters = json.loads(cluster_list_out) - except ValueError as err: - self.logging.error("Failed to get hosted clusters list: %s" % err) - self.logging.error(cluster_list_out) - self.logging.error(cluster_list_err) - rosa_list_clusters = {} - for cluster in rosa_list_clusters: - if ( - "name" in cluster - and self.environment["cluster_name_seed"] in cluster["name"] - ): - current_cluster_count += 1 - state_key = cluster["state"] if "state" in cluster else "" - if state_key == "error": - error.append(cluster["name"]) - elif state_key == "ready": - state[state_key] = state.get(state_key, 0) + 1 - installed_clusters += 1 - required_workers = cluster["nodes"]["compute"] - ready_workers = self._get_workers_ready( - self.environment["path"] - + "/" - + cluster["name"] - + "/kubeconfig", - cluster["name"], - ) - if ready_workers == required_workers: - clusters_with_all_workers += 1 - elif state_key != "": - state[state_key] = state.get(state_key, 0) + 1 - self.logging.info( - "Requested Clusters for test %s: %d of %d" - % ( - self.environment["uuid"], - current_cluster_count, - self.environment["cluster_count"], - ) - ) - state_output = "" - for i in state.items(): - state_output += "(" + str(i[0]) + ": " + str(i[1]) + ") " - self.logging.info(state_output) - if error: - self.logging.warning("Clusters in error state: %s" % error) - if installed_clusters == self.environment["cluster_count"]: - # All clusters ready - if self.environment["wait_for_workers"]: - if clusters_with_all_workers == self.environment["cluster_count"]: - self.logging.info( - "All clusters on ready status and all clusters with all workers ready. Exiting watcher" - ) - break - else: - self.logging.info( - f"Waiting {self.environment['watcher_delay']} seconds for next watcher run" - ) - time.sleep(self.environment["watcher_delay"]) - else: - self.logging.info("All clusters on ready status. 
Exiting watcher") - break - else: - self.logging.info( - f"Waiting {self.environment['watcher_delay']} seconds for next watcher run" - ) - time.sleep(self.environment["watcher_delay"]) - self.logging.debug(self.environment['clusters']) - self.logging.info("Watcher terminated") - def delete_cluster(self, platform, cluster_name): super().delete_cluster(platform, cluster_name) cluster_info = platform.environment["clusters"][cluster_name] @@ -420,6 +332,81 @@ def _get_aws_role_name(self, cluster_name): self.logging.error(f"No Role named kube-controller-manager found on Cluster {cluster_name}") return None + def _wait_for_workers( + self, kubeconfig, worker_nodes, wait_time, cluster_name, machinepool_name + ): + self.logging.info( + f"Waiting {wait_time} minutes for {worker_nodes} workers to be ready on {machinepool_name} machinepool on {cluster_name}" + ) + myenv = os.environ.copy() + myenv["KUBECONFIG"] = kubeconfig + result = [machinepool_name] + starting_time = datetime.datetime.utcnow().timestamp() + self.logging.debug( + f"Waiting {wait_time} minutes for nodes to be Ready on cluster {cluster_name} until {datetime.datetime.fromtimestamp(starting_time + wait_time * 60)}" + ) + while datetime.datetime.utcnow().timestamp() < starting_time + wait_time * 60: + # if force_terminate: + # logging.error("Exiting workers waiting on the cluster %s after capturing Ctrl-C" % cluster_name) + # return [] + self.logging.info("Getting node information for cluster %s" % cluster_name) + nodes_code, nodes_out, nodes_err = self.utils.subprocess_exec( + "oc get nodes -o json", + extra_params={"env": myenv, "universal_newlines": True}, + ) + try: + nodes_json = json.loads(nodes_out) + except Exception as err: + self.logging.error( + f"Cannot load command result for cluster {cluster_name}. Waiting 15 seconds for next check..." + ) + self.logging.error(err) + time.sleep(15) + continue + nodes = nodes_json["items"] if "items" in nodes_json else [] + + # First we find nodes which label nodePool match the machinepool name and then we check if type:Ready is on the conditions + ready_nodes = ( + sum( + len( + list( + filter( + lambda x: x.get("type") == "Ready" + and x.get("status") == "True", + node["status"]["conditions"], + ) + ) + ) + for node in nodes + if node.get("metadata", {}) + .get("labels", {}) + .get("hypershift.openshift.io/nodePool") + and machinepool_name + in node["metadata"]["labels"]["hypershift.openshift.io/nodePool"] + ) + if nodes + else 0 + ) + + if ready_nodes == worker_nodes: + self.logging.info( + f"Found {ready_nodes}/{worker_nodes} ready nodes on machinepool {machinepool_name} for cluster {cluster_name}. Stopping wait." + ) + result.append(ready_nodes) + result.append(int(datetime.datetime.utcnow().timestamp())) + return result + else: + self.logging.info( + f"Found {ready_nodes}/{worker_nodes} ready nodes on machinepool {machinepool_name} for cluster {cluster_name}. Waiting 15 seconds for next check..." + ) + time.sleep(15) + self.logging.error( + f"Waiting time expired. 
After {wait_time} minutes there are {ready_nodes}/{worker_nodes} ready nodes on {machinepool_name} machinepool for cluster {cluster_name}" + ) + result.append(ready_nodes) + result.append("") + return result + def create_cluster(self, platform, cluster_name): super().create_cluster(platform, cluster_name) cluster_info = platform.environment["clusters"][cluster_name] @@ -638,6 +625,31 @@ def _namespace_wait(self, kubeconfig, cluster_id, cluster_name, type): ) return 0 + def get_workers_ready(self, kubeconfig, cluster_name): + super().get_workers_ready(kubeconfig, cluster_name) + myenv = os.environ.copy() + myenv["KUBECONFIG"] = kubeconfig + self.logging.info(f"Getting node information for Hypershift cluster {cluster_name}") + nodes_code, nodes_out, nodes_err = self.utils.subprocess_exec("oc get nodes -o json", extra_params={"env": myenv, "universal_newlines": True}, log_output=False) + try: + nodes_json = json.loads(nodes_out) + except Exception as err: + self.logging.debug(f"Cannot load command result for cluster {cluster_name}") + self.logging.debug(err) + return 0 + nodes = nodes_json["items"] if "items" in nodes_json else [] + status = [] + for node in nodes: + nodepool = node.get("metadata", {}).get("labels", {}).get("hypershift.openshift.io/nodePool", "") + if "workers" in nodepool: + conditions = node.get("status", {}).get("conditions", []) + for condition in conditions: + if "type" in condition and condition["type"] == "Ready": + status.append(condition["status"]) + status_list = {i: status.count(i) for i in status} + ready_nodes = status_list["True"] if "True" in status_list else 0 + return ready_nodes + class HypershiftArguments(RosaArguments): def __init__(self, parser, config_file, environment): diff --git a/libs/platforms/rosa/rosa.py b/libs/platforms/rosa/rosa.py index ad033b6..d534a9f 100644 --- a/libs/platforms/rosa/rosa.py +++ b/libs/platforms/rosa/rosa.py @@ -202,38 +202,15 @@ def _delete_operator_roles(self): def platform_cleanup(self): super().platform_cleanup() - def watcher(self): - super().watcher() - def create_cluster(self, platform, cluster_name): super().create_cluster(platform, cluster_name) def delete_cluster(self, platform, cluster_name): super().delete_cluster(platform, cluster_name) - def _get_workers_ready(self, kubeconfig, cluster_name): - myenv = os.environ.copy() - myenv["KUBECONFIG"] = kubeconfig - self.logging.info(f"Getting node information for cluster {cluster_name}") - nodes_code, nodes_out, nodes_err = self.utils.subprocess_exec("oc get nodes -o json", extra_params={"env": myenv, "universal_newlines": True}, log_output=False) - try: - nodes_json = json.loads(nodes_out) - except Exception as err: - self.logging.debug(f"Cannot load command result for cluster {cluster_name}") - self.logging.debug(err) - return 0 - nodes = nodes_json["items"] if "items" in nodes_json else [] - status = [] - for node in nodes: - nodepool = node.get("metadata", {}).get("labels", {}).get("hypershift.openshift.io/nodePool", "") - if "workers" in nodepool: - conditions = node.get("status", {}).get("conditions", []) - for condition in conditions: - if "type" in condition and condition["type"] == "Ready": - status.append(condition["status"]) - status_list = {i: status.count(i) for i in status} - ready_nodes = status_list["True"] if "True" in status_list else 0 - return ready_nodes + def get_workers_ready(self, kubeconfig, cluster_name): + super().get_workers_ready(kubeconfig, cluster_name) + return Platform.get_workers_ready(self, kubeconfig, cluster_name) def 
get_metadata(self, cluster_name): metadata = {} @@ -420,6 +397,103 @@ def add_machinepool(self, cluster_name, cluster_id, aws_zones, machinepool): f"Unable to create machinepool {machinepool['name']}-{str(id)} on {cluster_name}" ) + def watcher(self): + super().watcher() + self.logging.info(f"Watcher started on {self.environment['platform']}") + self.logging.info(f"Getting status every {self.environment['watcher_delay']}") + self.logging.info(f"Expected Clusters: {self.environment['cluster_count']}") + self.logging.info(f"Manually terminate watcher creating the file {self.environment['path']}/terminate_watcher") + file_path = os.path.join(self.environment["path"], "terminate_watcher") + if os.path.exists(file_path): + os.remove(file_path) + while not self.utils.force_terminate: + self.logging.debug(self.environment['clusters']) + if os.path.isfile( + os.path.join(self.environment["path"], "terminate_watcher") + ): + self.logging.warning("Watcher has been manually set to terminate") + break + + ( + cluster_list_code, + cluster_list_out, + cluster_list_err, + ) = self.utils.subprocess_exec( + "rosa list clusters -o json", extra_params={"universal_newlines": True} + ) + current_cluster_count = 0 + installed_clusters = 0 + clusters_with_all_workers = 0 + state = {} + error = [] + try: + rosa_list_clusters = json.loads(cluster_list_out) + except ValueError as err: + self.logging.error("Failed to get clusters list: %s" % err) + self.logging.error(cluster_list_out) + self.logging.error(cluster_list_err) + rosa_list_clusters = {} + for cluster in rosa_list_clusters: + if ( + "name" in cluster + and self.environment["cluster_name_seed"] in cluster["name"] + ): + current_cluster_count += 1 + state_key = cluster["state"] if "state" in cluster else "" + if state_key == "error": + error.append(cluster["name"]) + elif state_key == "ready": + state[state_key] = state.get(state_key, 0) + 1 + installed_clusters += 1 + required_workers = cluster["nodes"]["compute"] + ready_workers = self.get_workers_ready(self.environment["path"] + + "/" + + cluster["name"] + + "/kubeconfig", + cluster["name"], + ) + if ready_workers == required_workers: + clusters_with_all_workers += 1 + elif state_key != "": + state[state_key] = state.get(state_key, 0) + 1 + self.logging.info( + "Requested Clusters for test %s: %d of %d" + % ( + self.environment["uuid"], + current_cluster_count, + self.environment["cluster_count"], + ) + ) + state_output = "" + for i in state.items(): + state_output += "(" + str(i[0]) + ": " + str(i[1]) + ") " + self.logging.info(state_output) + if error: + self.logging.warning("Clusters in error state: %s" % error) + if installed_clusters == self.environment["cluster_count"]: + # All clusters ready + if self.environment["wait_for_workers"]: + if clusters_with_all_workers == self.environment["cluster_count"]: + self.logging.info( + "All clusters on ready status and all clusters with all workers ready. Exiting watcher" + ) + break + else: + self.logging.info( + f"Waiting {self.environment['watcher_delay']} seconds for next watcher run" + ) + time.sleep(self.environment["watcher_delay"]) + else: + self.logging.info("All clusters on ready status. 
Exiting watcher") + break + else: + self.logging.info( + f"Waiting {self.environment['watcher_delay']} seconds for next watcher run" + ) + time.sleep(self.environment["watcher_delay"]) + self.logging.debug(self.environment['clusters']) + self.logging.info("Watcher terminated") + class RosaArguments(PlatformArguments): def __init__(self, parser, config_file, environment): diff --git a/libs/platforms/rosa/terraform/README.md b/libs/platforms/rosa/terraform/README.md new file mode 100644 index 0000000..69f52c9 --- /dev/null +++ b/libs/platforms/rosa/terraform/README.md @@ -0,0 +1,19 @@ +# Terraform Subplatform + + +Classes related to Terraform subplatform + +As terraform is a subplatform or ROSA, select rosa as platform parameter and terraform as subplatform: +`rosa-burner --platform rosa --subplatform terraform` + +## Platforms Arguments + +To use the config file, define parameters related to platform under the `[Platform:Rosa:Terraform]` section + +| Argument | Default Value | Config file variable | Environment Variable | +|--------------------------|-------------------|----------------------|--------------------------------| +| --create-vpcs | | | | +| --clusters-per-vpc | 1 | | ROSA_BURNER_CLUSTERS_PER_VPC | +| --terraform-retry | 5 | | | +| --service-cluster | | hypershift_service_cluster | ROSA_BURNER_HYPERSHIFT_SERVICE_CLUSTER | +| --delete-vpcs | | | | diff --git a/libs/platforms/rosa/terraform/files/README.md b/libs/platforms/rosa/terraform/files/README.md new file mode 100644 index 0000000..25eb501 --- /dev/null +++ b/libs/platforms/rosa/terraform/files/README.md @@ -0,0 +1,37 @@ +# ROSA STS cluster creation example + +This example shows how to create an STS _ROSA_ cluster, operator IAM roles and OIDC provider for an existing ROSA STS cluster. +_ROSA_ stands for Red Hat Openshift Service on AWS +and is a cluster that is created in the AWS cloud infrastructure. + +To run it: + +* Provide OCM Authentication Token + + OCM authentication token that you can get [here](https://console.redhat.com/openshift/token). + ``` + export TF_VAR_token=... + ``` + +* Provide OCM environment by setting a value to url + ``` + export TF_VAR_url=... + ``` + +* Decide STS operator_role_prefix + ``` + export TF_VAR_operator_role_prefix=... + ``` + +* Provide STS account_role_prefix + ``` + export TF_VAR_account_role_prefix=... + ``` + +* Provide List of AWS resource tags to apply (optional): + ``` + export TF_VAR_tags=... + ``` + +and then run the `terraform apply` command. + diff --git a/libs/platforms/rosa/terraform/files/main.tf b/libs/platforms/rosa/terraform/files/main.tf new file mode 100644 index 0000000..6f865ce --- /dev/null +++ b/libs/platforms/rosa/terraform/files/main.tf @@ -0,0 +1,83 @@ +# +# Copyright (c) 2023 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.20.0" + } + rhcs = { + version = ">= 1.1.0" + source = "terraform-redhat/rhcs" + } + } +} +provider "rhcs" { + token = var.token + url = var.url +} + +# Create managed OIDC config +module "oidc_config" { + token = var.token + url = var.url + source = "./oidc_provider" + managed = true + operator_role_prefix = var.operator_role_prefix + account_role_prefix = var.account_role_prefix + tags = var.tags + path = var.path +} + +locals { + path = coalesce(var.path, "/") + sts_roles = { + role_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role${local.path}${var.account_role_prefix}-Installer-Role", + support_role_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role${local.path}${var.account_role_prefix}-Support-Role", + instance_iam_roles = { + master_role_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role${local.path}${var.account_role_prefix}-ControlPlane-Role", + worker_role_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role${local.path}${var.account_role_prefix}-Worker-Role" + }, + operator_role_prefix = var.operator_role_prefix, + oidc_config_id = module.oidc_config.id + } +} + +data "aws_caller_identity" "current" { +} + +locals { + openshift_version = var.openshift_version != null ? var.openshift_version : null +} + +resource "rhcs_cluster_rosa_classic" "rosa_sts_cluster" { + name = var.cluster_name + cloud_region = var.cloud_region + aws_account_id = data.aws_caller_identity.current.account_id + availability_zones = var.availability_zones + replicas = var.replicas + autoscaling_enabled = var.autoscaling_enabled + min_replicas = var.min_replicas + max_replicas = var.max_replicas + version = local.openshift_version + compute_machine_type = var.compute_machine_type + properties = { + rosa_creator_arn = data.aws_caller_identity.current.arn + } + sts = local.sts_roles + wait_for_create_complete = true +} diff --git a/libs/platforms/rosa/terraform/files/oidc_provider/README.md b/libs/platforms/rosa/terraform/files/oidc_provider/README.md new file mode 100644 index 0000000..14ee54f --- /dev/null +++ b/libs/platforms/rosa/terraform/files/oidc_provider/README.md @@ -0,0 +1,59 @@ +# OIDC provider creation example + +This example shows how to create an OIDC config, operator IAM roles and an OIDC provider. + +To create an unmanaged OIDC config you'll need to create these resources: +1. OIDC config input - using the resource called `rhcs_rosa_oidc_config_input` +2. AWS resources - using the module called `oidc_config_input_resources` in the main.tf file +3. OIDC config - using the resource `rhcs_rosa_oidc_config` + +After you create the OIDC config, you can create the OIDC provider and operator roles. + +To run it: + +* Provide OCM Authentication Token + + OCM authentication token that you can get [here](https://console.redhat.com/openshift/token). + + ``` + export TF_VAR_token=... + ``` + +* Provide OCM environment by setting a value to url + + ``` + export TF_VAR_url=... + ``` +* Indicate whether this is a managed or unmanaged provider config + + ``` + export TF_VAR_managed=[true/false] + ``` +* For unmanaged provider config, provide Installer Role ARN by setting a value + + ``` + export TF_VAR_installer_role_arn=... + ``` + +* Decide STS operator_role_prefix + + ``` + export TF_VAR_operator_role_prefix=... + ``` + +* Decide STS account_role_prefix. 
If not set, the default account IAM roles are used + + ``` + export TF_VAR_account_role_prefix=... + ``` +* You can decide which cloud region to use; this is optional (default: us-east-2) + ``` + export TF_VAR_cloud_region=... + ``` + +* Provide a list of AWS resource tags to apply (optional): + ``` + export TF_VAR_tags=... + ``` + +and then run the `terraform apply` command. diff --git a/libs/platforms/rosa/terraform/files/oidc_provider/main.tf b/libs/platforms/rosa/terraform/files/oidc_provider/main.tf new file mode 100644 index 0000000..8ff4b4a --- /dev/null +++ b/libs/platforms/rosa/terraform/files/oidc_provider/main.tf @@ -0,0 +1,87 @@ +# +# Copyright (c) 2023 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.20.0" + } + rhcs = { + version = ">= 1.1.0" + source = "terraform-redhat/rhcs" + } + } +} +provider "rhcs" { + token = var.token + url = var.url +} + +provider "aws" { + region = var.cloud_region +} + +# Generates the OIDC config resources' names +resource "rhcs_rosa_oidc_config_input" "oidc_input" { + count = var.managed ? 0 : 1 + + region = var.cloud_region +} + +# Create the OIDC config resources on AWS +module "oidc_config_input_resources" { + count = var.managed ? 
0 : 1 + + source = "terraform-redhat/rosa-sts/aws" + version = "0.0.14" + + create_oidc_config_resources = true + + bucket_name = one(rhcs_rosa_oidc_config_input.oidc_input[*].bucket_name) + discovery_doc = one(rhcs_rosa_oidc_config_input.oidc_input[*].discovery_doc) + jwks = one(rhcs_rosa_oidc_config_input.oidc_input[*].jwks) + private_key = one(rhcs_rosa_oidc_config_input.oidc_input[*].private_key) + private_key_file_name = one(rhcs_rosa_oidc_config_input.oidc_input[*].private_key_file_name) + private_key_secret_name = one(rhcs_rosa_oidc_config_input.oidc_input[*].private_key_secret_name) +} + +resource "rhcs_rosa_oidc_config" "oidc_config" { + managed = var.managed + secret_arn = one(module.oidc_config_input_resources[*].secret_arn) + issuer_url = one(rhcs_rosa_oidc_config_input.oidc_input[*].issuer_url) + installer_role_arn = var.installer_role_arn +} + +data "rhcs_rosa_operator_roles" "operator_roles" { + operator_role_prefix = var.operator_role_prefix + account_role_prefix = var.account_role_prefix +} + +module "operator_roles_and_oidc_provider" { + source = "terraform-redhat/rosa-sts/aws" + version = "0.0.14" + + create_operator_roles = true + create_oidc_provider = true + + cluster_id = "" + rh_oidc_provider_thumbprint = rhcs_rosa_oidc_config.oidc_config.thumbprint + rh_oidc_provider_url = rhcs_rosa_oidc_config.oidc_config.oidc_endpoint_url + operator_roles_properties = data.rhcs_rosa_operator_roles.operator_roles.operator_iam_roles + tags = var.tags + path = var.path +} diff --git a/libs/platforms/rosa/terraform/files/oidc_provider/output.tf b/libs/platforms/rosa/terraform/files/oidc_provider/output.tf new file mode 100644 index 0000000..a491a17 --- /dev/null +++ b/libs/platforms/rosa/terraform/files/oidc_provider/output.tf @@ -0,0 +1,11 @@ +output "id" { + value = rhcs_rosa_oidc_config.oidc_config.id +} + +output "oidc_endpoint_url" { + value = rhcs_rosa_oidc_config.oidc_config.oidc_endpoint_url +} + +output "thumbprint" { + value = rhcs_rosa_oidc_config.oidc_config.thumbprint +} diff --git a/libs/platforms/rosa/terraform/files/oidc_provider/variables.tf b/libs/platforms/rosa/terraform/files/oidc_provider/variables.tf new file mode 100644 index 0000000..3dfb1e2 --- /dev/null +++ b/libs/platforms/rosa/terraform/files/oidc_provider/variables.tf @@ -0,0 +1,47 @@ +variable "token" { + type = string + sensitive = true +} + +variable "url" { + type = string + description = "Provide OCM environment by setting a value to url" + default = "https://api.openshift.com" +} + +variable "managed" { + description = "Indicates whether it is a Red Hat managed or unmanaged (Customer hosted) OIDC Configuration" + type = bool +} + +variable "installer_role_arn" { + description = "STS Role ARN with get secrets permission, relevant only for unmanaged OIDC config" + type = string + default = null +} + +variable "operator_role_prefix" { + type = string +} + +variable "account_role_prefix" { + type = string + default = "" +} + +variable "cloud_region" { + type = string + default = "us-east-2" +} + +variable "tags" { + description = "List of AWS resource tags to apply." + type = map(string) + default = null +} + +variable "path" { + description = "(Optional) The arn path for the account/operator roles as well as their policies." 
+ type = string + default = null +} diff --git a/libs/platforms/rosa/terraform/files/output.tf b/libs/platforms/rosa/terraform/files/output.tf new file mode 100644 index 0000000..49ede51 --- /dev/null +++ b/libs/platforms/rosa/terraform/files/output.tf @@ -0,0 +1,15 @@ +output "oidc_config_id" { + value = module.oidc_config.id +} + +output "oidc_endpoint_url" { + value = module.oidc_config.oidc_endpoint_url +} + +output "thumbprint" { + value = module.oidc_config.thumbprint +} + +output "cluster_id" { + value = rhcs_cluster_rosa_classic.rosa_sts_cluster.id +} diff --git a/libs/platforms/rosa/terraform/files/variables.tf b/libs/platforms/rosa/terraform/files/variables.tf new file mode 100644 index 0000000..8116c1c --- /dev/null +++ b/libs/platforms/rosa/terraform/files/variables.tf @@ -0,0 +1,81 @@ +variable "token" { + type = string + sensitive = true +} + +variable "url" { + type = string + description = "Provide OCM environment by setting a value to url" + default = "https://api.openshift.com" +} + +variable "operator_role_prefix" { + type = string +} + +variable "account_role_prefix" { + type = string + default = "" +} + +variable "cluster_name" { + type = string + default = "rbur-000-0001" +} + +variable "cloud_region" { + type = string + default = "us-east-2" +} + +variable "availability_zones" { + type = list(string) + default = ["us-east-2a"] +} + +variable "path" { + description = "(Optional) The arn path for the account/operator roles as well as their policies." + type = string + default = null +} + +variable "tags" { + description = "List of AWS resource tags to apply." + type = map(string) + default = null +} + +variable "openshift_version" { + description = "Desired version of OpenShift for the cluster, for example '4.1.0'. If version is greater than the currently running version, an upgrade will be scheduled." + type = string + default = null +} + +variable "replicas" { + description = "The amount of the machine created in this machine pool." + type = number + default = null +} + +variable "autoscaling_enabled" { + description = "Enables autoscaling. This variable requires you to set a maximum and minimum replicas range using the `max_replicas` and `min_replicas` variables." + type = string + default = "false" +} + +variable "min_replicas" { + description = "The minimum number of replicas for autoscaling." + type = number + default = null +} + +variable "max_replicas" { + description = "The maximum number of replicas not exceeded by the autoscaling functionality." + type = number + default = null +} + +variable "compute_machine_type" { + type = string + default = "m5.2xlarge" +} diff --git a/libs/platforms/rosa/terraform/terraform.py b/libs/platforms/rosa/terraform/terraform.py new file mode 100644 index 0000000..6332316 --- /dev/null +++ b/libs/platforms/rosa/terraform/terraform.py @@ -0,0 +1,274 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import sys +import json +import os +import time +import datetime +# import math +import shutil +import configparser + +from libs.platforms.rosa.rosa import Rosa +from libs.platforms.rosa.rosa import RosaArguments + + +class Terraform(Rosa): + def __init__(self, arguments, logging, utils, es): + super().__init__(arguments, logging, utils, es) + + self.environment["commands"].append("terraform") + + self.logging.info("Parameter --workers will be ignored on terraform subplatform. 
OCM Terraform module is fixed to 2 workers") + self.environment["workers"] = "2" + + # if self.environment['cluster_count'] % arguments['clusters_per_apply'] == 0: + # self.logging.debug(str(self.environment['cluster_count'] % arguments['clusters_per_apply'])) + # self.logging.info(str(arguments['clusters_per_apply']) + " clusters will be installed on each Terraform Apply") + # self.environment['clusters_per_apply'] = arguments['clusters_per_apply'] + # self.environment['cluster_count'] = self.environment['cluster_count'] / self.environment['clusters_per_apply'] + # else: + # self.logging.debug(str(self.environment['cluster_count'] % arguments['clusters_per_apply'])) + # self.logging.error("--cluster-count (" + str(self.environment['cluster_count']) + ") parameter must be divisible by --clusters-per-apply (" + str(arguments['clusters_per_apply']) + ")") + # sys.exit("Exiting...") + + def initialize(self): + super().initialize() + + shutil.copytree(sys.path[0] + "/libs/platforms/rosa/terraform/files", self.environment['path'] + "/terraform") + + self.logging.info("Initializing Terraform with: terraform init") + terraform_code, terraform_out, terraform_err = self.utils.subprocess_exec("terraform init", self.environment["path"] + "/terraform/terraform-init.log", {"cwd": self.environment["path"] + "/terraform"}) + if terraform_code != 0: + self.logging.error(f"Failed to initialize terraform. Check {self.environment['path']}/terraform/init.log for more information") + sys.exit("Exiting...") + + def platform_cleanup(self): + super().platform_cleanup() + + def delete_cluster(self, platform, cluster_name): + super().delete_cluster(platform, cluster_name) + + myenv = os.environ.copy() + myenv["TF_VAR_token"] = self.environment["ocm_token"] + myenv["TF_VAR_cloud_region"] = self.environment['aws']['region'] + myenv["TF_VAR_url"] = self.environment["ocm_url"] + myenv["TF_VAR_account_role_prefix"] = 'ManagedOpenShift' + myenv["TF_VAR_cluster_name"] = cluster_name + myenv["TF_VAR_operator_role_prefix"] = cluster_name +# myenv["TF_VAR_clusters_per_apply"] = str(self.environment['clusters_per_apply']) + + cluster_info = platform.environment["clusters"][cluster_name] + cluster_start_time = int(datetime.datetime.utcnow().timestamp()) + cluster_info["uuid"] = self.environment["uuid"] + cluster_info["install_method"] = "terraform" + self.logging.info(f"Deleting cluster {cluster_name} on Rosa Platform using terraform") + cleanup_code, cleanup_out, cleanup_err = self.utils.subprocess_exec("terraform apply -destroy -state=" + cluster_info['path'] + "/terraform.tfstate --auto-approve", cluster_info["path"] + "/cleanup.log", {"cwd": self.environment['path'] + "/terraform", 'preexec_fn': self.utils.disable_signals, "env": myenv}) + cluster_delete_end_time = int(datetime.datetime.utcnow().timestamp()) + if cleanup_code == 0: + self.logging.debug( + f"Confirm cluster {cluster_name} deleted by attempting to describe the cluster. This should fail if the cluster is removed." 
+ ) + check_code, check_out, check_err = self.utils.subprocess_exec( + "rosa describe cluster -c " + cluster_name, log_output=False + ) + if check_code != 0: + cluster_info["status"] = "deleted" + else: + cluster_info["status"] = "not deleted" + else: + cluster_info["status"] = "not deleted" + cluster_end_time = int(datetime.datetime.utcnow().timestamp()) + cluster_info["destroy_duration"] = cluster_delete_end_time - cluster_start_time + cluster_info["destroy_all_duration"] = cluster_end_time - cluster_start_time + try: + with open(cluster_info['path'] + "/metadata_destroy.json", "w") as metadata_file: + json.dump(cluster_info, metadata_file) + except Exception as err: + self.logging.error(err) + self.logging.error(f"Failed to write metadata_install.json file located at {cluster_info['path']}") + if self.es is not None: + cluster_info["timestamp"] = datetime.datetime.utcnow().isoformat() + self.es.index_metadata(cluster_info) + + def get_workers_ready(self, kubeconfig, cluster_name): + super().get_workers_ready(kubeconfig, cluster_name) + myenv = os.environ.copy() + myenv["KUBECONFIG"] = kubeconfig + self.logging.info(f"Getting node information for Terraform installed cluster {cluster_name}") + nodes_code, nodes_out, nodes_err = self.utils.subprocess_exec("oc get nodes -o json", extra_params={"env": myenv, "universal_newlines": True}, log_output=False) + try: + nodes_json = json.loads(nodes_out) + except Exception as err: + self.logging.debug(f"Cannot load command result for cluster {cluster_name}") + self.logging.debug(err) + return 0 + nodes = nodes_json["items"] if "items" in nodes_json else [] + status = [] + for node in nodes: + labels = node.get("metadata", {}).get("labels", {}) + if "node-role.kubernetes.io/worker" in labels and "node-role.kubernetes.io/control-plane" not in labels and "node-role.kubernetes.io/infra" not in labels: + conditions = node.get("status", {}).get("conditions", []) + for condition in conditions: + if "type" in condition and condition["type"] == "Ready": + status.append(condition["status"]) + status_list = {i: status.count(i) for i in status} + ready_nodes = status_list["True"] if "True" in status_list else 0 + return ready_nodes + + def create_cluster(self, platform, cluster_name): + super().create_cluster(platform, cluster_name) + cluster_info = platform.environment["clusters"][cluster_name] + cluster_info["uuid"] = self.environment["uuid"] + cluster_info["install_method"] = "terraform" + self.logging.info(f"Creating cluster {cluster_info['index']} on ROSA with name {cluster_name} and {cluster_info['workers']} workers") + cluster_info["path"] = platform.environment["path"] + "/" + cluster_name + os.mkdir(cluster_info["path"]) + self.logging.debug("Attempting cluster installation") + self.logging.debug("Output directory set to %s" % cluster_info["path"]) + + myenv = os.environ.copy() + myenv["TF_VAR_token"] = self.environment["ocm_token"] + myenv["TF_VAR_cloud_region"] = self.environment['aws']['region'] + myenv["TF_VAR_url"] = self.environment["ocm_url"] + myenv["TF_VAR_account_role_prefix"] = 'ManagedOpenShift' + myenv["TF_VAR_cluster_name"] = cluster_name + myenv["TF_VAR_operator_role_prefix"] = cluster_name +# myenv["TF_VAR_clusters_per_apply"] = str(self.environment['clusters_per_apply']) + + terraform_plan_code, terraform_plan_out, terraform_plan_err = self.utils.subprocess_exec("terraform plan -out " + cluster_info['path'] + "/" + cluster_name + ".tfplan", cluster_info["path"] + "/terraform_plan.log", {"cwd": self.environment['path'] + "/terraform", 
"env": myenv}) + if terraform_plan_code != 0: + cluster_end_time = int(datetime.datetime.utcnow().timestamp()) + cluster_info["status"] = "Not Installed" + self.logging.error(f"Cluster {cluster_name} terraform plan failed") + self.logging.debug(terraform_plan_out) + return 1 + else: + self.logging.info(f"Trying to install cluster {cluster_name} with {cluster_info['workers']} workers up to 5 times using terraform provider") + trying = 0 + while trying <= 5: + cluster_start_time = int(datetime.datetime.utcnow().timestamp()) + if self.utils.force_terminate: + self.logging.error(f"Exiting cluster creation for {cluster_name} after capturing Ctrl-C") + return 0 + trying += 1 + terraform_apply_code, terraform_apply_out, terraform_apply_err = self.utils.subprocess_exec("terraform apply -state=" + cluster_info['path'] + "/terraform.tfstate " + cluster_info['path'] + "/" + cluster_name + ".tfplan", cluster_info["path"] + "/terraform_apply.log", {"cwd": self.environment['path'] + "/terraform", 'preexec_fn': self.utils.disable_signals, "env": myenv}) + if terraform_apply_code != 0: + cluster_info["install_try"] = trying + self.logging.debug(terraform_apply_out) + self.logging.debug(terraform_apply_err) + if trying <= 5: + self.logging.warning(f"Try: {trying}/5. Cluster {cluster_name} installation failed, retrying in 15 seconds") + time.sleep(15) + else: + cluster_end_time = int(datetime.datetime.utcnow().timestamp()) + cluster_info["status"] = "Not Installed" + self.logging.error(f"Cluster {cluster_name} installation failed after 5 retries") + self.logging.debug(terraform_apply_out) + self.logging.debug(terraform_apply_err) + return 1 + else: + cluster_end_time = int(datetime.datetime.utcnow().timestamp()) + break + + cluster_info['status'] = "installed" + self.logging.info(f"Cluster {cluster_name} installation finished on the {trying} try") + cluster_info["metadata"] = self.get_metadata(cluster_name) + cluster_info["install_try"] = trying + cluster_info["install_duration"] = cluster_end_time - cluster_start_time + access_timers = self.get_cluster_admin_access(cluster_name, cluster_info["path"]) + cluster_info["kubeconfig"] = access_timers.get("kubeconfig", None) + cluster_info["cluster_admin_create"] = access_timers.get("cluster_admin_create", None) + cluster_info["cluster_admin_login"] = access_timers.get("cluster_admin_login", None) + cluster_info["cluster_oc_adm"] = access_timers.get("cluster_oc_adm", None) + if not cluster_info["kubeconfig"]: + self.logging.error(f"Failed to download kubeconfig file for cluster {cluster_name}. Disabling wait for workers and workload execution") + cluster_info["workers_wait_time"] = None + cluster_info["status"] = "Ready. 
Not Access" + return 1 + if cluster_info["workers_wait_time"]: + workers_ready = self._wait_for_workers(cluster_info["kubeconfig"], cluster_info["workers"], cluster_info["workers_wait_time"], cluster_name, "workers") + if workers_ready[1] == cluster_info["workers"]: + cluster_info["workers_ready"] = workers_ready[2] - cluster_start_time + else: + cluster_info['workers_ready'] = None + cluster_info['status'] = "Ready, missing workers" + return 1 + cluster_info['status'] = "ready" + try: + with open(cluster_info['path'] + "/metadata_install.json", "w") as metadata_file: + json.dump(cluster_info, metadata_file) + except Exception as err: + self.logging.error(err) + self.logging.error(f"Failed to write metadata_install.json file located at {cluster_info['path']}") + if self.es is not None: + cluster_info["timestamp"] = datetime.datetime.utcnow().isoformat() + self.es.index_metadata(cluster_info) + + def _wait_for_workers(self, kubeconfig, worker_nodes, wait_time, cluster_name, machinepool_name): + self.logging.info(f"Waiting {wait_time} minutes for {worker_nodes} workers to be ready on {machinepool_name} machinepool on {cluster_name}") + myenv = os.environ.copy() + myenv["KUBECONFIG"] = kubeconfig + result = [machinepool_name] + starting_time = datetime.datetime.utcnow().timestamp() + self.logging.debug(f"Waiting {wait_time} minutes for nodes to be Ready on cluster {cluster_name} until {datetime.datetime.fromtimestamp(starting_time + wait_time * 60)}") + while datetime.datetime.utcnow().timestamp() < starting_time + wait_time * 60: + # if force_terminate: + # logging.error("Exiting workers waiting on the cluster %s after capturing Ctrl-C" % cluster_name) + # return [] + self.logging.info("Getting node information for cluster %s" % cluster_name) + nodes_code, nodes_out, nodes_err = self.utils.subprocess_exec("oc get nodes -o json", extra_params={"env": myenv, "universal_newlines": True}) + try: + nodes_json = json.loads(nodes_out) + except Exception as err: + self.logging.error( + f"Cannot load command result for cluster {cluster_name}. Waiting 15 seconds for next check..." + ) + self.logging.error(err) + time.sleep(15) + continue + nodes = nodes_json["items"] if "items" in nodes_json else [] + + ready_nodes = 0 + for node in nodes: + labels = node.get("metadata", {}).get("labels", {}) + if "node-role.kubernetes.io/worker" in labels and "node-role.kubernetes.io/control-plane" not in labels and "node-role.kubernetes.io/infra" not in labels: + conditions = node.get("status", {}).get("conditions", []) + for condition in conditions: + if "type" in condition and condition["type"] == "Ready": + ready_nodes += 1 + if ready_nodes == worker_nodes: + self.logging.info( + f"Found {ready_nodes}/{worker_nodes} ready nodes on machinepool {machinepool_name} for cluster {cluster_name}. Stopping wait." + ) + result.append(ready_nodes) + result.append(int(datetime.datetime.utcnow().timestamp())) + return result + else: + self.logging.info( + f"Found {ready_nodes}/{worker_nodes} ready nodes on machinepool {machinepool_name} for cluster {cluster_name}. Waiting 15 seconds for next check..." + ) + time.sleep(15) + self.logging.error( + f"Waiting time expired. 
After {wait_time} minutes there are {ready_nodes}/{worker_nodes} ready nodes on {machinepool_name} machinepool for cluster {cluster_name}" + ) + result.append(ready_nodes) + result.append("") + return result + + +class TerraformArguments(RosaArguments): + def __init__(self, parser, config_file, environment): + super().__init__(parser, config_file, environment) +# EnvDefault = self.EnvDefault + + parser.add_argument("--terraform-retry", type=int, default=5, help="Number of retries when executing terraform commands") +# parser.add_argument("--clusters-per-apply", type=int, default=1, help="Number of clusters to install on each terraform apply") +# parser.add_argument("--service-cluster", action=EnvDefault, env=environment, envvar="ROSA_BURNER_HYPERSHIFT_SERVICE_CLUSTER", help="Service Cluster Used to create the Hosted Clusters") + + if config_file: + config = configparser.ConfigParser() + config.read(config_file) + defaults = {} + defaults.update(dict(config.items("Platform:Rosa:Terraform"))) + parser.set_defaults(**defaults) diff --git a/rosa-burner.ini b/rosa-burner.ini index eb5a9fe..14f0a11 100644 --- a/rosa-burner.ini +++ b/rosa-burner.ini @@ -45,6 +45,9 @@ common_operator_roles = #extra_machinepool_taints = +[Platform:Rosa:Terraform] +terraform_retry = 5 + [Platform:Rosa:Hypershift] create_vpcs = True clusters_per_vpc = 2 diff --git a/rosa-burner.py b/rosa-burner.py index 1a64842..b13f094 100755 --- a/rosa-burner.py +++ b/rosa-burner.py @@ -30,7 +30,7 @@ platform_module_path = "libs.platforms." + arguments["platform"] + "." + arguments["platform"] platform_module = importlib.import_module(platform_module_path) PlatformClass = getattr(platform_module, arguments["platform"].capitalize()) - platform = PlatformClass(arguments, logging, utils) + platform = PlatformClass(arguments, logging, utils, es) except ImportError as err: logging.error("Module not found)") logging.error(err)
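For readers following the refactor above: both subplatform implementations of `get_workers_ready()` introduced by this patch reduce to the same pattern of shelling out to `oc get nodes -o json` and counting nodes whose `Ready` condition is `"True"` (HyperShift filters on the `hypershift.openshift.io/nodePool` label, Terraform on the worker/control-plane/infra role labels). A minimal standalone sketch of that pattern, outside the rosa-burner class hierarchy; the function name `count_ready_workers` is illustrative and not part of the patch, and it assumes `oc` is available on the PATH:

```
#!/usr/bin/env python
# Standalone sketch (not part of the patch): approximates the ready-worker count
# that both get_workers_ready() implementations derive from `oc get nodes -o json`.
import json
import os
import subprocess


def count_ready_workers(kubeconfig, nodepool_label=None):
    """Return the number of worker nodes whose Ready condition is "True".

    If nodepool_label is given (e.g. a HyperShift nodePool name), only nodes whose
    hypershift.openshift.io/nodePool label contains that value are counted;
    otherwise plain worker nodes (no control-plane/infra role) are counted.
    """
    env = os.environ.copy()
    env["KUBECONFIG"] = kubeconfig
    try:
        out = subprocess.run(
            ["oc", "get", "nodes", "-o", "json"],
            env=env, capture_output=True, universal_newlines=True, check=True,
        ).stdout
        nodes = json.loads(out).get("items", [])
    except (subprocess.SubprocessError, ValueError, OSError):
        return 0  # mirror the patch: any failure counts as zero ready workers

    ready = 0
    for node in nodes:
        labels = node.get("metadata", {}).get("labels", {})
        if nodepool_label is not None:
            if nodepool_label not in labels.get("hypershift.openshift.io/nodePool", ""):
                continue
        elif ("node-role.kubernetes.io/worker" not in labels
              or "node-role.kubernetes.io/control-plane" in labels
              or "node-role.kubernetes.io/infra" in labels):
            continue
        for condition in node.get("status", {}).get("conditions", []):
            if condition.get("type") == "Ready" and condition.get("status") == "True":
                ready += 1
    return ready


if __name__ == "__main__":
    print(count_ready_workers("/path/to/kubeconfig"))
```

The count returned this way is what the shared `watcher()` now living in `rosa.py` compares against each cluster's `nodes.compute` value to decide when every requested cluster has all of its workers ready.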