diff --git a/Jenkinsfile b/Jenkinsfile new file mode 100644 index 0000000..7375b1b --- /dev/null +++ b/Jenkinsfile @@ -0,0 +1,73 @@ +pipeline { + + agent { + node { + label 'aws-ec2' + } + } + + triggers { + gitlab(triggerOnPush: true, branchFilterType: 'All') + } + + + options { + gitLabConnection('Intel-Gitlab') + gitlabCommitStatus(name: 'jenkins') + } + + + stages { + stage('Setup') { + steps { + sh ''' + while pgrep apt > /dev/null; do + sleep 10 + done + sudo apt-get update + sudo apt-get install -y python3 python3-pip + pip3 install setuptools + ''' + } + } + stage('Build') { + steps { + sh ''' + sudo python3 setup.py install + sudo pip3 install pyinstaller + sudo pip3 install defusedxml + sudo pyinstaller edgesoftware.spec + ''' + } + } + stage('Validate') { + steps { + sh ''' + python3 test/functional/test.py + python3 test/unit/test_utils.py + ''' + } + } + } + + post { + success { + emailext( + body: '$DEFAULT_CONTENT', + replyTo: '$DEFAULT_REPLYTO', + subject: '$DEFAULT_SUBJECT', + to: '$gitlabUserEmail', + ) + updateGitlabCommitStatus name: 'build', state: 'success' + } + failure { + emailext( + body: '$DEFAULT_CONTENT', + replyTo: '$DEFAULT_REPLYTO', + subject: '$DEFAULT_SUBJECT', + to: '$gitlabUserEmail', + ) + updateGitlabCommitStatus name: 'build', state: 'failed' + } + } +} diff --git a/LICENSE.pdf b/LICENSE.pdf new file mode 100644 index 0000000..735dfd0 Binary files /dev/null and b/LICENSE.pdf differ diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 0000000..ace02f0 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,23 @@ +Copyright May 2021 Intel Corporation + +This software and the related documents are Intel copyrighted materials, and your use of them is governed by the express license under which they were provided to you ("License"). Unless the License provides otherwise, you may not use, modify, copy, publish, distribute, disclose or transmit this software or the related documents without Intel's prior written permission. 
+ +This software and the related documents are provided as is, with no express or implied warranties, other than those that are expressly stated in the License. + + +Third party components and their licenses: + +python3-pip: https://pypi.org/project/pip/ +PyYAML - https://github.com/yaml/pyyaml +click - https://pypi.org/project/click/ +wget - https://pypi.org/project/wget/ +requests - https://pypi.org/project/requests/ +termcolor - https://pypi.org/project/termcolor/ +git - https://github.com/git/git +curl - https://curl.haxx.se/, https://curl.haxx.se/docs/copyright.html +Prettytable - https://pypi.org/project/PrettyTable/ +psutil - https://pypi.org/project/psutil/ +docker - https://pypi.org/project/docker/ +colorama - https://pypi.org/project/colorama/ +tqdm - https://pypi.org/project/tqdm/ + diff --git a/README.md b/README.md new file mode 100644 index 0000000..c4c4edf --- /dev/null +++ b/README.md @@ -0,0 +1,172 @@ +## Command Line Interface for Intel® Edgesoftware + +*edgesoftware* is a command line interface wrapper (CLI) that helps you manage [Intel® Edge Software Hub packages](https://software.intel.com/content/www/us/en/develop/topics/iot/edge-solutions.html). With these pretested and pre-validated packages you can create reliable, scalable AI applications for the edge. + + + +### Installation + +#### Requirements + +- Minimum Python* version: 3.6 + +- To install the software package, you will need an XML configuration file (*edgesoftware_configuration.xml*) along with the CLI. There is a dedicated XML configuration file for each software package (for example, Edge Insights for Vision) which you can find on [Intel® Edge Software Hub](https://www.intel.in/content/www/in/en/edge-computing/edge-software-hub.html). + + + +#### System Requirements + +The table below lists supported operating systems and Python versions required to run the installation. 
+ +| Supported Operating System | [Python* Version (64-bit)](https://www.python.org/) | +| :-------------------------------------------- | :-------------------------------------------------- | +| Ubuntu* 18.04 long-term support (LTS), 64-bit | 3.6 | +| Ubuntu* 20.04 long-term support (LTS), 64-bit | 3.8 | +| Ubuntu* 22.04 long-term support (LTS), 64-bit | 3.10 | +| Red Hat* Enterprise Linux* 8, 64-bit | 3.6 | +| CentOS* 7, 64-bit | 3.6 | + +> **NOTE**: This package can be installed on other versions of Linux, but only the specific versions above are fully validated. + + + +#### Install edgesoftware CLI + +##### Step 1: Install and update PIP to the highest version + +To install pip in Ubuntu 18.04, Ubuntu 20.04 and Ubuntu 22.04 + +```shell +sudo apt install python3-pip +``` + +To install pip in CentOS 7 and RHEL 8: + +``` +sudo yum install python3-pip +``` + +Run the command below to upgrade pip to the latest version: + +``` +python3 -m pip install --upgrade pip +``` + +##### Step 2. Install the package + +Run the command below: + +```shell +python3 -m pip install edgesoftware --user +``` + +##### Step 3. If needed, launch a new terminal and verify that package is installed + +Run the command below: + +```shell +edgesoftware -v +``` + +You will not see any error messages if installation finished successfully. + + + + ### Using the edgesoftware CLI + +**Learn all of the commands available with *edgesoftware* CLI** + +Run the command below: + +```shell +edgesoftware +``` + +**Response:** + +> Usage: edgesoftware [OPTIONS] COMMAND [ARGS]... + +> A CLI wrapper for management of Intel® Edge Software Hub packages. + +> **Options:** +> +> > | options | description | +> > | ------------- | -------------------------- | +> > | -v, --version | Show the version and exit. 
| +> > | --help | Show this message and exit | +> > +> > + +> **Commands:** +> +> > | commands | description | +> > | --------- | ------------------------------------------------------ | +> > | docker | Pull docker images | +> > | download | Download modules/artifacts of a package. | +> > | export | Export modules installed as part of a package. | +> > | helm | Download Helm charts or update Kubernetes secret keys. | +> > | install | Install modules of a package. | +> > | list | List the modules of a package. | +> > | log | Show log of CLI events. | +> > | uninstall | Uninstall the modules of a package. | +> > | update | Update the modules of a package. | +> > | upgrade | Upgrade a package. | + + + +#### Learn more about a command + +Run the command below: + +```shell +edgesoftware --help +``` + +Example: + +```shell +edgesoftware list --help +``` + +Response: + +> Usage: edgesoftware list [OPTIONS] + +> List the modules of a package. + +> Options: + +> > | options | description | +> > | ------------- | -------------------------------------- | +> > | -v, --version | Lists available packages. | +> > | -j, --json | Return output in json format. | +> > | -d, --default | Lists the default modules of a package | +> > | --help | Show this message and exit | + + + +### Troubleshooting + +**Error: *edgesoftware* command may not respond after installation.** + +To mitigate this issue, close the current terminal and open a new terminal. The command should work in the newer terminal. + +**Error: *python3 -m pip install edgesoftware* may fail due to failed installation of cryptography package.** + +To resolve this issue, upgrade pip to latest version using *python3 -m pip install --upgrade pip* and then retry the installation. + +**Error: *edgesoftware install* command may fail first time giving error - missing esb_common.** + +To resolve this issue, try edgesoftware install command once again. This time you should be able to install the modules. 
+ + + +### Additional Resources + +Refer to [Introduction to the Edge Software Hub CLI](https://www.intel.com/content/www/us/en/develop/documentation/edge-insights-vision-doc/get-started-guide-using-linux/intro-to-the-edge-software-cli.html) for more information on CLI commands. + + + +### License + +All *edgesoftware* wheels on PyPI are distributed under [LIMITED TOOLS LICENSE AGREEMENT](https://software.intel.com/content/dam/develop/external/us/en/documents/limited-tools-license-agreement.pdf). diff --git a/README.rst b/README.rst new file mode 100644 index 0000000..afd8fa7 --- /dev/null +++ b/README.rst @@ -0,0 +1,81 @@ +================================================================== +Edge Software Builder: Command Line Interface for Package Building +================================================================== + +This is a client library for installing modules in a package. +It provides a command-line tool (edgesoftware). + +Installation Steps +------------------ +edgesoftware CLI can be installed from sources. This is a preferred and quicker way for Developers. 
+ +************ +For Linux: +************ + + +One time setup only: +^^^^^^^^^^^^^^^^^^^^ + + $ *sudo apt-get update && sudo apt install python3-pip -y* (On Ubuntu/Debian based systems) + + $ *sudo yum update -y && sudo yum install gcc python3-devel python3-pip -y* (On CentOS/RHEL based systems) + + $ *pip3 install --upgrade pip* + + $ *pip3 install -r requirements.txt* + +**Generate the Binary:** + + $ *sudo python3 setup.py install* + +************ +For Windows: +************ + +One time setup only +^^^^^^^^^^^^^^^^^^^ + + $ *pip install --upgrade pip* + + $ *pip install -r requirements.txt* + +**Generate the Binary:** + + $ *python setup.py install* + + +Generate Python Executable +-------------------------- + +**Preferred way for Validation and E2E testing::** + +To generate ``edgesoftware`` executable for **Linux** based systems:: + + $ git clone --single-branch https://gitlab.devtools.intel.com/software_recipe/software_recipe_components/common + $ cd common + $ python3 build_common.py + $ cd - + $ sudo python3 setup.py install && sudo rm -rf build/ dist/ edgesoftware.egg-info + $ pyinstaller edgesoftware.spec + +The executable will be available at ``dist/edgesoftware``. + +Usage +----- + +Use the ``edgesoftware`` command to download, install, list, log, pull, update and upgrade +packages/modules in a recipe. 
For help run:: + + $ edgesoftware --help + +For example:: + + $ edgesoftware install + +Running Tests +------------- + +To run tests:: + + $ python3 test/functional/test.py diff --git a/__init__.py b/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/config.ini b/config.ini new file mode 100644 index 0000000..1700458 --- /dev/null +++ b/config.ini @@ -0,0 +1,5 @@ +[default] +service_layer_url = http://mc-qa-service-layer.apps1-ir-int.icloud.intel.com/ +#service_layer_url = http://mcdevservicelayeresb.apps1-ir-int.icloud.intel.com/ +#service_layer_url = https://eshqaservicelayer.intel.com/ +esh_cli_dev = 250cb3ee-d37a-409e-abae-3991c78904f9 diff --git a/doc/requirements.txt b/doc/requirements.txt new file mode 100644 index 0000000..103fa69 --- /dev/null +++ b/doc/requirements.txt @@ -0,0 +1,2 @@ +sphinx!=1.6.6,!=1.6.7,!=2.1.0,>=1.6.2;python_version>='3.4' # BSD +reno>=2.5.0 # Apache-2.0 diff --git a/doc/source/README.rst b/doc/source/README.rst new file mode 100644 index 0000000..cbcbbc4 --- /dev/null +++ b/doc/source/README.rst @@ -0,0 +1,44 @@ +================================================================== +Edge Software Builder: Command Line Interface for Package Building +================================================================== + +This is a client library for installing modules in a package. +It provides a command-line tool (edgesoftware). + +Installation +------------ + +To install the ``edgesoftware`` command:: + + $ sudo apt-get update && sudo apt install python3-pip -y && pip3 install setuptools + + $ sudo python3 setup.py install + +Usage +----- + +Use the ``edgesoftware`` command to install, list, update and upgrade +packages in a recipe. 
For help run:: + + $ edgesoftware --help + +For example:: + + $ edgesoftware install + +Running Tests +------------- + +To run tests:: + + $ python3 test/functional/test.py + +Generate Python Executable +-------------------------- + +To generate ``edgesoftware`` executable:: + + $ pip3 install pyinstaller + $ pyinstaller edgesoftware.spec + +The executable will be available at ``dist/edgesoftware``. diff --git a/doc/source/conf.py b/doc/source/conf.py new file mode 100644 index 0000000..bb09e84 --- /dev/null +++ b/doc/source/conf.py @@ -0,0 +1,59 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) + + +# -- Project information ----------------------------------------------------- + +project = 'Edge Software Hub' +copyright = '2020' +author = '' + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. 
+language = 'en' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = [] + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'alabaster' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = [] diff --git a/doc/source/index.rst b/doc/source/index.rst new file mode 100644 index 0000000..8fca00b --- /dev/null +++ b/doc/source/index.rst @@ -0,0 +1,19 @@ +Welcome to Edge Software Hub CLI documentation! +=============================================== + +Contents +-------- + +.. toctree:: + :maxdepth: 2 + + README + +.. 
only:: html + + Indices and tables + ------------------ + + * :ref:`genindex` + * :ref:`modindex` + * :ref:`search` diff --git a/edgesoftware.spec b/edgesoftware.spec new file mode 100644 index 0000000..a3d9956 --- /dev/null +++ b/edgesoftware.spec @@ -0,0 +1,37 @@ +# -*- mode: python ; coding: utf-8 -*- + +block_cipher = None +import os + +a = Analysis(['edgesoftware/edgesoftware.py'], + pathex=[os.getcwd()], + binaries=[], + datas=[('lanternrocksdk-linux-3.0.90/python/lanternrock/linux/libintel-ias3.so', 'lanternrock/linux')], + hiddenimports=['pkg_resources.py2_warn', 'termcolor', 'click', 'colorama', + 'esb_common', 'esb_common.logger', 'esb_common.locale', 'psutil', + 'wget', 'lsb_release', 'filecmp', 'esb_common.util', 'inputimeout', + 'urllib3.exceptions', 'json', 'platform', 'pathlib', 'scp', + 'paramiko', 'ruamel.yaml', 'multiprocessing', 'pexpect', 'inquirer'], + hookspath=[], + runtime_hooks=[], + excludes=[], + win_no_prefer_redirects=False, + win_private_assemblies=False, + cipher=block_cipher, + noarchive=False) +pyz = PYZ(a.pure, a.zipped_data, + cipher=block_cipher) +exe = EXE(pyz, + a.scripts, + a.binaries, + a.zipfiles, + a.datas, + [], + name='edgesoftware', + debug=False, + bootloader_ignore_signals=False, + strip=False, + upx=True, + upx_exclude=[], + runtime_tmpdir=None, + console=True ) diff --git a/edgesoftware/__init__.py b/edgesoftware/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/edgesoftware/common/__init__.py b/edgesoftware/common/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/edgesoftware/common/constants.py b/edgesoftware/common/constants.py new file mode 100644 index 0000000..5c309e1 --- /dev/null +++ b/edgesoftware/common/constants.py @@ -0,0 +1,72 @@ +import platform +import subprocess + + +def get_os_version(): + os_version = subprocess.run( + "hostnamectl | grep Operating | " "cut -d':' -f2 | awk '{$1=$1};$1'", + stdout=subprocess.PIPE, + shell=True, + ) + os_name = 
os_version.stdout.decode("ascii").strip("\n") + if "Ubuntu 18.04" in os_name: + os_name = "Ubuntu 18.04" + elif "Ubuntu 20.04" in os_name: + os_name = "Ubuntu 20.04" + elif "Ubuntu 22.04" in os_name: + os_name = "Ubuntu 22.04" + elif "CentOS" in os_name: + os_name = "CentOS 7" + elif "Red Hat" in os_name: + os_name = "RHEL 8" + elif "Debian GNU/Linux 10" in os_name: + os_name = "Debian 10" + elif "Debian GNU/Linux 11" in os_name: + os_name = "Debian 11" + return os_name + + +# Log colors +CYAN = "\033[36m{}\033[00m" +GREEN = "\033[92m{}\033[00m" +BICYAN = "\033[1;96m{}\033[00m" +RED = "\033[91m{}\033[00m" +WHITE = "\033[37m{}\033[00m" +YELLOW = "\033[93m{}\033[00m" + +# HTTP Status Codes + +HTTP_STATUS = { + "OK": 200, + "CREATED": 201, + "ACCEPTED": 202, + "NO_CONTENT": 204, + "NOT_MODIFIED": 304, + "BAD_REQUEST": 400, + "UNAUTHORIZED": 401, + "NOT_FOUND": 404, + "NOT_ALLOWED": 405, + "CONFLICT": 409, + "UNPROCESSABLE_ENTITY": 422, + "TOO_MANY_REQUESTS": 429, + "SERVER_ERR": 500, + "BAD_GATEWAY": 502, +} + +# Operating system +Operating_system = platform.system() +# Build env +if Operating_system == "Windows": + BUILD_OS = "Windows" +elif Operating_system == "Linux": + BUILD_OS = get_os_version() + +# CLI version-tag + +VERSION = "2022.3" +DATE = "19 August 2022" + +VERSION_TAG = "{}, build date: {}, target OS: {}".format(VERSION, DATE, BUILD_OS) + +# Domains +DOMAINS = ["http://www.google.com", "http://www.baidu.com", "http://www.intel.com"] diff --git a/edgesoftware/common/logger.py b/edgesoftware/common/logger.py new file mode 100644 index 0000000..4bd4d74 --- /dev/null +++ b/edgesoftware/common/logger.py @@ -0,0 +1,75 @@ +# Logger file to generate log statements +import logging +from edgesoftware.common import constants +import sys + + +class Logger(object): + def __init__(self, file_name): + self.LOGGER = logging.getLogger(__file__) + self.LOGGER.setLevel(logging.DEBUG) + FORMATTER = logging.Formatter( + "%(asctime)s - %(levelname)-4s -" " %(message)s", + 
datefmt="%a %b %d %I:%M:%S IST %Y", + ) + self.file_handler = logging.FileHandler(file_name, "a+") + self.file_handler.setFormatter(FORMATTER) + self.LOGGER.addHandler(self.file_handler) + + def info(self, msg): + """ + Method to define logging info + :return: None + """ + self.LOGGER.info(msg) + + def error(self, msg): + """ + Method to define logging error + :return: None + """ + self.LOGGER.error(msg) + + def warn(self, msg): + """ + Method to define logging warning + :return: None + """ + self.LOGGER.warning(msg) + + def console( + self, + log_msg, + print_msg=None, + error=False, + color_code=None, + new_line=True, + flush=False, + ): + """ + Helper method to log and print the message + :param print_msg: Message to print on the terminal + :param log_msg: Message to log to file + """ + if print_msg == None: + print_msg = log_msg + if new_line: + if error: + print(constants.RED.format(print_msg)) + self.error(log_msg) + elif color_code: + print(color_code.format(print_msg)) + self.info(log_msg) + else: + print(print_msg) + self.info(log_msg) + else: + # FIXME: When new_line false, how to handle error and color_code + if flush: + print(print_msg, end="") + sys.stdout.flush() + else: + print(print_msg, end="") + + def clean(self): + self.LOGGER.removeHandler(self.file_handler) diff --git a/edgesoftware/common/service_layer_api.py b/edgesoftware/common/service_layer_api.py new file mode 100644 index 0000000..d6219a7 --- /dev/null +++ b/edgesoftware/common/service_layer_api.py @@ -0,0 +1,1178 @@ +from requests.auth import HTTPBasicAuth +from edgesoftware.common import utils +from edgesoftware.common import constants +from configparser import ConfigParser +from multiprocessing.pool import ThreadPool +import json +import shutil +import gzip +import hashlib +import requests +import os +import sys +import docker +import tarfile + +import urllib3 +import base64 +from tqdm import tqdm + +urllib3.disable_warnings() + + +# FIXME(mkumari): Remove the hardcoded URL + +# 
BASE_URL = "http://servicelayeresb.apps1-bg-int.icloud.intel.com/" +BASE_URL = "https://edgesoftwarehub.intel.com/" + +success_ids = [] +failed_ids = [] +success_helm_ids = [] +failed_helm_ids = [] +success_helm_chart_names = [] + + +def get_service_layer_url(): + parser = ConfigParser() + url = None + if os.path.exists("config.ini"): + parser.read("config.ini") + url = parser.get("default", "service_layer_url") + url = url if url else BASE_URL + return url + + +def get_modules_list(recipe_id, os_id, country_code, log): + base_url = get_service_layer_url() + url = "".join( + [ + base_url, + "recipe/supportedModules/{}?order=display&osId={}&countryCode={}".format( + recipe_id, os_id, country_code + ), + ] + ) + resp = None + try: + modules_list = requests.get(url) + if modules_list.status_code == constants.HTTP_STATUS.get("OK"): + resp = modules_list.json() + else: + log.error("Failed to retrieve the supported modules from URL.") + except requests.ConnectionError as e: + log.console( + "Failed to connect. Please check the Internet connection" + " and proxy settings and retry. {}".format(e), + error=True, + ) + return resp + + +def get_components_list(recipe_id, os_id, log): + base_url = get_service_layer_url() + url = "".join( + [base_url, "recipe/{}?order=display&osId={}".format(recipe_id, os_id)] + ) + resp = None + try: + components_list = requests.get(url) + if components_list.status_code == constants.HTTP_STATUS.get("OK"): + resp = components_list.json() + else: + msg = "Failed to get the list of modules. {} {}".format( + components_list.status_code, components_list.reason + ) + log.console(msg, error=True) + except requests.ConnectionError as e: + log.console( + "Failed to connect. Please check the Internet connection" + " and proxy settings and retry. 
{}".format(e), + error=True, + ) + return resp + + +def get_supported_recipes(recipe_name, log): + base_url = get_service_layer_url() + url = "".join([base_url, "recipe/getAllVersionByName/", recipe_name]) + resp = None + try: + recipes_list = requests.get(url) + if recipes_list.status_code == constants.HTTP_STATUS.get("OK"): + resp = recipes_list.json() + else: + msg = "Failed to get the list of packages. {} {}".format( + recipes_list.status_code, recipes_list.reason + ) + log.console(msg, error=True) + except requests.ConnectionError as e: + log.console( + "Failed to connect. Please check the Internet connection" + " and proxy settings and retry. {}".format(e), + error=True, + ) + return resp + + +def get_update_components(components_id_list, recipe_id, product_key, log): + base_url = get_service_layer_url() + data = { + "components": components_id_list, + "recipeId": recipe_id, + "productKey": product_key, + } + url = "".join([base_url, "ingredient/update_ingredient/"]) + try: + components_list = requests.post(url, json=data) + if components_list.status_code == constants.HTTP_STATUS.get("OK"): + return components_list.content + else: + msg = "Failed to get the list of modules. {}.".format( + components_list.json()["message"] + ) + err_msg = "Failed to get the list of modules. {} {} {}.".format( + components_list.status_code, + components_list.reason, + components_list.json()["message"], + ) + log.console(msg, error=True) + log.error(err_msg) + except requests.ConnectionError as e: + log.console( + "Failed to connect. Please check the Internet connection" + " and proxy settings and retry. 
{}".format(e), + error=True, + ) + return None + + +def get_upgrade_package(data): + base_url = get_service_layer_url() + url = "".join([base_url, "recipe/upgrade/"]) + try: + package_details = requests.post(url, json=data) + if package_details.status_code == constants.HTTP_STATUS.get("OK"): + return package_details.content + else: + print( + constants.RED.format( + "Failed to get the upgrade details. {} {}".format( + package_details.status_code, package_details.reason + ) + ) + ) + except requests.ConnectionError as e: + print( + "Failed to connect. Please check the Internet connection" + " and proxy settings and retry. {}".format(e) + ) + + +def get_upgrade_list(package_type, os_id, log): + base_url = get_service_layer_url() + url = "".join( + [base_url, "recipe/getAllByIrcId/{}?osId={}".format(package_type, os_id)] + ) + resp = None + try: + components_list = requests.get(url) + if components_list.status_code == constants.HTTP_STATUS.get("OK"): + resp = components_list.json() + else: + msg = "Failed to get the list of packages. {} {}".format( + components_list.status_code, components_list.reason + ) + log.console(msg, error=True) + except requests.ConnectionError as e: + log.console( + "Failed to connect. Please check the Internet connection" + " and proxy settings and retry. 
{}".format(e), + error=True, + ) + return resp + + +def validate_product_key(log, product_key, recipe_id): + """ + Validate product key for package + :param product_key: Customer obtained product key + :param recipe_id: ID of the Package + """ + try: + log.console("Validating package product key", color_code=constants.CYAN) + base_url = get_service_layer_url() + data = {"recipeId": recipe_id, "productKey": product_key} + url = "".join([base_url, "recipe/validateProductKey"]) + resp = requests.post(url, json=data) + if resp.status_code == constants.HTTP_STATUS.get("UNAUTHORIZED"): + return False + elif resp.status_code == constants.HTTP_STATUS.get("OK"): + return True + else: + log.console( + "Failed to validate Product Key. {} {}".format( + resp.status_code, resp.reason + ), + error=True, + ) + return False + except requests.ConnectionError as e: + log.console( + "Failed to connect. Please check the Internet connection" + " and proxy settings and retry. {}".format(e), + error=True, + ) + sys.exit(-1) + + +def get_download_status(): + return success_ids, failed_ids, success_helm_ids, failed_helm_ids + + +def fetch_helm( + log, prod_key, name, tag, helm_chart_id, src_dir, registry_type=None, unzip=False +): + """ + Fetch Helm charts from Service Layer + + Args: + log (obj): Logger Object + prod_key (string): Product Key Value + name (string): Name of the Helm chart + tag (string): Helm chart Tag + helm_chart_id (string): Helm chart id. + unzip (bool, optional): If the Helm chart is to be Extracted. Defaults to False. 
+ """ + base_url = get_service_layer_url() + valid = True + global success_helm_ids + global failed_helm_ids + global success_helm_chart_names + if registry_type is None: + registry_type, _ = get_helm_registry_credentials(name, tag, log) + + data = {"id": helm_chart_id, "productKey": prod_key} + url = "".join([base_url, "helmChart/download"]) + helm_chart_name = name + "-" + tag + if src_dir: + tar_path = os.path.join(src_dir, helm_chart_id + ".tgz") + else: + tar_path = os.path.join(helm_chart_id + ".tgz") + download = False + num_tries = 0 + utils.component_valid[helm_chart_name] = None + if os.path.exists(tar_path): + if registry_type == "intelprivate": + log.console( + "Helm chart package {} already exists. " + "Validating it...".format(helm_chart_name), + color_code=constants.CYAN, + ) + ret = hashlib.md5(open(tar_path, "rb").read()).hexdigest() + valid = validate_helm_chart(helm_chart_id, ret, log) + else: + log.console( + "Helm chart package {} already exists. ".format(helm_chart_name), + color_code=constants.CYAN, + ) + + utils.component_valid[helm_chart_name] = valid + if not valid: + log.console( + "Validation failed, re-downloading Helm chart package {}".format( + helm_chart_name + ), + error=True, + ) + download = True + else: + log.console("Skipping download...", color_code=constants.CYAN) + else: + download = True + if download: + while num_tries < 3: + try: + if unzip: + log.console( + "Downloading Helm chart package {}-{} ".format(name, tag) + ) + + log.info( + "Sending request to download Helm chart package {}-{} id : {} ".format( + name, tag, helm_chart_id + ) + ) + resp = requests.post(url, json=data, stream=True) + if resp.status_code == constants.HTTP_STATUS.get("OK"): + size_bytes = int(resp.headers.get("content-length", 0)) + block_size = 1024 + p_bar = tqdm(total=size_bytes, unit="iB", unit_scale=True) + with open(tar_path, "wb") as fd: + for stream_data in resp.iter_content(block_size): + p_bar.update(len(stream_data)) + 
fd.write(stream_data) + p_bar.close() + if registry_type == "intelprivate": + ret = hashlib.md5(open(tar_path, "rb").read()).hexdigest() + valid = validate_helm_chart(helm_chart_id, ret, log) + utils.component_valid[helm_chart_name] = valid + if not valid: + log.console( + "Validation failed, deleting Helm chart package {}".format( + helm_chart_name + ), + error=True, + ) + os.remove(tar_path) + else: + success_helm_ids.append(helm_chart_id) + log.console( + "Successfully downloaded Helm Chart package {}".format( + helm_chart_name + ), + color_code=constants.GREEN, + ) + break + elif resp.status_code < constants.HTTP_STATUS.get("SERVER_ERR"): + failed_helm_ids.append(helm_chart_id) + log.console( + "Failed to download the Helm chart package {}. {} {}".format( + helm_chart_name, resp.status_code, resp.reason + ), + error=True, + ) + break + else: + failed_helm_ids.append(helm_chart_id) + log.console( + "Failed to download the Helm chart package {}. {} {}".format( + helm_chart_name, resp.status_code, resp.reason + ), + error=True, + ) + except requests.ConnectionError as e: + log.console( + "Failed to connect. Please check the Internet " + "connection and proxy settings and retry. {}".format(e), + error=True, + ) + if num_tries == 2: + log.console( + "Failed to connect. Please check the Internet " + "connection and proxy settings. 
Exiting download.", + error=True, + ) + else: + log.console("Retrying Helm chart download", color_code=constants.YELLOW) + num_tries += 1 + + if unzip: + file_path = tar_path.replace("{}.tgz".format(helm_chart_id), helm_chart_name) + if not len(utils.component_valid) or ( + utils.component_valid[helm_chart_name] is None + or not os.path.exists(file_path) + ): + try: + if os.path.exists(tar_path): + with tarfile.open(tar_path, "r:gz") as tar: + # Extract all the contents of tar file + log.console( + "Unzipping Helm chart {}...".format(helm_chart_name) + ) + tar.extractall(helm_chart_name) + success_helm_chart_names = [helm_chart_name] + success_helm_ids = [helm_chart_id] + + except Exception as e: + log.console("Failed to unzip Helm chart. {}".format(e)) + + +def get_helm_pull_status(): + return success_helm_chart_names, success_helm_ids + + +def fetch_ingredient( + prod_key, ingredient_name, recipe_id, os_id, ingredient_id, src_dir, log +): + base_url = get_service_layer_url() + data = { + "component": ingredient_id, + "recipeId": recipe_id, + "osId": os_id, + "productKey": prod_key, + } + url = "".join([base_url, "ingredient/download"]) + zip_path = os.path.join(src_dir, ingredient_id + ".zip") + download = False + num_tries = 0 + utils.component_valid[ingredient_name] = None + if os.path.exists(zip_path): + log.console( + "ZIP file for module {} already exists. 
" + "Validating it...".format(ingredient_id), + color_code=constants.CYAN, + ) + ret = hashlib.md5(open(zip_path, "rb").read()).hexdigest() + valid = validate_ingredient(ingredient_id, ret, log) + utils.component_valid[ingredient_name] = valid + if not valid: + log.console( + "Validation failed, re-downloading module {}".format(ingredient_id), + error=True, + ) + download = True + else: + log.console("Skipping download...", color_code=constants.CYAN) + else: + download = True + if download: + while num_tries < 3: + try: + log.info("Sending request to download module {}".format(ingredient_id)) + resp = requests.post(url, json=data, stream=True) + if resp.status_code == constants.HTTP_STATUS.get("OK"): + size_bytes = int(resp.headers.get("content-length", 0)) + block_size = 1024 + p_bar = tqdm(total=size_bytes, unit="iB", unit_scale=True) + with open(zip_path, "wb") as fd: + for stream_data in resp.iter_content(block_size): + p_bar.update(len(stream_data)) + fd.write(stream_data) + p_bar.close() + ret = hashlib.md5(open(zip_path, "rb").read()).hexdigest() + valid = validate_ingredient(ingredient_id, ret, log) + if not valid: + log.console( + "Validation failed, deleting module {}".format( + ingredient_id + ), + error=True, + ) + os.remove(zip_path) + else: + success_ids.append(ingredient_id) + log.console( + "Successfully downloaded module {}".format(ingredient_name), + color_code=constants.GREEN, + ) + break + elif resp.status_code < 500: + failed_ids.append(ingredient_id) + log.console( + "Failed to download the module {}. {} {}".format( + ingredient_name, resp.status_code, resp.reason + ), + error=True, + ) + break + else: + failed_ids.append(ingredient_id) + log.console( + "Failed to download the module {}. {} {}".format( + ingredient_name, resp.status_code, resp.reason + ), + error=True, + ) + except requests.ConnectionError as e: + log.console( + "Failed to connect. Please check the Internet " + "connection and proxy settings and retry. 
{}".format(e), + error=True, + ) + if num_tries == 2: + failed_ids.append(ingredient_id) + log.console( + "Failed to connect. Please check the Internet " + "connection and proxy settings. Exiting download.", + error=True, + ) + else: + log.console("Retrying module download", color_code=constants.YELLOW) + num_tries += 1 + + +def validate_ingredient(ingredient_id, ingredient_hash, log): + base_url = get_service_layer_url() + data = {"id": ingredient_id, "value": ingredient_hash} + url = "".join([base_url, "ingredient/validate"]) + try: + log.info( + "Sending request to validate module {} with hash value {}".format( + ingredient_id, ingredient_hash + ) + ) + resp = requests.post(url, json=data) + if resp.status_code == constants.HTTP_STATUS.get("OK"): + log.console( + "Module validation passed for {}".format(ingredient_id), + color_code=constants.GREEN, + ) + return True + else: + msg = "Module validation failed for {}. {} {}".format( + ingredient_id, resp.status_code, resp.reason + ) + log.console(msg, error=True) + except requests.ConnectionError as e: + log.console( + "Failed to connect. Please check the Internet connection" + " and proxy settings and retry. 
{}".format(e), + error=True, + ) + raise e + + +def validate_helm_chart(helm_chart_id, helm_chart_hash, log): + """ + Validate helm Chart MD5 Hash + + Args: + helm_chart_id (String): Helm Chart ID + helm_chart_hash (String): Helm Chart MD5 Hash + log (obj): Logger Object + + Returns: + Bool: Status + """ + base_url = get_service_layer_url() + data = {"id": helm_chart_id, "value": helm_chart_hash} + url = "".join([base_url, "helmChart/validate"]) + try: + log.info( + "Sending request to validate Helm chart {} with hash value {}".format( + helm_chart_id, helm_chart_hash + ) + ) + resp = requests.post(url, json=data) + if resp.status_code == constants.HTTP_STATUS.get("OK"): + log.console( + "Helm chart validation passed for {}".format(helm_chart_id), + color_code=constants.GREEN, + ) + return True + else: + msg = "Helm chart validation failed for {}. {} {}".format( + helm_chart_id, resp.status_code, resp.reason + ) + log.console(msg, error=True) + return False + except requests.ConnectionError as e: + log.console( + "Failed to connect. Please check the Internet connection" + " and proxy settings and retry. {}".format(e), + error=True, + ) + return False + + +def update_ingredient_count(success_ids, failed_ids, log): + base_url = get_service_layer_url() + data = { + "successfullInstallationIds": success_ids, + "failedInstallationIds": failed_ids, + } + url = "".join([base_url, "ingredient/update_installation_count"]) + try: + log.info("Sending installation status {}".format(data)) + resp = requests.post(url, json=data) + if resp.status_code == constants.HTTP_STATUS.get("OK"): + log.info("Installation status was successfully sent.") + else: + log.error( + "Failed to update installation status. {} {}".format( + resp.status_code, resp.reason + ) + ) + except requests.ConnectionError as e: + log.console( + "Failed to connect. Please check the Internet connection" + " and proxy settings and retry. 
{}".format(e), + error=True, + ) + + +def send_telemetry_data(telemetry_data, log): + base_url = get_service_layer_url() + data = telemetry_data + url = "".join([base_url, "analytics"]) + try: + log.info("Sending installation report") + resp = requests.post(url, json=data) + if resp.status_code == constants.HTTP_STATUS.get("OK"): + log.info("Installation report was successfully sent.") + else: + log.error( + "Failed to send installation report. {} {}".format( + resp.status_code, resp.reason + ) + ) + except requests.ConnectionError as e: + log.error( + "Failed to connect. Please check the Internet connection" + " and proxy settings and retry. {}".format(e) + ) + + +def validate_docker_image_productkey(imageId, product_key, log): + """ + Validate product key + :param imageId: UUID of the Image + :param product_key: product_key of the user + """ + try: + base_url = get_service_layer_url() + data = {"imageId": imageId, "productKey": product_key} + url = "".join([base_url, "docker/validate/productkey"]) + resp = requests.post(url, json=data) + if resp.status_code == constants.HTTP_STATUS.get("OK"): + return True, resp.json()["token"] + else: + log.console( + "Failed to validate Product Key. {} {}".format( + resp.status_code, resp.reason + ), + error=True, + ) + return False + except requests.ConnectionError as e: + log.console( + "Failed to connect. Please check the Internet connection" + " and proxy settings and retry. {}".format(e), + error=True, + ) + sys.exit(-1) + + +def validate_docker_image(image, tag, product_key, log): + """ + Validate docker image and its availability in Intel Reg. 
+ :param image: Docker image name + :param tag: Docker image tag + :param product_key: product_key of the user + """ + try: + base_url = get_service_layer_url() + log.console("Checking Intel registry for {}:{}".format(image, tag)) + url = "".join([base_url, "docker/image?name={}&tag={}".format(image, tag)]) + resp = requests.get(url) + if resp.status_code == constants.HTTP_STATUS.get("OK"): + if str(resp.text): + return resp.json()[0] + else: + return False + else: + return False + except requests.ConnectionError as e: + log.console( + "Failed to connect. Please check the Internet connection" + " and proxy settings and retry. {}".format(e), + error=True, + ) + sys.exit(-1) + + +def fetch_base_image(base_img, base_img_digest, log): + """ + Fetch Base Image Layer + :param registry: Registry to pull the docker image + :param auth_url: Registry Authentication URL + :param registry_service: Registry service of docker registry + :param repository: Repo to download the image + :param digest: image identified with the digest + """ + try: + log.console( + "Pulling Base Image from {}@{}".format(base_img, base_img_digest), + color_code=constants.GREEN, + ) + client = docker.from_env() + with tqdm( + total=1, + desc="Downloading", + unit="B", + unit_scale=True, + unit_divisor=1024, + position=0, + ascii=False, + ) as download_bar: + with tqdm( + total=1, + desc="Extracting", + unit="B", + unit_scale=True, + unit_divisor=1024, + position=1, + ascii=False, + ) as extract_bar: + download_progress = {} + extract_progress = {} + for line in client.api.pull( + base_img, tag=base_img_digest, stream=True, decode=True + ): + if "status" in line: + if line["status"] == "Downloading": + utils.docker_progress_bar( + line, download_bar, download_progress + ) + elif line["status"] == "Extracting": + utils.docker_progress_bar( + line, extract_bar, extract_progress + ) + + if download_progress: + utils.docker_final_update(download_bar,download_progress) + if extract_progress: + 
utils.docker_final_update(extract_bar,extract_progress) + + log.console( + "Status: Base Image saved for {}@{}".format(base_img, base_img_digest) + ) + return utils.image_load_status(base_img, base_img_digest, log) + except Exception as e: + log.console( + "Failed to download base layer. {}".format(e), + error=True, + ) + return False + + +def get_robot_account(imageId, product_key, log): + try: + base_url = get_service_layer_url() + data = {"imageId": imageId, "productKey": product_key} + url = "".join([base_url, "docker/robot"]) + resp = requests.post(url, json=data) + if resp.status_code == constants.HTTP_STATUS.get("OK"): + return True, resp.json() + else: + log.console( + "Failed to validate Product Key. {} {}".format( + resp.status_code, resp.reason + ), + error=True, + ) + return False, "" + except requests.ConnectionError as e: + log.console( + "Failed to connect. Please check the Internet connection" + " and proxy settings and retry. {}".format(e), + error=True, + ) + + +def get_helm_robot_account(helmId, product_key, log): + """ + Get helm robot account + + Args: + helmId (String): Id of Helm Chart + product_key (String): Product Key Value + log (obj): Logger Object + + Returns: + [type]: Robot Account Details + """ + try: + base_url = get_service_layer_url() + data = {"id": helmId, "productKey": product_key} + url = "".join([base_url, "helmChart/getRobotAccount"]) + resp = requests.post(url, json=data) + if resp.status_code == constants.HTTP_STATUS.get("OK"): + return True, resp.json() + else: + log.console( + "Failed to validate Product Key. {} {}".format( + resp.status_code, resp.reason + ), + error=True, + ) + return False, "" + except requests.ConnectionError as e: + log.console( + "Failed to connect. Please check the Internet connection" + " and proxy settings and retry. 
{}".format(e), + error=True, + ) + + +def fetch_image(imageId, image, tag, product_key, log): + """ + Fetch Docker Image from Intel registry + :param registry: Registry to pull the docker image + :param registry_service: Registry service of docker registry + :param repository: Repo to download the image + :param imageId: Docker Image ID + :param image: Docker image name + :param tag: Docker image tag + :param product_key: Product Key required to pull image + :param directory: Directory to pull the layers + """ + + validation_status, data = get_robot_account(imageId, product_key, log) + + if validation_status: + try: + log.console( + "Pulling from Intel Registry {}:{}".format(image, tag), + color_code=constants.GREEN, + ) + client = docker.from_env() + + image_name = data["registry"] + "/" + data["repository"] + "/" + image + auth_config = {} + # Decode the token into username and password + auth_config["username"], auth_config["password"] = ( + base64.b64decode(data["token"]).decode("utf-8").split(":", 1) + ) + + with tqdm( + total=1, + desc="Downloading", + unit="B", + unit_scale=True, + unit_divisor=1024, + position=0, + ascii=False, + ) as download_bar: + with tqdm( + total=1, + desc="Extracting", + unit="B", + unit_scale=True, + unit_divisor=1024, + position=1, + ascii=False, + ) as extract_bar: + download_progress = {} + extract_progress = {} + for line in client.api.pull( + image_name, + tag=tag, + stream=True, + auth_config=auth_config, + decode=True, + ): + if "status" in line: + if line["status"] == "Downloading": + utils.docker_progress_bar( + line, download_bar, download_progress + ) + elif line["status"] == "Extracting": + utils.docker_progress_bar( + line, extract_bar, extract_progress + ) + if download_progress: + utils.docker_final_update(download_bar,download_progress) + if extract_progress: + utils.docker_final_update(extract_bar,extract_progress) + + log.console("Status: Image saved for {}:{}".format(image, tag)) + + full_image_name = image_name + 
":" + tag + new_image_name = image + ":" + tag + client.api.tag(full_image_name, new_image_name, tag=tag, force=True) + client.api.remove_image(full_image_name) + return utils.image_load_status(image, tag, log) + except Exception as e: + msg = "Failed to download docker image. {}" + print_msg = "Failed to download docker image" + log.console(msg.format(e), print_msg, error=True) + else: + return False + + +def check_product_key( + log, recipe_id=None, image=None, tag=None, helm_chart_name=None, helm_chart_tag=None +): + """ + Checks if product key is needed for the Package + + Args: + log (obj): log object + recipe_id (string, optional): Recipe ID of the Package. Defaults to None. + image (string, optional): Image Name of the Image. Defaults to None. + tag (string, optional): Tag of the Image. Defaults to None. + helm_chart (string, optional): Helm chart name. Defaults to None. + + Returns: + Bool: True / False + Multiple Return : Helm chart details + """ + base_url = get_service_layer_url() + resp = True + helm_chart_id = None + helm_chart_type = None + is_helm_credentials = False + + if image is None and helm_chart_name is None: + url = "".join([base_url, "recipe/{}".format(recipe_id)]) + + elif helm_chart_name is not None: + url = "".join( + [ + base_url, + "helmChart/chart?name={}&tag={}".format( + helm_chart_name, helm_chart_tag + ), + ] + ) + + else: + url = "".join([base_url, "docker/image?name={}&tag={}".format(image, tag)]) + try: + status = requests.get(url) + if status.status_code == constants.HTTP_STATUS.get("OK"): + if recipe_id: + resp = status.json()["productKey"] + elif helm_chart_name: + resp = status.json()["productKey"] + helm_chart_id = status.json()["id"] + helm_chart_type = status.json()["registryType"] + is_helm_credentials = status.json()["updateCredentials"] + else: + resp = status.json()[0]["productKey"] + else: + log.console( + "Failed to check Product Key requirement. 
{} {}".format( + status.status_code, status.reason + ), + error=True, + ) + if helm_chart_name: + return None, helm_chart_id, helm_chart_type, is_helm_credentials + sys.exit(-1) + except requests.ConnectionError as e: + log.console( + "Failed to connect. Please check the Internet " + "connection and proxy settings and retry. {}".format(e), + error=True, + ) + sys.exit(-1) + + except ValueError as e: + # For Public Images, SL will not have Value for product key + if image is not None: + resp = False + log.info( + "Public Image - {}:{}. Product Key Status not found. {}".format( + image, tag, e + ) + ) + + elif helm_chart_name is not None: + log.console( + "Server Response Incomplete. {}".format(e), + error=True, + ) + return None, helm_chart_id, helm_chart_type, is_helm_credentials + + else: + log.error("Exception : Product Key Status not found. {}".format(e)) + + except (KeyError, IndexError) as e: + if helm_chart_name is not None: + log.console( + "Server Response Incomplete. {}".format(e), + error=True, + ) + return None, helm_chart_id, helm_chart_type, is_helm_credentials + + log.error("Exception : Product Key Status not found. 
{}".format(e)) + + log.info("Product key requirement status is {}".format(resp)) + + if helm_chart_name: + log.info( + "Helm chart id for {} chart {}-{} is {}".format( + helm_chart_type, helm_chart_name, helm_chart_tag, helm_chart_id + ) + ) + return resp, helm_chart_id, helm_chart_type, is_helm_credentials + return resp + + +def get_config_xml(configuration_id): + """ + Gets configuration XML from service layer + + :param configuration_id: Unique ID for package which user gets from ESH-UI + """ + + base_url = get_service_layer_url() + url = "".join([base_url, "downloadedconfiguration/{}".format(configuration_id)]) + try: + xml_file = requests.get(url) + if xml_file.status_code == constants.HTTP_STATUS.get("OK"): + return xml_file.content + else: + print( + constants.RED.format( + "Failed to fetch the Manifest XML file edgesoftware_configuration.xml. " + "{} {}. Exiting. ".format(xml_file.status_code, xml_file.reason) + ) + ) + sys.exit(-1) + except requests.ConnectionError as e: + print( + "Failed to connect. Please check the Internet connection" + " and proxy settings and retry. {}".format(e) + ) + except requests.exceptions.RequestException as e: + print("Failed to connect. " " {}".format(e)) + + +def get_helm_registry_credentials(helm_chart_name, helm_chart_tag, log): + """ + Gets Helm chart Registry Type + + Args: + helm_chart_name (String): Helm Chart Name + helm_chart_tag (String): Helm Chart Tag + log (obj): Logger Object + + Returns: + helm_chart_type, Bool: Chart Type, Credentials Status + """ + try: + base_url = get_service_layer_url() + url = "".join( + [ + base_url, + "helmChart/chart?name={}&tag={}".format( + helm_chart_name, helm_chart_tag + ), + ] + ) + status = requests.get(url) + if status.status_code == constants.HTTP_STATUS.get("OK"): + helm_chart_type = status.json()["registryType"] + is_creds = status.json()["updateCredentials"] + return helm_chart_type, is_creds + else: + log.console( + "Failed to get Helm chart registry Type. 
{} {}".format( + status.status_code, status.reason + ), + error=True, + ) + return None, None + except requests.ConnectionError as e: + log.console( + "Failed to connect. Please check the Internet connection" + " and proxy settings and retry. {}".format(e), + error=True, + ) + + +def get_recipe_details(recipe_id, log): + """ + Get package and module details from SL + + :Returns: package and module details + """ + try: + base_url = get_service_layer_url() + url = "".join([base_url, "recipe/{}".format(recipe_id)]) + resp = None + recipe_details = requests.get(url) + if recipe_details.status_code == constants.HTTP_STATUS.get("OK"): + resp = recipe_details.json() + else: + log.error("Failed to retrieve the package and module details from URL.") + except requests.ConnectionError as e: + log.console( + "Failed to connect. Please check the Internet connection" + " and proxy settings and retry. {}".format(e), + error=True, + ) + return resp + + +def download_package_artifacts( + log, recipe_id, temp_file=None, remove_previous=False, export=False +): + """ + Download the Package Artifacts from Service Layer + + Args: + log (obj): Logger Object + recipe_id (String): Package GUID + temp_file (String, optional): Temporary folder location. Defaults to None. + remove_previous (BOOL, optional): Remove Previous Files. Defaults to False. + export (BOOL, optional): If called from export. 
+ """ + try: + base_url = get_service_layer_url() + url = "".join([base_url, "recipe/otherfiles/{}".format(recipe_id)]) + resp = None + if export: + file_list = [] + recipe_details = requests.get(url) + if recipe_details.status_code == constants.HTTP_STATUS.get("OK"): + resp = recipe_details.json() + if remove_previous and len(resp) == 0: + log.console( + "No artifacts file(s) for the package exist.", + color_code=constants.CYAN, + ) + # Array of files, check if the file is present in the dir if not then download + is_message = True + for artifact in resp: + file_name = artifact["filename"] + download_link = artifact["downloadLink"] + if temp_file: + file_path = os.path.join(temp_file, file_name) + else: + file_path = os.path.join(file_name) + if not os.path.isfile(file_path) or remove_previous: + if is_message: + log.console( + "Downloading artifacts file(s).", color_code=constants.CYAN + ) + is_message = False + artifact_file = requests.get(download_link, stream=True) + if artifact_file.status_code == constants.HTTP_STATUS.get("OK"): + with open(file_path, "wb") as fd: + fd.write(artifact_file.content) + if remove_previous: + log.console( + "Fetched artifacts file {} ".format(file_name), + color_code=constants.GREEN, + ) + else: + log.info("Fetched artifacts file {}. ".format(file_name)) + else: + log.console( + "Failed to fetch the artifacts file {}. {} {}".format( + file_name, + artifact_file.status_code, + artifact_file.reason, + ), + color_code=constants.RED, + ) + + else: + log.info("Artifacts file {} already exists.".format(file_name)) + + if os.path.isfile(file_path) and export: + file_list.append(file_name) + log.info("Artifacts file {} appended.".format(file_name)) + else: + log.error( + "Failed to retrieve the package artifacts details for {}.".format( + recipe_id + ) + ) + + if export: + return file_list + + except requests.ConnectionError as e: + log.console( + "Failed to connect. Please check the Internet connection" + " and proxy settings and retry. 
{}".format(e), + error=True, + ) + sys.exit(-1) + except (KeyError, IndexError, ValueError) as e: + log.error("Exception in artifacts API. {}".format(e)) diff --git a/edgesoftware/common/utils.py b/edgesoftware/common/utils.py new file mode 100644 index 0000000..c5f5bf4 --- /dev/null +++ b/edgesoftware/common/utils.py @@ -0,0 +1,2589 @@ +from __future__ import print_function +import os +import subprocess +from subprocess import run, PIPE, check_output, Popen, DEVNULL, STDOUT +import sys +import tempfile +import urllib.request +import socket +import random +import string +import json +import shutil +import uuid +import requests +import re +import psutil +import math +import tarfile +import shutil +import base64 +import hashlib +import wget +import platform +import docker +import csv + +from inputimeout import inputimeout, TimeoutOccurred +from collections import OrderedDict +from configparser import ConfigParser +import defusedxml.ElementTree as ET +from zipfile import ZipFile +from pathlib2 import Path +from kubernetes import client, config +from kubernetes.client.rest import ApiException + +from edgesoftware.common import constants +from edgesoftware.common import service_layer_api as api +from tqdm import tqdm + + +import argparse +from datetime import datetime, timedelta + +# APP_VERSION registers ESH CLI version into LanternRock portal +APP_VERSION = constants.VERSION +# TID of ESH-CLI-PROD for LanternRock Analytics portal +ESH_CLI_PROD = "155f9474-9241-4336-abd0-c6f557775001" + +if constants.Operating_system == "Linux": + output_dir = "/var/log/esb-cli" +elif constants.Operating_system == "Windows": + output_dir = "/log/esb-cli" +install_status_log = "install_status.json" +manifest_file = "edgesoftware_configuration.xml" +# TODO: check if LR_data and telemetry_data can be made one variable +LR_data = {} +LR_INSTALLED = False +success_container_ids = [] +success_container_names = [] +telemetry_data = {} +source_list = "/etc/apt/sources.list" +pip_conf_file = 
"pip.conf" +OS_Version = "" +if constants.Operating_system == "Linux": + layers_dir = "/.intel/esh-layers/" +elif constants.Operating_system == "Windows": + layers_dir = "/Intel/esh-layers/" +component_valid = {} +whitelist_components = [] +region_flag = 0 +china_supported_modules = [] +if constants.Operating_system == "Windows": + command = subprocess.run("where python", shell=True, stdout=subprocess.PIPE) + site_package = command.stdout.decode("ascii").strip("\n") + sitepackage_location = site_package.splitlines()[0].strip("python.exe") + + +def get_lanternrock_tid(log): + """ + Get tid for ESH-CLI dev from config.ini or + tid for ESH-CLI prod + + :returns: GUID for Lanternrock analytics portal + """ + try: + parser = ConfigParser() + lanternrock_tid = None + if os.path.exists("config.ini"): + parser.read("config.ini") + lanternrock_tid = parser.get("default", "esh_cli_dev") + lanternrock_tid = lanternrock_tid if lanternrock_tid else ESH_CLI_PROD + return lanternrock_tid + except Exception as e: + log.console("Exiting. 
LanternRock TID is missing.") + sys.exit(-1) + + +def get_geolocation(log): + geo_services_info = [ + ["http://ip-api.com/json/", "countryCode", "country"], + ["https://ipapi.co/json/", "country_code", "country_name"], + ["https://ip.useragentinfo.com/json/", "short_name", "country"], + ] + for url, code, country in geo_services_info: + try: + resp = requests.get(url, timeout=5) + if resp.ok: + ret = resp.json() + country_name, country_code = ret[country], ret[code] + log.info(f"Connected to a network in {country_name}") + return country_code + else: + log.info(f"Server {url} returns {resp.status_code}.") + except KeyError: + log.info(f"Failed to get country name from server {url}.") + except Exception as e: + log.info(f"Failed to connect to {url}: {e}") + log.info("Could not detect the geographic location.") + return None + +def identify_geolocation(log): + country_code = get_geolocation(log) + if country_code is None: + return + if country_code.lower() == "cn": + global region_flag + global china_supported_modules + region_flag = 1 + package_id = get_recipe_details(manifest_file)["id"] + os_id = get_recipe_details(manifest_file)["osId"] + china_supported_modules = api.get_modules_list( + package_id, os_id, country_code, log + ) + tree = ET.parse(manifest_file) + root = tree.getroot() + for child in root: + if child.tag in ["project", "default"]: + name = child.attrib["label"] + comp_id = child.attrib["id"] + if china_supported_modules is not None: + if comp_id in china_supported_modules: + whitelist_components.append(name) + else: + whitelist_components.append(name) + display_list = whitelist_components + log.console( + "Connected to a network in China. 
Module availability is restricted in your region.", + color_code=constants.YELLOW, + ) + log.console( + "Modules available for download in your region:", color_code=constants.CYAN + ) + for comp in display_list: + log.console(" {}".format(comp), color_code=constants.CYAN) + check_count = 0 + while True: + if check_count > 9: + log.error("Maximum retries exceeded to override settings.") + break + check_count = check_count + 1 + try: + log.console( + "For a successful installation, enter the URL for a local mirror site" + " for pip and apt package managers" + ) + option = input( + constants.BICYAN.format( + "Do you want to override settings? Enter Yes or No: " + ) + ) + if option.lower() == "yes" or option.lower() == "y": + mirror = input( + constants.BICYAN.format( + "Please enter the URL for a local mirror site for pip and apt package managers: " + ) + ) + log.info("Mirror site entered: {}".format(mirror)) + from urllib.parse import urlparse + import re + + domain = "".join(["http://", mirror]) + connection = urllib.request.urlopen(domain, timeout=10) + if connection.status == constants.HTTP_STATUS.get("OK"): + reg = "http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))" + if re.search(reg, mirror, re.IGNORECASE): + mirror = urlparse(mirror).netloc + + log.console("Modifying settings") + + command = [ + "sudo", + "sed", + "-i", + "-e", + f"s/http:\/\/.*archive\.ubuntu\.com/http:\/\/{mirror}/g", + source_list, + ] + ret = run(command, stdout=PIPE, stderr=PIPE) + + if ret.returncode: + log.console( + "Failed to modify settings for Ubuntu", error=True + ) + + pip_conf_path = os.path.join(Path.home(), ".config/pip") + if not os.path.isdir(pip_conf_path): + Path(pip_conf_path).mkdir(parents=True, exist_ok=True) + + with open( + os.path.join(pip_conf_path, pip_conf_file), "w" + ) as fd: + fd.write("[global]\n") + fd.write("trusted-host = {0}\n".format(mirror)) + fd.write( + "index-url = http://{0}/pypi/simple\n".format(mirror) + ) + break + 
else: + log.console("Failed to connect") + elif option.lower() == "no" or option.lower() == "n": + break + else: + log.console( + "Invalid option. Valid " "options are YES or NO.\n", error=True + ) + except KeyboardInterrupt: + log.console( + "Installation aborted by user. Exiting installation", error=True + ) + sys.exit(-1) + except Exception as e: + log.console( + "Failed to modify settings. Please enter a valid URL. {}".format(e), + error=True, + ) + + +def install_git(log): + """ + Install latest version of git + """ + log.info("Installing Git") + try: + if constants.Operating_system == "Linux": + git_link = ( + "https://packages.endpoint.com/rhel/7/os/x86_64/" + "endpoint-repo-1.7-1.x86_64.rpm" + ) + command = ["sudo", "yum", "remove", "-y", "git*"] + ret = run(command, stdout=PIPE, stderr=PIPE) + if ret.returncode: + log.console( + "Failed to remove existing git", color_code=constants.YELLOW + ) + command = ["sudo", "yum", "-y", "install", git_link] + ret = run(command, stdout=PIPE, stderr=PIPE) + command = ["sudo", "yum", "install", "-y", "git"] + ret = run(command, stdout=PIPE, stderr=PIPE) + elif constants.Operating_system == "Windows": + git_link = ( + "https://github.com/git-for-windows/git/releases/download/" + "v2.29.2.windows.2/Git-2.29.2.2-64-bit.exe" + ) + wget.download(git_link) + ret = subprocess.run( + "Git-2.29.2.2-64-bit.exe /VERYSILENT /NORESTART", shell=True + ) + if ret.returncode: + msg = "Failed to install prerequisites. Exiting installation. {}" + print_msg = "Failed to install prerequisites. Exiting installation." + log.console(msg.format(ret.stderr), print_msg, error=True) + sys.exit(-1) + except Exception as e: + msg = "Failed to install prerequisites. Exiting installation. {}" + print_msg = "Failed to install prerequisites. Exiting installation." 
+ log.console(msg.format(e), print_msg, error=True) + sys.exit(-1) + + +def check_installed(log, check_component): + """ + Check the installation status of the component + :param check_component: The component's status that is checked + """ + output_dir_path = create_output_dir(manifest_file) + install_status_json_path = os.path.join(output_dir_path, install_status_log) + try: + if ( + os.path.exists(install_status_json_path) + and os.stat(install_status_json_path).st_size != 0 + ): + with open(install_status_json_path, "r") as file: + components = json.load(file) + if check_component not in list(components.keys()): + return False + for component, val in components.items(): + if component == check_component: + if val["status"] == "FAILED": + return False + return True + except Exception as e: + log.error( + "Failed to verify the installation status of the component due to error {}".format( + e + ) + ) + + +def get_network_time(): + try: + res = urllib.request.urlopen("http://worldclockapi.com/api/json/utc/now") + date_time = json.loads(res.read()) + return date_time["currentDateTime"] + except Exception as e: + return None + + +def format_component_name(component_name): + length = len(component_name) + for index in range(0, length): + component_name[index] = component_name[index].replace("_", " ") + + +def validate_product_key( + log, product_key, component_key, update=False, upgrade=False, download=False +): + """ + Validate the given product key + :param product_key: product key + """ + output_dir_path = create_output_dir(manifest_file) + install_status_log_path = os.path.join(output_dir_path, install_status_log) + resp = api.validate_product_key(log, product_key, component_key) + if not resp: + if update or upgrade or download: + log.console("Invalid Product Key. Exiting installation", error=True) + return False + else: + if os.path.getsize(install_status_log_path): + log.console( + "[WARNING] Invalid Product Key. 
Continuing " + "installation with local files", + color_code=constants.YELLOW, + ) + return True + else: + log.console("Invalid Product Key. Exiting installation", error=True) + return False + else: + log.console("Successfully validated Product Key", color_code=constants.GREEN) + return True + + +def print_time(seconds): + """ + Prints the time taken to install the component + """ + hour = seconds // 3600 + seconds %= 3600 + minutes = seconds // 60 + seconds %= 60 + if minutes == 0: + return_time = "{:.2f} seconds".format(seconds) + elif hour == 0: + if seconds == 0: + return_time = "{:.0f} minutes".format(minutes) + else: + return_time = "{:.0f} minutes {:.2f} seconds".format(minutes, seconds) + else: + if minutes == 0: + return_time = "{:.0f} hours".format(hour) + else: + return_time = "{:.0f} hour {:.2f} minutes".format(hour, minutes) + return return_time + + +def python_version(log): + """Recommended python version for ESB Cli""" + + RECOMMENDED_PYTHON_VERSION = (3, 6) + + python_ver = sys.version_info + major_version = python_ver.major + minor_version = python_ver.minor + micro_version = python_ver.micro + + log.console( + "Python version: {}.{}.{}".format(major_version, minor_version, micro_version), + color_code=constants.WHITE, + ) + + # Abort on Python 2 and older versions. + + if (major_version, minor_version) < (3, 0): + log.console( + 'Exiting installation. 
Update to Python {}.{} or above "\ + "and restart installation.'.format( + *RECOMMENDED_PYTHON_VERSION + ), + error=True, + ) + sys.exit(-1) + + +def get_component_list(file_name=None, xmlstring=None): + component_list = OrderedDict() + if file_name and os.path.exists(file_name): + tree = ET.parse(file_name) + root = tree.getroot() + details = get_recipe_details(file_name) + elif xmlstring: + root = ET.fromstring(xmlstring) + details = get_recipe_details(xml=root) + else: + return [] + label = details.get("label") + version = details.get("version") + dir_name = "_".join([label, version]) + for child in root: + if child.tag in ["project", "default"]: + name = child.attrib["label"] + comp_id = child.attrib["id"] + path = os.path.join(os.getcwd(), dir_name) + if region_flag: + if china_supported_modules is not None: + if name != "esb_common" and comp_id not in china_supported_modules: + continue + else: + pass + component_list[comp_id] = {"path": path, "label": name} + install = child.attrib.get("esb_install") + if install == "true": + component_list[comp_id].update({"esb_install": True}) + if child.tag in ["image"]: + image_id = child.attrib["id"] + label = child.attrib["label"] + tag = child.attrib["tag"] + component_list[image_id] = {"label": label, "tag": tag, "type": "image"} + if child.tag in ["helm"]: + helm_id = child.attrib["id"] + label = child.attrib["label"] + tag = child.attrib["tag"] + path = os.path.join(os.getcwd(), dir_name) + component_list[helm_id] = { + "label": label, + "tag": tag, + "type": "helm", + "path": path, + } + return component_list + + +def get_recipe_details(file_name=None, xml=None, common=False): + if file_name: + if os.path.exists(file_name): + tree = ET.parse(file_name) + root = tree.getroot() + else: + print( + "Exiting Installation. Failed to find manifest file {}. 
" + "Check the file location and run the command from the " + "folder where the file is located.".format(file_name) + ) + sys.exit(-1) + if xml: + root = xml + + details = {} + for child in root: + if child.tag == "main": + details["label"] = child.attrib.get("label") + details["id"] = child.attrib.get("id") + details["version"] = child.attrib.get("version") + details["packageId"] = child.attrib.get("packageId") + details["osId"] = child.attrib.get("osId") + if common and child.tag == "default": + details["common_id"] = child.attrib.get("id") + details["label"] = child.attrib.get("label") + return details + + +def get_package_from_url(xml_details): + package_id = xml_details.get("id") + package_url = os.path.join(api.get_service_layer_url(), "recipe") + api_packages = requests.get(package_url).json() + package_label = "" + if api_packages: + for items in api_packages: + if items.get("id", "") == package_id: + package_version = items.get("version", "") + package_label = items.get("name", "") + if package_label != "": + package_label = package_label["en"] + return package_label, package_version + if package_label == "": + return "", "" + + +def find_element(root, element): + """ + Finds element in the root Tree + + :param root: Root of the tree + :param element: The element to find + :returns: True if found. False otherwise + """ + for child in root: + if child.tag == "project" or child.tag == "image" or child.tag == "helm": + if child.attrib["label"] == element: + return True + return False + + +def update_xml(file_name, element, log): + """ + Updates the XML file + + :param file_name: XML file path + :param element: List of elements to append + """ + try: + if os.path.exists(file_name): + tree = ET.parse(file_name) + installed_root = tree.getroot() + else: + log.console( + "Manifest XML file {} not found. Exiting " + "installation.".format(file_name), + error=True, + ) + sys.exit(-1) + except ET.ParseError as e: + log.console( + "Failed to parse manifest file {}. 
{}. The manifest file " + "should not be edited manually. Go to the Edge" + "Software Hub to customize the configuration of the " + "platform.".format(file_name, e), + error=True, + ) + sys.exit(-1) + update_root = ET.fromstring(element) + for child in update_root: + if child.tag == "project" or child.tag == "image" or child.tag == "helm": + comp_name = child.attrib["label"] + if not find_element(installed_root, comp_name): + installed_root.append(child) + tree.write(file_name) + + +def write_xml(content): + """ + Writes content to an temporary XML file. + + :param content: An XML string + :returns: File name + """ + scriptFile = tempfile.NamedTemporaryFile(delete=False) + root = ET.fromstring(content) + tree = ET.ElementTree(root) + with open(scriptFile.name, "w") as f: + tree.write(scriptFile.name) + scriptFile.file.close() + return scriptFile.name + + +def download_component(log, product_key, components, recipe_id, os_id): + for component_id, value in components.items(): + if not is_image(value) and not is_helm(value): + src_name = value["label"] + src_path = value["path"] + src_id = component_id + log.console("Downloading component {}".format(src_name)) + try: + if not os.path.isdir(src_path): + os.makedirs(src_path) + api.fetch_ingredient( + product_key, src_name, recipe_id, os_id, src_id, src_path, log + ) + except Exception as e: + log.console( + "Failed to download component {} due to error " + "{}".format(src_name, e), + error=True, + ) + + if is_helm(value): + name = value["label"] + tag = value["tag"] + src_path = value["path"] + src_id = component_id + log.console("Downloading Helm chart {}-{}".format(name, tag)) + try: + if not os.path.isdir(src_path): + os.makedirs(src_path) + api.fetch_helm(log, product_key, name, tag, src_id, src_path) + except Exception as e: + log.console( + "Failed to download Helm chart {} due to error " + "{}".format(src_name, e), + error=True, + ) + + +def extract_file(zf, info, extract_dir): + """ + extracts a zip file with 
file permissions intact + + :param zf: ZipFile object + :param info: elements of list containing ZipInfo objects + :param extract_dir: directory where unzipped file will be extracted + """ + zf.extract(info.filename, path=extract_dir) + out_path = os.path.join(extract_dir, info.filename) + perm = info.external_attr >> 16 + os.chmod(out_path, perm) + + +def install_common(src, manifest, log): + log.console("Installing shared module 'esb_common'", color_code=constants.GREEN) + common_id = get_recipe_details(manifest, common=True)["common_id"] + try: + cwd = os.getcwd() + abs_src_path = os.path.abspath(src["path"]) + zip_file_path = os.path.join(abs_src_path, common_id) + module = src["label"] + zip_file = zip_file_path + ".zip" + module_path = os.path.join(abs_src_path, module) + + if os.path.exists(zip_file): + from zipfile import ZipFile + + with ZipFile(zip_file, "r") as zipObj: + # Extract all the contents of zip file + log.console( + "Unzipping the shared module 'esb_common'...", + color_code=constants.GREEN, + ) + zipObj.extractall(module_path) + else: + log.console( + "Failed to find shared module 'esb_common'. " "Exiting installation.", + error=True, + ) + return False + + os.chdir(os.path.join(module_path)) + if constants.Operating_system == "Linux": + ret = subprocess.run(["sudo", "python3", "setup.py", "install"]) + elif constants.Operating_system == "Windows": + ret = subprocess.run(["python", "setup.py", "install"]) + if ret.returncode: + msg = "Failed to install shared module" " 'esb_common'. {}".format( + ret.stderr + ) + print(constants.RED.format(msg)) + log.error("Failed to run setup.py for esb_common. {}".format(ret.stderr)) + + python_ver = ( + "python" + str(sys.version_info[0]) + "." 
+ str(sys.version_info[1]) + ) + if constants.Operating_system == "Linux": + if "CentOS" in OS_Version or "Red Hat" in OS_Version: + python_lib_path = os.path.join( + "/usr/local/lib/", python_ver, "site-packages/esb_common" + ) + else: + python_lib_path = os.path.join( + "/usr/local/lib/", python_ver, "dist-packages/esb_common" + ) + ret = subprocess.run(["sudo", "mkdir", "-p", python_lib_path]) + + if ret.returncode: + msg = "Failed to install shared module" " 'esb_common'. {}".format( + ret.stderr + ) + print(constants.RED.format(msg)) + log.error( + "Failed to create 'esb_common' directory. {}".format(ret.stderr) + ) + return False + ret = subprocess.run( + [ + "sudo", + "cp", + module_path + "/esb_common/logger.py", + module_path + "/esb_common/locale.py", + module_path + "/esb_common/util.py", + python_lib_path, + ] + ) + if ret.returncode: + msg = "Failed to install shared module" " 'esb_common'. {}".format( + ret.stderr + ) + print(constants.RED.format(msg)) + log.error("Failed to copy 'esb_common' files. {}".format(ret.stderr)) + return False + elif constants.Operating_system == "Windows": + norm_path = os.path.join( + sitepackage_location, "Lib", "site-packages", "esb_common" + ) + python_lib_path = os.path.normpath(norm_path) + if not os.path.exists(python_lib_path): + try: + os.mkdir(python_lib_path) + + except Exception as e: + msg = "Failed to install shared module" " 'esb_common'. {}".format( + e + ) + print(constants.RED.format(msg)) + log.error("Failed to create 'esb_common' directory. {}".format(e)) + return False + + esb_common_files = os.listdir(os.path.join(module_path, "esb_common")) + if len(esb_common_files): + for file in esb_common_files: + if ( + file == "logger.pyd" + or file == "util.pyd" + or file == "locale.pyd" + ): + shutil.copy(os.path.join(module_path, "esb_common", file), python_lib_path) + else: + msg = "Failed to copy shared module 'esb_common'." 
+ print(constants.RED.format(msg)) + log.error("Failed to copy 'esb_common' files.") + return False + except Exception as e: + msg = "Failed to install shared module 'esb_common'. {}".format(e) + log.console(msg, error=True) + return False + finally: + os.chdir(cwd) + log.console( + "Successfully installed shared module 'esb_common'.", color_code=constants.GREEN + ) + return True + + +def create_output_dir(manifest=None): + """ + Create output directory + + :param manifest: Manifest file + :returns: The path of the output directory + """ + try: + path = os.path.join(output_dir) + if manifest: + details = get_recipe_details(manifest) + label = details.get("label") + version = details.get("version") + if not label or not version: + print( + "Failed to get label or version from Manifest XML file. " + "Please check your XML file." + ) + sys.exit(-1) + dir_name = "_".join([label, version]) + path = os.path.join(output_dir, dir_name) + if not os.path.isdir(path): + if constants.Operating_system == "Windows": + os.makedirs(path) + elif constants.Operating_system == "Linux": + subprocess.run(["sudo", "mkdir", "-p", path]) + subprocess.run(["sudo", "chown", os.environ["USER"], path]) + return path + except Exception as e: + print(constants.RED.format("Failed to create log directory. 
{}".format(e))) + sys.exit(-1) + + +def sys_info(log): + """ + Target system information + """ + try: + system_info = {} + global OS_Version + if constants.Operating_system == "Linux": + os_version = subprocess.run( + "hostnamectl | grep Operating | " "cut -d':' -f2 | awk '{$1=$1};$1'", + stdout=subprocess.PIPE, + shell=True, + ) + os_name = os_version.stdout.decode("ascii").strip("\n") + OS_Version = os_name + kernel_version = subprocess.run( + "uname -r", stdout=subprocess.PIPE, shell=True + ) + dec_kernel_version = kernel_version.stdout.decode("ascii").strip("\n") + hardware_arch = subprocess.run( + "uname -p", stdout=subprocess.PIPE, shell=True + ) + dec_hardware_arch = hardware_arch.stdout.decode("ascii").strip("\n") + processor = subprocess.run( + "cat /proc/cpuinfo | grep 'model name' \ + | uniq | cut -d" + ":" + " -f2 | " + "awk '{$1=$1};$1'", + stdout=subprocess.PIPE, + shell=True, + ) + processor_name = processor.stdout.decode("ascii").strip("\n") + vendor = subprocess.run( + "cat /proc/cpuinfo | grep 'vendor' | uniq | cut -d" + ":" + " -f2 | " + "awk '{$1=$1};$1'", + stdout=subprocess.PIPE, + shell=True, + ) + vendor_id = vendor.stdout.decode("ascii").strip("\n") + vendor_id = vendor_id.strip("\t") + memory_size = psutil.virtual_memory() + dec_memory = math.ceil(memory_size.total / (1024 ** 3)) + total = shutil.disk_usage("/").total / (1024 ** 3) + free = shutil.disk_usage("/").free / (1024 ** 3) + + vpu = subprocess.run( + 'lsusb | grep "03e7" | wc -l', shell=True, stdout=subprocess.PIPE + ) + dec_vpu = int(vpu.stdout.decode("utf-8").strip("\n")) + fpga = subprocess.run( + 'lspci | grep "Processing accelerators:.*.Altera" \ + | wc -l', + shell=True, + stdout=subprocess.PIPE, + ) + dec_fpga = int(fpga.stdout.decode("utf-8")) + + cpu_util = psutil.cpu_percent() + manu = subprocess.run( + "sudo dmidecode -t system | grep 'Manufacturer' | cut -d" + ":" + " -f2 | " + "awk '{$1=$1};$1'", + stdout=subprocess.PIPE, + shell=True, + ) + manufacturer_info = 
manu.stdout.decode("ascii").strip("\n") + prod = subprocess.run( + "sudo dmidecode -t system | grep 'Product Name' | cut -d" + ":" + " -f2 | " + "awk '{$1=$1};$1'", + stdout=subprocess.PIPE, + shell=True, + ) + prod_info = prod.stdout.decode("ascii").strip("\n") + platform_info = manufacturer_info + " " + prod_info + system_info.update( + { + "os_name": os_name, + "kernel": dec_kernel_version, + "hardware": dec_hardware_arch, + "processor": processor_name, + "vendor_id": vendor_id, + "memory": dec_memory, + "total_size": total, + "free_size": free, + "vpu": dec_vpu, + "fpga": dec_fpga, + "cpu_util": cpu_util, + "platform_info": platform_info, + } + ) + elif constants.Operating_system == "Windows": + command = platform.platform().split("-") + os_name = command[0] + " " + command[1] + OS_Version = os_name + dec_hardware_arch = platform.architecture()[0] + processor_name = platform.processor() + vendor_id = platform.processor().strip().split(",")[1] + memory_size = psutil.virtual_memory() + dec_memory = math.ceil(memory_size.total / (1024 ** 3)) + total = shutil.disk_usage("/").total / (1024 ** 3) + free = shutil.disk_usage("/").free / (1024 ** 3) + cpu_util = psutil.cpu_percent() + cmd = ["wmic", "computersystem", "get", "model"] + status = subprocess.run(cmd, stdout=subprocess.PIPE) + platform_info = status.stdout.decode("ascii").strip("\n").splitlines()[2] + system_info.update( + { + "os_name": os_name, + "hardware": dec_hardware_arch, + "processor": processor_name, + "vendor_id": vendor_id, + "cpu_util": cpu_util, + "memory": dec_memory, + "total_size": total, + "free_size": free, + "platform_info": platform_info, + } + ) + except Exception as e: + log.console("Failed to read System Information. 
{}".format(e), error=True) + return system_info + + +def print_system_info(system_info, log): + """ + Target system information + """ + os_name = system_info["os_name"] + hardware = system_info["hardware"] + processor = system_info["processor"] + memory = system_info["memory"] + total = system_info["total_size"] + free = system_info["free_size"] + vendor = system_info["vendor_id"] + cpu_util = system_info["cpu_util"] + platform_info = system_info["platform_info"] + if constants.Operating_system == "Linux": + kernel = system_info["kernel"] + vpu = system_info["vpu"] + fpga = system_info["fpga"] + try: + if ( + "Ubuntu 18.04" not in os_name + and "Ubuntu 20.04" not in os_name + and "Ubuntu 22.04" not in os_name + and "CentOS" not in os_name + and "Windows" not in os_name + and "Red Hat" not in os_name + and "Debian" not in os_name + ): + log.console("Unsupported OS. Please check your OS version", error=True) + sys.exit(-1) + if "GenuineIntel" not in vendor: + log.console( + "Intel® Edge Software Hub packages are only " + "supported on Intel® architecture", + error=True, + ) + sys.exit(-1) + if constants.Operating_system == "Linux": + if "CentOS" in os_name or "Red Hat" in os_name: + command = ["sudo", "yum", "install", "usbutils", "-y"] + ret = run(command, stdout=PIPE, stderr=PIPE) + command = ["sudo", "yum", "install", "pciutils", "-y"] + ret = run(command, stdout=PIPE, stderr=PIPE) + hardware_acc_status = False + if vpu != 0: + hardware_acc_status = True + if fpga != 0: + hardware_acc_status = True + + xml_details = get_recipe_details(manifest_file) + package_label, package_version = get_package_from_url(xml_details) + if package_label == "": + log.error("Failed to retrieve package information from url") + log.console("SYSTEM INFO".center(50, "-"), color_code=constants.CYAN) + if package_label != "": + log.console("Package Name: {} {}".format(package_label, package_version)) + else: + package_label = xml_details["label"] + package_version = xml_details["version"] 
+ package_label = package_label.replace("_", " ") + log.console("Package Name: {} {}".format(package_label, package_version)) + log.console("Product Name: {}".format(platform_info)) + log.console("CPU SKU: {}".format(processor)) + log.console("Memory Size: {} GB".format(round(memory))) + log.console("Operating System: {}".format(os_name)) + if constants.Operating_system == "Linux": + log.console("Kernel Version: {}".format(kernel)) + if vpu != 0: + log.console("Accelerator(VPU): {}".format(vpu)) + if fpga != 0: + log.console("Accelerator(FPGA): {}".format(fpga)) + if not hardware_acc_status: + log.console("Accelerator: None") + log.console("CPU Utilization: {}%".format(cpu_util)) + log.console("Available Disk Space: {:.0f} GB".format(free)) + + except Exception as e: + log.console("Failed to read System Information. {}".format(e), error=True) + + +def check_enough_memory(system_info, package_id, log): + avail_memory = system_info["memory"] + avail_disk = system_info["free_size"] + memory_details = api.get_recipe_details(package_id, log) + if memory_details is not None: + min_memory = memory_details.get("memoryRequired", "") + min_disk = memory_details.get("diskRequired", "") + if min_memory == "" and min_disk == "": + return + if float(avail_memory) < float(min_memory) or float(avail_disk) < float( + min_disk + ): + log.console( + "WARNING: Installation may fail. The target device" + " does not meet the minimum system requirement.", + color_code=constants.YELLOW, + ) + log.console( + "Minimum memory requirement for this package: {} GB.".format( + min_memory + ), + color_code=constants.YELLOW, + ) + log.console( + "Minimum disk requirement for this package: {} GB.".format(min_disk), + color_code=constants.YELLOW, + ) + else: + return + + +def modify_module_package_label(label): + """ + Gets a package or module label and modifies + it as per LR portal norms. 
+ :param label: module or package name with version + """ + label = "".join( + char + for char in label + if char.isalnum() + or char == " " + or char == "." + or char == "-" + or char == "_" + or char == ":" + ) + return label + + +def send_LR_data(data, log): + """ + Send telemetry data to LanternRock portal + :param data: Dictionary with telemetry info + """ + LR_data.update(data) + if ( + LR_data.get("success_ids") + or LR_data.get("failed_ids") + or LR_data.get("image_name") + or LR_data.get("helm_chart") + or LR_data.get("success_helm_ids") + or LR_data.get("failed_helm_ids") + or LR_data.get("success_container_ids") + or LR_data.get("failed_container_ids") + ): + TID = get_lanternrock_tid(log) + if os.path.isfile(manifest_file): + xml_details = get_recipe_details(manifest_file) + recipe_id = get_recipe_details(manifest_file)["id"] + if "type" in LR_data and LR_data["type"] == "upgrade": + recipe_id = LR_data["recipe_id"] + try: + package_details = api.get_recipe_details(recipe_id, log) + package_label = package_details["name"]["en"] + package_version = package_details["version"] + all_modules = {} + all_ingredients = package_details["ingredients"] + for index in range(len(all_ingredients)): + module_id_key = all_ingredients[index]["id"] + module_id_label = all_ingredients[index]["name"]["en"] + module_id_version = all_ingredients[index]["version"] + module_id_label = module_id_label + ":" + module_id_version + all_modules[module_id_key] = module_id_label + except Exception as e: + log.error("Failed to get package and modules details.".format(e)) + + import_LR_helper(log) + + global LR_INSTALLED + if not LR_INSTALLED: + return + + try: + from lanternrock import ( + LanternRock, + LanternRockArgumentError, + LanternRockError, + LanternRockInitializationError, + ) + + lr = LanternRock() + log.info("Initializing LanternRock") + log.info("CLI version: {}".format(APP_VERSION)) + lr.Initialize("ESH-CLI", APP_VERSION, TID, None, None) + if "type" not in LR_data: + 
LR_data.update({"type": "install"}) + if "configuration_id" in LR_data: + ESH_configuration_id = { + "esh_configuration_id": LR_data["configuration_id"], + "esh_command_type": LR_data["type"], + } + lr.RecordEventEx( + None, "ESH_configuration_id", 1, 1.0, ESH_configuration_id + ) + if "product_key" in LR_data: + ESH_product_key = {"esh_product_key": LR_data["product_key"]} + lr.RecordEventEx(None, "ESH_product_key", 1, 1.0, ESH_product_key) + package_info = None + if "recipe_id" in LR_data: + label = modify_module_package_label(package_label) + package_info = label + ":" + package_version + ESH_package = { + "esh_package": package_info, + "esh_command_type": LR_data["type"], + } + log.info("Package: {}".format(package_info)) + lr.RecordEventEx(None, "ESH_package", 1, 1.0, ESH_package) + if "type" in LR_data: + log.info("Command type: {}".format(LR_data["type"])) + ESH_command_type = { + "esh_command_type": LR_data["type"], + "esh_package": package_info, + } + lr.RecordEventEx(None, "ESH_command_type", 1, 1.0, ESH_command_type) + if "image_name" in LR_data: + log.info("Image pulled successfully: {}".format(LR_data["image_name"])) + for image_name in LR_data["image_name"]: + label = modify_module_package_label(image_name) + ESH_pulled_images = {"esh_pulled_images": image_name} + lr.RecordEventEx(None, "ESH_pulled_images", 1, 1.0, ESH_pulled_images) + ESH_command_type_success = {"esh_command_type_success": LR_data["type"]} + lr.RecordEventEx( + None, + "ESH_command_type_success", + 1, + 1.0, + ESH_command_type_success, + ) + if "helm_chart" in LR_data: + log.info( + "Helm chart downloaded successfully: {}".format( + LR_data["helm_chart"] + ) + ) + for helm_chart in LR_data["helm_chart"]: + label = modify_module_package_label(helm_chart) + ESH_downloaded_helm_charts = {"esh_downloaded_helm_charts": label} + lr.RecordEventEx( + None, + "ESH_downloaded_helm_charts", + 1, + 1.0, + ESH_downloaded_helm_charts, + ) + ESH_command_type_success = {"esh_command_type_success": 
LR_data["type"]} + lr.RecordEventEx( + None, + "ESH_command_type_success", + 1, + 1.0, + ESH_command_type_success, + ) + + if "success_ids" in LR_data and LR_data["success_ids"]: + record_LR_modules( + "success_ids", + all_modules, + lr, + "ESH_module_success", + "esh_module_success", + "esh_command_type_success", + ) + + if "success_helm_ids" in LR_data and LR_data["success_helm_ids"]: + record_LR_modules( + "success_helm_ids", + all_modules, + lr, + "ESH_helm_success", + "esh_helm_success", + "esh_command_type_success", + ) + + if "success_container_ids" in LR_data and LR_data["success_container_ids"]: + record_LR_modules( + "success_container_ids", + all_modules, + lr, + "ESH_container_success", + "esh_container_success", + "esh_command_type_success", + ) + + if ( + "failed_ids" in LR_data + or "failed_helm_ids" in LR_data + or "failed_container_ids" in LR_data + ): + if ( + not LR_data["failed_ids"] + and not LR_data["failed_helm_ids"] + and not LR_data["failed_container_ids"] + ): + log.info("Package status: success") + success_package = ESH_package["esh_package"] + ESH_package_success = { + "esh_package_success": success_package, + "esh_command_type_success": LR_data["type"], + } + lr.RecordEventEx( + None, "ESH_package_success", 1, 1.0, ESH_package_success + ) + ESH_command_type_success = { + "esh_command_type_success": LR_data["type"], + "esh_package_success": success_package, + } + lr.RecordEventEx( + None, + "ESH_command_type_success", + 1, + 1.0, + ESH_command_type_success, + ) + else: + log.info("Package status: failed") + failed_package = ESH_package["esh_package"] + ESH_package_failed = { + "esh_package_failed": failed_package, + "esh_command_type_failed": LR_data["type"], + } + lr.RecordEventEx( + None, "ESH_package_failed", 1, 1.0, ESH_package_failed + ) + ESH_command_type_failed = { + "esh_command_type_failed": LR_data["type"], + "esh_package_failed": failed_package, + } + lr.RecordEventEx( + None, "ESH_command_type_failed", 1, 1.0, 
ESH_command_type_failed + ) + + if "failed_ids" in LR_data and LR_data["failed_ids"]: + record_LR_modules( + "failed_ids", + all_modules, + lr, + "ESH_module_failed", + "esh_module_failed", + "esh_command_type_failed", + ) + + if "failed_helm_ids" in LR_data and LR_data["failed_helm_ids"]: + record_LR_modules( + "failed_helm_ids", + all_modules, + lr, + "ESH_helm_failed", + "esh_helm_failed", + "esh_command_type_failed", + ) + + if "failed_container_ids" in LR_data and LR_data["failed_container_ids"]: + record_LR_modules( + "failed_container_ids", + all_modules, + lr, + "ESH_container_failed", + "esh_container_failed", + "esh_command_type_failed", + ) + + log.info("De-Initializing LanternRock") + lr.Deinitialize() + log.info("Uploading data to the LanternRock portal") + lr.Upload(TID, {"show": False}) + except Exception as e: + log.error("Failed to send telemetry report to LanternRock {}".format(e)) + + +def record_LR_modules( + id_collection, + all_modules, + lr, + ESH_module_status, + esh_module_status, + esh_command_type_status, +): + """ + Uses record API from LR SDK to keep log of modules, helm_charts and + images to be uploaded on LR portal. 
+ + :params id_collection: string to retrieve modules/helm/container ids from LR_data + :params all_modules: list of all modules within a package + :params ESH_module_status: name of the dictionary to be uploaded on LR portal + :params esh_module_status: status of module/helm/containers + :params esh_command_type_status: status of corresponding command type + """ + + global LR_data + for guid in LR_data.get(id_collection): + if guid in all_modules: + label = all_modules[guid] + label = modify_module_package_label(label) + ESH_module_dict = { + esh_module_status: label, + esh_command_type_status: LR_data["type"], + } + lr.RecordEventEx(None, ESH_module_status, 1, 1.0, ESH_module_dict) + + +def send_telemetry_data(data, log): + """ + Send installation report to service layer + :param data: Dictionary with telemetry info + """ + try: + telemetry_data.update(data) + if ( + telemetry_data.get("success_ids") + or telemetry_data.get("failed_ids") + or telemetry_data.get("successHelmIds") + or telemetry_data.get("failedHelmIds") + or telemetry_data.get("successContainerIds") + or telemetry_data.get("failedContainerIds") + ): + system_info = sys_info(log) + os_name = system_info["os_name"] + hardware = system_info["hardware"] + processor = system_info["processor"] + vendor = system_info["vendor_id"] + network_time = get_network_time() + if constants.Operating_system == "Linux": + kernel = system_info["kernel"] + if "type" not in telemetry_data: + telemetry_data.update({"type": "install"}) + if constants.Operating_system == "Linux": + telemetry_data.update( + { + "os_name": os_name, + "kernel": kernel, + "hardware": hardware, + "processor": processor, + "vendor_id": vendor, + "network_time": network_time, + } + ) + elif constants.Operating_system == "Windows": + telemetry_data.update( + { + "os_name": os_name, + "hardware": hardware, + "processor": processor, + "vendor_id": vendor, + "network_time": network_time, + } + ) + api.send_telemetry_data(telemetry_data, log) + 
telemetry_data.clear() + except Exception as e: + log.error("Failed to send installation report") + + +def checkInternetConnection(log): + """ + Check for internet connection + """ + num_tries = 0 + log.console("Checking Internet connection", color_code=constants.CYAN) + while num_tries < 3: + try: + url = constants.DOMAINS[num_tries] + if url.lower().startswith("http"): + req = urllib.request.Request(url) + connection = urllib.request.urlopen(req, timeout=10) + if connection.status == constants.HTTP_STATUS.get("OK"): + log.console("Connected to the Internet", color_code=constants.GREEN) + break + else: + log.console("Not connected to the Internet", error=True) + except KeyboardInterrupt: + log.console( + "Installation aborted by user. Exiting installation", error=True + ) + sys.exit(-1) + except urllib.error.HTTPError as e: + log.console("Failed to verify internet connection {}".format(e), error=True) + except urllib.error.URLError as e: + if isinstance(e.reason, socket.timeout): + log.console( + "Check for Internet connection request timed out", error=True + ) + except Exception as e: + log.console("Failed to verify Internet connection {}".format(e), error=True) + if num_tries == 2: + log.console( + "Please check the Internet connection and proxy " "settings", error=True + ) + sys.exit(-1) + else: + log.console("Reconnecting", color_code=constants.YELLOW) + num_tries += 1 + + +def check_existing_modules(module_list, os_name, log): + """ + Check if any of the pre-requisite modules exist + :param os_name: Name of Operating System + :param pip_list: list of required pip modules to be installed + :param module_list: list of required modules to be installed + """ + if constants.Operating_system == "Linux": + if "CentOS" in os_name or "Red Hat" in os_name: + git_version = subprocess.run( + "git --version | cut -d ' ' -f3", stdout=subprocess.PIPE, shell=True + ) + dec_version = git_version.stdout.decode("ascii").strip() + for package in module_list: + if package == 
"git": + if not dec_version.startswith("2."): + return False + else: + skipped_modules = ["git", "python3-apt"] + if package in skipped_modules: + continue + command = ["yum", "list", "installed", "|", "grep", "-w", package] + res = run(command, stdout=PIPE, stderr=PIPE) + if res.stdout.decode("ascii") == "": + return False + return True + else: + try: + ret = subprocess.run( + "sudo apt-get update", shell=True, stdout=subprocess.PIPE + ) + if ret.returncode: + log.console( + "Failed to update apt list. Exiting installation.", error=True + ) + sys.exit(-1) + for package in module_list: + reqs = subprocess.check_output( + ["apt", "-qq", "list", package], + stderr=subprocess.DEVNULL, + ) + installed_packages = [ + r.decode().split("==")[0] for r in reqs.split() + ] + installed_packages = [i.split("/")[0] for i in installed_packages] + if "[installed]" not in installed_packages: + return False + if installed_packages[0] not in module_list: + return False + return True + except Exception as e: + log.console( + "Failed to update apt list. {} Exiting installation.".format(e), + error=True, + ) + sys.exit(-1) + + elif constants.Operating_system == "Windows": + if "Windows" in os_name: + git_version = subprocess.run( + "git --version", + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True, + ) + if git_version.returncode: + return False + else: + dec_version1 = git_version.stdout.decode("ascii").strip() + dec_version = dec_version1.strip("git version") + for package in module_list: + if package == "git": + if not dec_version.startswith("2."): + return False + elif package == "curl": + res = subprocess.run("curl -V", shell=True, stdout=subprocess.PIPE) + if res.returncode != 0: + return False + elif package == "pip": + res = subprocess.run( + "pip -V", + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + if res.returncode != 0: + log.console( + "Issue with python/pip version or Path setup." 
+ "Exiting installation {}".format(res.stderr), + color_code=constants.RED, + ) + log.console( + "Check installed python verison and Environment Variable", + color_code=constants.YELLOW, + ) + sys.exit(-1) + + +def check_pip_installed_modules(pip_list, log): + """ + Check if any of the pip pre-requisite modules exist + :param pip_list: list of required pip modules to be installed + """ + for module in pip_list: + command = ["pip3", "show", module] + ret = subprocess.run(command, stdout=PIPE, stderr=PIPE) + dec_ret = ret.stdout.decode("ascii").strip("\n") + if dec_ret == "": + return False + return True + + +def pre_requisites(log, os_name): + """ + Install the pre-requisite packages + :param os_name: Name of Operating System + """ + log.console("Checking for prerequisites", color_code=constants.CYAN) + pip_list = [ + "click", + "requests", + "termcolor", + "wget", + "setuptools", + "PyYAML", + "prettytable", + "psutil", + "py-cpuinfo", + "colorama", + ] + if constants.Operating_system == "Linux": + module_list = [ + "python3-pip", + "git", + "curl", + "usbutils", + "pciutils", + "python3-apt", + ] + elif constants.Operating_system == "Windows": + module_list = ["pip", "git", "curl"] + module_exists = check_existing_modules(module_list, os_name, log) + if not module_exists: + log.console( + "Installing prerequisites. 
This may take some " "time...".center(50, "-"), + color_code=constants.CYAN, + ) + if constants.Operating_system == "Linux": + if "CentOS" in os_name or "Red Hat" in os_name: + ret = subprocess.run( + "sudo yum install -y " + "python3-pip curl usbutils pciutils", + shell=True, + ) + ret = subprocess.run( + "sudo yum install -y gcc python3-devel", shell=True + ) + git_install = install_git(log) + else: + ret = subprocess.run( + "sudo apt-get update && sudo apt-get" + " install -y python3-pip git curl usbutils pciutils python3-apt", + shell=True, + ) + if ret.returncode: + log.console( + "Failed to install prerequisites:" " {}".format(ret.stderr), + error=True, + ) + sys.exit(-1) + elif constants.Operating_system == "Windows": + git_version = subprocess.run( + "git --version", + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True, + ) + if git_version.returncode: + install_git(log) + elif git_version.returncode == 0: + dec_version1 = git_version.stdout.decode("ascii").strip() + dec_version = dec_version1.strip("git version") + if not dec_version.startswith("2."): + install_git(log) + pip_module_exists = check_pip_installed_modules(pip_list, log) + if not pip_module_exists: + for module in pip_list: + if constants.Operating_system == "Linux": + command = ["sudo", "-E", "pip3", "install", module] + ret = run(command, stdout=PIPE, stderr=PIPE) + elif constants.Operating_system == "Windows": + command = ["pip", "install", module] + ret = subprocess.run(command, stdout=PIPE, stderr=PIPE) + if ret.returncode: + log.console( + "Failed to install {}. 
" "{}".format(module, ret.stderr), error=True
+ )
+ sys.exit(-1)
+ log.console(
+ "Successfully installed prerequisites".center(50, "-"),
+ color_code=constants.GREEN,
+ )
+ log.console("All dependencies met", color_code=constants.GREEN)
+
+
+def prompt_reinstall(log, component):
+ """
+ Prompt reinstall if the component is installed
+
+ :param component: Component to be checked
+ :returns: User input True or False
+ """
+ result = False
+ num_prompts = 1
+ while True:
+ msg = (
+ "{} is already installed. Type YES to "
+ "reinstall or NO to skip installation.\n".format(component)
+ )
+ try:
+ option = inputimeout(constants.BICYAN.format(msg), timeout=30)
+ try:
+ if option.lower() == "yes" or option.lower() == "y":
+ result = True
+ log.info("Selected {} for {}".format(option, component))
+ break
+ elif option.lower() == "no" or option.lower() == "n":
+ result = False
+ log.info("Selected {} for {}".format(option, component))
+ break
+ elif num_prompts >= 10:
+ log.console(
+ "Skipping re-installation for {}".format(component),
+ color_code=constants.CYAN,
+ )
+ result = False
+ break
+ else:
+ log.console(
+ "Invalid option. Valid " "options are YES or NO.\n", error=True
+ )
+ num_prompts += 1
+ except:
+ log.error("Failed to re-prompt. Exiting installation")
+ sys.exit(-1)
+ except KeyboardInterrupt:
+ log.console(
+ "Installation aborted by user. 
Exiting installation" + " for {}".format(component), + error=True, + ) + break + except: + log.console( + "Skipping re-installation for {}".format(component), + color_code=constants.CYAN, + ) + result = False + break + return result + + +def unzip_modules(component_list, log): + esb_modules_list = OrderedDict() + try: + for module_id, value in component_list.items(): + if not is_image(value): + abs_src_path = os.path.abspath(value["path"]) + zip_location = os.path.join(abs_src_path, module_id) + if not is_helm(value): + module = value["label"] + else: + module = value["label"] + "-" + value["tag"] + tar_file = zip_location + ".tgz" + zip_file = zip_location + ".zip" + module_path = os.path.join(abs_src_path, module) + if not len(component_valid) or ( + component_valid[module] == None or not os.path.exists(module_path) + ): + if os.path.exists(zip_file): + with ZipFile(zip_file, "r") as zipObj: + # Extract all the contents of zip file + log.console("Unzipping the module {}...".format(module)) + for info in zipObj.infolist(): + extract_file(zipObj, info, module_path) + elif os.path.exists(tar_file): + with tarfile.open(tar_file, "r:gz") as tar: + log.console("Unzipping Helm chart {}...".format(module)) + tar.extractall(module_path) + + if value.get("esb_install"): + esb_modules_list.update({module_id: value}) + if is_helm(value): + esb_modules_list.update({module_id: value}) + else: + esb_modules_list.update({module_id: value}) + except Exception as e: + log.console("Failed to unzip modules. 
{}".format(e)) + return esb_modules_list + + +def update_xml_json(component_id, component_name, file_name, log): + output_dir_path = create_output_dir(manifest_file) + install_status_log_path = os.path.join(output_dir_path, install_status_log) + try: + component_list = {} + try: + with open(install_status_log_path, "r") as file: + component_list = json.load(file) + del component_list[component_name] + with open(install_status_log_path, "w") as file: + json.dump(component_list, file) + log.info("Successfully updated json file for {}".format(component_name)) + except Exception as e: + log.console( + "Failed to update json file due to " "error {}".format(e), error=True + ) + # try: + # if(os.path.exists(file_name)): + # tree = ET.parse(file_name) + # root = tree.getroot() + # for child in root: + # if child.tag == "project": + # if component_id == child.attrib.get('id'): + # rem_child = child + # root.remove(rem_child) + # tree.write(file_name) + # log.info("Removed the xml entry of the " + # "component id {}".format(component_id)) + # break + # except Exception as e: + # log.console("Failed to update xml file due to " + # "error {}".format(e), error=True) + except Exception as e: + log.console( + "Failed to update uninstalled component due to" " error {}".format(e), + error=True, + ) + + +def copy_export_deps(deps_list, dest, log): + for dep in deps_list: + try: + if os.path.exists(dep): + if os.path.isfile(dep): + shutil.copy(dep, dest) + else: + log.console("Failed to add export dependencies", error=True) + sys.exit(-1) + except: + log.console("Failed to add export dependencies.") + sys.exit(-1) + + +def upgrade_cleanup(log, install_path, conf_path, package, error=False): + """ + Clean up for upgrade function + + :param install_path: ESB CLI path under log directory + :param error: upgrade status + """ + temp_xml_path = os.path.join(conf_path, manifest_file) + log.info("Cleanup for Upgrade function") + try: + if error: + if os.path.exists(conf_path): + 
shutil.rmtree(conf_path) + if os.path.exists(package): + log.info("Removing {}".format(package)) + if constants.Operating_system == "Linux": + command = ["sudo", "rm", "-rf", package] + ret = run(command, stdout=PIPE, stderr=PIPE) + elif constants.Operating_system == "Windows": + shutil.rmtree(package) + if os.path.exists(install_path): + log.info("Removing {}".format(install_path)) + if constants.Operating_system == "Linux": + command = ["sudo", "rm", "-rf", install_path] + ret = run(command, stdout=PIPE, stderr=PIPE) + elif constants.Operating_system == "Windows": + shutil.rmtree(install_path) + else: + if os.path.exists(conf_path): + os.remove(manifest_file) + file_names = os.listdir(conf_path) + for file_name in file_names: + shutil.move( + os.path.join(conf_path, file_name), + os.path.join(os.getcwd(), file_name), + ) + shutil.rmtree(conf_path) + if os.path.exists(install_path): + log.info("Removing {}".format(install_path)) + if constants.Operating_system == "Linux": + command = ["sudo", "rm", "-rf", install_path] + ret = run(command, stdout=PIPE, stderr=PIPE) + elif constants.Operating_system == "Windows": + shutil.rmtree(install_path) + log.info("Clean up complete") + except Exception as e: + log.console("Failed to clean installation directory. 
{}".format(e), error=True) + + +def clean_esb_common(log, output_dir_path): + if constants.Operating_system == "Linux": + command = ["sudo", "find", "/usr/local/", "-type", "d", "-iname", "esb_common"] + common_path = run(command, stdout=PIPE, stderr=PIPE) + del_common_path = common_path.stdout.decode("ascii").strip("\n") + command = [ + "sudo", + "find", + "/usr/local/", + "-type", + "d", + "-iname", + "lanternrock-*", + ] + lr_path = run(command, stdout=PIPE, stderr=PIPE) + del_lr_path = lr_path.stdout.decode("ascii").strip("\n") + command = ["sudo", "find", "/opt/intel/", "-name", "lanternrocksdk-*"] + opt_lr_path = run(command, stdout=PIPE, stderr=PIPE) + del_opt_lr_path = opt_lr_path.stdout.decode("ascii").strip("\n") + elif constants.Operating_system == "Windows": + del_common_path = os.path.join( + sitepackage_location, "Lib", "site-packages", "esb_common" + ) + try: + if del_common_path: + log.info("Cleaning common directories") + if constants.Operating_system == "Linux": + command = ["sudo", "rm", "-rf", del_common_path] + ret = run(command, stdout=PIPE, stderr=PIPE) + if ret.returncode: + log.error("Failed to clean common directories") + command = [ + "sudo", + "-E", + "python3", + "-m", + "pip", + "uninstall", + "-y", + "esb-common", + ] + ret = run(command, stdout=PIPE, stderr=PIPE) + if ret.returncode: + log.error("Failed to clean esb_common-egg directories") + command = [ + "sudo", + "-E", + "python3", + "-m", + "pip", + "uninstall", + "-y", + "lanternrock", + ] + ret = run(command, stdout=PIPE, stderr=PIPE) + if ret.returncode: + log.error("Failed to clean LanternRock directories") + elif constants.Operating_system == "Windows": + command = ["python", "-m", "pip", "uninstall", "-y", "esb-common"] + ret = run(command, stdout=PIPE, stderr=PIPE) + if ret.returncode: + log.error("Failed to clean esb_common-egg directories") + shutil.rmtree(del_common_path) + if del_lr_path: + log.info("Cleaning Lanternrock SDK") + command = ["sudo", "rm", "-rf", 
del_lr_path] + ret = run(command, stdout=PIPE, stderr=PIPE) + if ret.returncode: + log.error("Failed to clean Lanternrock egg folders") + if del_opt_lr_path: + command = ["sudo", "rm", "-rf", del_opt_lr_path] + ret = run(command, stdout=PIPE, stderr=PIPE) + if ret.returncode: + log.error("Failed to clean Lanternrock SDK") + + if os.path.exists(output_dir_path): + if constants.Operating_system == "Linux": + command = ["sudo", "rm", "-rf", output_dir_path] + ret = run(command, stdout=PIPE, stderr=PIPE) + if ret.returncode: + log.error("Failed to clean log directories") + elif constants.Operating_system == "Windows": + for i in os.listdir(output_dir_path): + if i == "output.log": + continue + if os.path.isfile(os.path.join(output_dir_path, i)): + os.remove(os.path.join(output_dir_path, i)) + else: + shutil.rmtree(os.path.join(output_dir_path, i)) + except Exception as e: + log.error("Failed to clean log directories") + log.console("Failed to clean log directories {}".format(e)) + + +def reinstall_check(update, upgrade, log): + """ + Check if it is reinstallation of package + :param update: update command + """ + output_dir_path = create_output_dir(manifest_file) + install_status_json_path = os.path.join(output_dir_path, install_status_log) + try: + if constants.Operating_system == "Linux": + common_path = subprocess.run( + "sudo find /usr/local/ -type d -iname " "'esb_common' ", + stdout=subprocess.PIPE, + shell=True, + ) + common_dir = common_path.stdout.decode("ascii").strip("\n") + if ( + not (update or upgrade) + and common_dir + and os.path.exists(install_status_json_path) + and os.stat(install_status_json_path).st_size != 0 + ): + send_telemetry_data(({"type": "reinstall"}), log) + elif constants.Operating_system == "Windows": + common_dir = os.path.join( + sitepackage_location, "Lib", "site-packages", "esb_common" + ) + if ( + not (update or upgrade) + and os.path.isdir(common_dir) + and os.path.exists(install_status_json_path) + and 
os.stat(install_status_json_path).st_size != 0 + ): + send_telemetry_data(({"type": "reinstall"}), log) + except: + log.error("Failed to set reinstall status") + + +def reboot_msg(statuses): + """ + Print Reboot message after installation is complete + :param statuses: list of install statuses of modules + """ + try: + dec_statuses = [] + reg_form = re.compile(r"\x1B\[[0-?]*[ -/]*[@-~]") + for status in statuses: + dec_statuses.append(reg_form.sub("", status)) + if not "FAILED" in dec_statuses: + print( + constants.YELLOW.format( + "Recommended to reboot system" " after installation".center(53, "*") + ) + ) + except: + print(constants.YELLOW.format("Missing reboot message")) + + +def docker_progress_bar(line, bar, progress_data): + """ + Create progress bar for pulling docker image + :param line: docker status line + :param bar: level of new bar + :param progress_data: progress data + """ + line_text = "{}: {}".format(line.get("id", ""), line.get("status", "")) + if line.get("progressDetail"): + progress_data[line["id"]] = line["progressDetail"] + bar.total = sum(p["total"] for p in progress_data.values()) + bar.update(sum(p["current"] for p in progress_data.values()) - bar.n) + bar.set_description_str(line_text) + +def docker_final_update(bar, progress_data): + """ + Update Final progress bar for pulling docker image + :param bar: level of new bar + :param progress_data: progress data + """ + bar.total = sum(p["total"] for p in progress_data.values()) + bar.update(bar.total - bar.n) + +def intel_registry_pull(image, tag, product_key, log): + """ + Pull docker image from Intel Registry + :param image: Name of the docker image + :param tag: Tag of the docker image + :param product_key: Product Key associated with the images + """ + global success_container_names + global success_container_ids + status = False + data = api.validate_docker_image(image, tag, product_key, log) + if not data or data["registryType"] != "intelprivate": + # Image not found in Intel 
Registry, or is Intel Public Registry. + status = non_intel_registry_pull(image, tag, log) + else: + base_image_status = None + if ( + "baseImage" in data + and data["baseImage"] != None + and data["baseImage"] != "null" + and data["baseImage"].strip() != "" + ): + # If image has non-redistributable base image, then pull base layers and then pull Intel layers + base_img, base_img_digest = data["baseImage"].split("@") + base_image_status = api.fetch_base_image(base_img, base_img_digest, log) + if base_image_status or base_image_status == None: + status = api.fetch_image( + data["id"], + data["image"], + data["tag"], + product_key, + log, + ) + else: + status = False + success_container_ids = [data["id"]] + success_pulled_image = image + ":" + tag + success_container_names = [success_pulled_image] + if status and telemetry_data.get("type") == "docker-pull": + send_telemetry_data(({"successContainerIds": success_container_ids}), log) + if status and is_LR_installed(log) and LR_data.get("type") == "docker-pull": + successful_pulled_image = image + ":" + tag + send_LR_data({"image_name": success_container_names}, log) + + return status + + +def non_intel_registry_pull(image, tag, log): + """ + Pull docker image from Non-Intel Registry. 
+ :param image: Name of the docker image + :param tag: Tag of the docker image + """ + log.console("Image not available in Intel Registry.") + log.console("Downloading {}:{} from public registry.".format(image, tag)) + + try: + log.console( + "Pulling Image from {}:{}".format(image, tag), color_code=constants.GREEN + ) + client = docker.from_env() + with tqdm( + total=1, + desc="Downloading", + unit="B", + unit_scale=True, + unit_divisor=1024, + position=0, + ascii=False, + ) as download_bar: + with tqdm( + total=1, + desc="Extracting", + unit="B", + unit_scale=True, + unit_divisor=1024, + position=1, + ascii=False, + ) as extract_bar: + download_progress = {} + extract_progress = {} + for line in client.api.pull(image, tag=tag, stream=True, decode=True): + if line["status"] == "Downloading": + docker_progress_bar(line, download_bar, download_progress) + elif line["status"] == "Extracting": + docker_progress_bar(line, extract_bar, extract_progress) + + if download_progress: + docker_final_update(download_bar,download_progress) + if extract_progress: + docker_final_update(extract_bar,extract_progress) + + log.console("Status: Image saved for {}:{}".format(image, tag)) + return image_load_status(image, tag, log) + except Exception as e: + msg = "Failed to pull image {}:{}. {}" + print_msg = "Failed to pull image {}:{}" + log.console(msg.format(image, tag, e), print_msg.format(image, tag), error=True) + return False + + +def image_load_status(image, tag, log): + """ + Verifies if docker image available in device. 
+ :param image: Name of the docker image + :param tag: Tag of the docker image + """ + digest = None + if "sha256" in tag: + digest = tag + if constants.Operating_system == "Linux": + if not digest: + cmd = "sudo docker images -q {}:{}".format(image, tag) + else: + cmd = "sudo docker images -q {}@{}".format(image, digest) + elif constants.Operating_system == "Windows": + if not digest: + cmd = "docker images -q {}:{}".format(image, tag) + else: + cmd = "docker images -q {}@{}".format(image, digest) + p = subprocess.run(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output = p.stdout.decode("utf-8") + if output: + return True + return False + + +def docker_installed(): + """ + Verifies if docker is installed in the device. + """ + if constants.Operating_system == "Linux": + cmd = "sudo docker --version" + elif constants.Operating_system == "Windows": + cmd = "docker --version" + p = subprocess.run(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) + docker_version = p.stdout.decode("ascii").strip() + if docker_version: + return True + return False + + +def is_image(val): + if val.get("type") == "image": + return True + return False + + +def is_helm(val): + if val.get("type") == "helm": + return True + return False + + +def get_docker_credentials(log): + """ + Get Docker credentials from ~/.docker/config.json file + """ + try: + docker_config = str(Path.home()) + "/.docker/config.json" + if Path(docker_config).is_file(): + with open(docker_config) as json_file: + data = json.load(json_file) + if "auths" in data: + repo_creds = data["auths"] + if "https://index.docker.io/v1/" in repo_creds: + auth = repo_creds["https://index.docker.io/v1/"] + if "auth" in auth: + encoded_cred = auth["auth"] + decoded_cred = base64.b64decode(encoded_cred).decode( + "utf-8" + ) + user, passwd = decoded_cred.split(":") + return {"username": user, "password": passwd} + else: + return {} + else: + return {} + else: + return {} + else: + return {} + except 
Exception as e: + log.console("Unable to get docker credentials.", error=True) + return {} + + +def get_layer_dir(log): + """ + Create layer directory if not exist + """ + layer_dir = str(Path.home()) + layers_dir + try: + if constants.Operating_system == "Linux": + if not os.path.isdir(layer_dir): + subprocess.run(["sudo", "mkdir", "-p", layer_dir]) + subprocess.run(["sudo", "chown", os.environ["USER"], layer_dir]) + elif constants.Operating_system == "Windows": + Path(layer_dir).mkdir(parents=True, exist_ok=True) + return layer_dir + except Exception as e: + log.console( + "Unable to create {} directory: {}".format(layer_dir, e), error=True + ) + sys.exit(-1) + + +def validate_hash(filepath, hash, log): + """ + Validate the file with provided hash + :param filepath: Path where the layer is located + :param hash: Hash to validate the file against + """ + try: + sha256_hash = hashlib.sha256() + with open(filepath, "rb") as f: + # Read and update hash string value in blocks of 4K + for byte_block in iter(lambda: f.read(4096), b""): + sha256_hash.update(byte_block) + computed_hash = sha256_hash.hexdigest() + except Exception as e: + log.console("Cannot compute hash on file {}".format(filepath), error=True) + + return hash == computed_hash + + +def remove_docker_image(image, log): + """ + Remove the docker images + :param image: Name of the image + """ + if constants.Operating_system == "Linux": + command = run( + ( + [ + "sudo", + "docker", + "images", + "-a", + "--format", + "{{.Repository}},{{.Tag}},{{.ID}}", + ] + ), + stdout=PIPE, + stdin=PIPE, + ) + elif constants.Operating_system == "Windows": + command = run( + ( + [ + "docker", + "images", + "-a", + "--format", + "{{.Repository}},{{.Tag}},{{.ID}}", + ] + ), + stdout=PIPE, + stdin=PIPE, + ) + image_list = command.stdout.decode("utf-8").splitlines() + + sv_reader = csv.reader(image_list, delimiter=",") + for row in sv_reader: + image_name = row[0] + ":" + row[1] + if image_name == image: + image_id = row[2] + 
if constants.Operating_system == "Linux": + command = f"sudo docker rmi -f {image_id}" + elif constants.Operating_system == "Windows": + command = f"docker rmi -f {image_id}" + status = run(command.split(), stdout=PIPE, stderr=PIPE) + if status.returncode: + log.console( + ((status.stdout + status.stderr).decode("utf-8")), error=True + ) + return False + else: + return True + + # Image not found. So return True + return True + + +def remove_helm_keys(chart_name, log): + """ + Remove Helm chart Secrets from Kubernetes Cluster + + Args: + chart_name (String): Name and ID of the chart + log (obj): Log Object + + Returns: + Bool: Status + """ + # Delete the secret if it actually exists + try: + config.load_kube_config() + v1 = client.CoreV1Api() + namespace = "default" + secret_name = "esh-" + chart_name + secret_name = secret_name.lower() + api_response = v1.list_namespaced_secret(namespace) + except Exception as e: + log.error("Exception when calling CoreV1Api load Kube Config {}".format(e)) + return True + + try: + if not api_response: + return True + for data in api_response.items: + if data.metadata.name == secret_name: + log.info( + "Deleting old secret keys for Helm chart {}".format(chart_name) + ) + api_response = v1.delete_namespaced_secret(secret_name, namespace) + if api_response: + log.console( + "Deleted old secret keys for Helm chart {}".format(chart_name), + color_code=constants.GREEN, + ) + return True + log.console( + "Helm chart secret keys for {} not found in the Kubernetes Cluster".format( + chart_name + ) + ) + return True + except ApiException as e: + log.console( + "Exception when calling CoreV1Api. 
{}".format(e), + error=True, + ) + + return False + + +def update_k8s_secret_store(chart_name, chart_tag, chart_id, product_key, log): + """ + Delete Previous Secrets and add new Secrets for Helm Chart + + Args: + chart_name (String): Helm Chart Name + chart_tag (String): Helm Chart Tag + chart_id (String): Helm Chart GUID + product_key (String): Product Key Value + log (obj): Log Object + + Returns: + Bool: Status + """ + # Initialize K8s Client + validation_status, helm_data = api.get_helm_robot_account( + chart_id, product_key, log + ) + + if validation_status: + + try: + config.load_kube_config() + v1 = client.CoreV1Api() + namespace = "default" + secret_name = "esh-" + chart_name + "-" + chart_tag + secret_name = secret_name.lower() + api_response = v1.list_namespaced_secret(namespace) + + except Exception as e: + log.console( + "Exception when calling Kubernetes CoreV1Api. {}".format(e), + error=True, + ) + return False + # Delete the secret if it actually exists. + if api_response: + for data in api_response.items: + if data.metadata.name == secret_name: + log.info( + "Deleting old secret keys for Helm chart {}-{}".format( + chart_name, chart_tag + ) + ) + try: + api_response = v1.delete_namespaced_secret( + secret_name, namespace + ) + if api_response: + log.info( + "Deleted old secret keys for Helm Chart {}-{}".format( + chart_name, chart_tag + ) + ) + except ApiException as e: + log.console( + "Exception when calling CoreV1Api to delete old Helm Chart secret {}-{}".format( + chart_name, chart_tag + ), + color_code=constants.RED, + ) + + # Get one registry credential for all the private docker images available in Helm chart. + username, password = ( + base64.b64decode(helm_data["token"]).decode("utf-8").split(":", 1) + ) + registry = helm_data["registry"] + docker_creds = get_credentials(registry, username, password) + + docker_creds_encoded = base64.b64encode( + json.dumps(docker_creds).encode() + ).decode() + # Add new secret with the credentials. 
+ log.info( + "Adding new secret keys for Helm chart {}-{}".format(chart_name, chart_tag) + ) + namespace = "default" + body = { + "api_version": "v1", + "data": {".dockerconfigjson": docker_creds_encoded}, + "kind": "Secret", + "metadata": {"name": secret_name, "namespace": namespace}, + "type": "kubernetes.io/dockerconfigjson", + } + + try: + api_response = v1.create_namespaced_secret(namespace, body) + if api_response: + log.console( + "Added new secret keys for Helm chart {}-{}".format( + chart_name, chart_tag + ), + color_code=constants.GREEN, + ) + return True + except ApiException as e: + log.console( + "Exception when calling CoreV1Api list_namespaced_secret. {}".format(e), + error=True, + ) + return False + else: + return False + + +def get_credentials(registry_name, username, password): + creds = { + "auths": { + "{}".format(registry_name): {"username": username, "password": password} + } + } + return creds + + +def install_lanternrock(src, manifest, log): + """ + Installs Lanternrock analytics SDK + + :param src: esb-common dict + :param manifest: manifest file edgesoftware_configuration.xml + :log (obj): Logger Object + """ + + global LR_INSTALLED + log.console("Installing Lanternrock SDK", color_code=constants.GREEN) + common_id = get_recipe_details(manifest, common=True)["common_id"] + try: + cwd = os.getcwd() + abs_src_path = os.path.abspath(src["path"]) + module_path = os.path.join(abs_src_path, "esb_common") + + lr_install_path = "/opt/intel/" + if not os.path.exists(lr_install_path): + subprocess.run(["sudo", "mkdir", "-p", lr_install_path]) + subprocess.run(["sudo", "chown", os.environ["USER"], lr_install_path]) + tarfile_path = os.path.join(module_path, "lanternrocksdk-linux-3.0.90.tar.gz") + if os.path.exists(tarfile_path): + with tarfile.open(tarfile_path, "r:gz") as tar: + log.info("Unzipping LanternRock SDK within {}".format(lr_install_path)) + tar.extractall(lr_install_path) + + unzipped_lr_path = os.path.join(lr_install_path, 
"lanternrocksdk-linux-3.0.90") + lr_tar_gz = os.path.basename(tarfile_path) + lr_tar = os.path.splitext(lr_tar_gz)[0] + lr_version = os.path.splitext(lr_tar)[0] + log.info("Lanternrock SDK being installed: {}".format(lr_version)) + # NOTE: Following lines are to support LR SDK 3.0.14 which doesn't have the + # requirement of specific libstdc++.so + # dest_ias3_so_path = os.path.join( + # unzipped_lr_path, "python/lanternrock/linux/libintel-ias3.so" + # ) + # src_ias3_so_path = os.path.join( + # unzipped_lr_path, "native/lib/static-legacy/libintel-ias3.so" + # ) + # shutil.copy(src_ias3_so_path, dest_ias3_so_path) + + # NOTE: LR SDK 3.0.90 requires libstdc++.so from the LR package for Ubuntu 18, CentOS & RHEL + + import_LR_helper(log) + os.chdir(os.path.join(unzipped_lr_path, "python/")) + ret = subprocess.run( + "sudo python3 setup.py install 2>/dev/null", + shell=True, + stdout=subprocess.PIPE, + ) + if ret.returncode: + msg = "Failed to install Lanternrock SDK" " {}".format(ret.stderr) + print(constants.RED.format(msg)) + log.error("Failed to run setup.py for lanternrock. {}".format(ret.stderr)) + except Exception as e: + msg = "Failed to install 'Lanternrock SDK'. {}".format(e) + log.console(msg, error=True) + return False + finally: + os.chdir(cwd) + log.console("Successfully installed Lanternrock SDK.", color_code=constants.GREEN) + command = ["sudo", "find", "/usr/local/", "-type", "d", "-iname", "lanternrock-*"] + lr_path = run(command, stdout=PIPE, stderr=PIPE) + lr_egg_path = lr_path.stdout.decode("ascii").strip("\n") + if lr_egg_path not in sys.path: + sys.path.append(lr_egg_path) + LR_INSTALLED = True + return True + + +def import_LR_helper(log): + """ + Sets appropriate libstdc++.so version from LR SDK folder. + Logic is equivalent to setting LD_PRELOAD from terminal. 
+ """ + global LR_INSTALLED + lr_install_path = "/opt/intel/lanternrocksdk-linux-3.0.90" + if os.path.exists(lr_install_path): + cwd = os.getcwd() + libstd_path = os.path.join(lr_install_path, "native/lib/libstdc++/libstdc++.so") + if libstd_path: + import ctypes + + ctypes.cdll.LoadLibrary(libstd_path) + else: + log.error("Failed to setup relevant libstdc++ version for LanternRock") + return False + os.chdir(cwd) + LR_INSTALLED = True + + +def is_LR_installed(log): + """ + Checks if LanternRock SDK is already installed in a system or not. + """ + if constants.Operating_system == "Linux": + ret = os.system("pip3 show lanternrock >/dev/null 2>&1") + if ret == 0: + return True + return False + + +def save_image(log, name, tag, id): + """ + Save Docker image to a tarfile + Max Supported Docker Image to save ~ 30 GB + + Args: + log (obj): Logger Object + name (String): Name of the Image + tag (String): Tag of the Image + id (String): ID of the module + """ + try: + cli = docker.from_env(timeout=99999) + log.info("Saving Image file {}:{} to target system".format(name, tag)) + + image = cli.images.get("{}:{}".format(name, tag)) + gen = image.save(named=True) + with open("{}.tar".format(id), "wb") as f: + for chunk in tqdm( + gen, + leave=True, + miniters=1, + desc="Saving Image {}:{} ".format(name, tag), + ): + f.write(chunk) + + except Exception as e: + log.error("Exception in saving image {}:{} to file. 
{}".format(name, tag, e)) + + +def load_image(log, name, tag, image_path): + """ + Load Docker image from a tarfile + + Args: + log (obj): Logger Object + name (String): Name of the Image + tag (String): Tag of the Image + image_path (String): Image path Location + """ + try: + cli = docker.from_env(timeout=99999) + log.info("Loading Image file {}:{} to target system".format(name, tag)) + with open( + "{}.tar".format(image_path), + "rb", + ) as file: + images = cli.images.load(file) + if images: + # Retagging the image incase there exist an image already of same ID + if cli.api.tag(images[0].tags[0], name, tag=tag, force=True): + log.info("Loaded Image file {}:{} to target system".format(name, tag)) + else: + log.error( + "Unable to load Image file {}:{} to target system".format(name, tag) + ) + except Exception as e: + log.error( + "Exception in loading image file {}:{} to target system. {}".format( + name, tag, e + ) + ) + return image_load_status(name, tag, log) diff --git a/edgesoftware/edgesoftware.py b/edgesoftware/edgesoftware.py new file mode 100644 index 0000000..57dfa26 --- /dev/null +++ b/edgesoftware/edgesoftware.py @@ -0,0 +1,536 @@ +import click +import os +import sys + +from edgesoftware import functions +from edgesoftware.common import constants +from colorama import init + +if constants.Operating_system == "Windows": + init(convert=True, autoreset=True) + + +@click.group() +@click.version_option(constants.VERSION_TAG, "-v", "--version") +def main(): + """A CLI wrapper for management of Intel® Edge Software Hub packages.""" + + +@click.argument("configuration_id", required=False) +@click.argument("package_name", required=False) +@click.option( + "-f", + "--yaml-file", + help="YAML file path that contains the list of modules to " + "install. Please specify the supported format which is " + ": under 'custom_modules' tag", +) +@main.command() +def install(yaml_file, package_name, configuration_id): + """Install modules of a package. 
+ + \b + PACKAGE_NAME is the name of the package. + CONFIGURATION_ID is the Configuration ID of the selected package. + """ + if len(sys.argv) == 3: + print( + constants.RED.format( + "Package name or Configuration ID is missing. " + "Please check install command usage. Run edgesoftware install --help " + ) + ) + sys.exit(-1) + if yaml_file == None and len(sys.argv) > 3 and sys.argv[2] and sys.argv[3]: + configuration_id = sys.argv[3] + + xml_overwritten = False + if configuration_id: + manifest, xml_overwritten = functions.get_config_xml(configuration_id) + else: + manifest = None + if os.path.exists("edgesoftware_configuration.xml"): + manifest = "edgesoftware_configuration.xml" + if yaml_file: + if os.path.exists(yaml_file): + # NOTE(mkumari): Setting manifest to None because we don't + # want to sync repo when path to custom packages is provided + manifest = None + else: + print( + constants.RED.format( + "User defined configuration file {} " + "not found. Exiting installation.".format(yaml_file) + ) + ) + sys.exit(-1) + if not manifest and not yaml_file: + print( + constants.RED.format( + "Manifest XML file " + "edgesoftware_configuration.xml not found. " + "Exiting installation." + ) + ) + sys.exit(-1) + + product_key = None + is_product_key = True + + if not yaml_file: + functions.download_package_artifacts(manifest_file=manifest) + is_product_key = functions.check_product_key(manifest_file=manifest) + + if manifest and not yaml_file and is_product_key is True: + print( + constants.BICYAN.format( + "Please enter the " + "Product Key. The Product Key is contained in the email you " + "received from Intel confirming your download: " + ), + end=" ", + ) + product_key = input() + if xml_overwritten: + print( + constants.YELLOW.format( + "WARNING: Overwriting Manifest XML file edgesoftware_configuration.xml " + "in current working directory." 
@click.option(
    "-a",
    "--artifacts",
    is_flag=True,
    help="Download Package Artifacts.",
)
@click.argument("configuration_id", required=False)
@click.argument("package_name", required=False)
@main.command()
def download(
    artifacts,
    package_name,
    configuration_id,
):
    """Download modules/artifacts of a package.

    \b
    PACKAGE_NAME is the name of the package.
    CONFIGURATION_ID is the Configuration ID of the selected package.
    """
    # --artifacts: refresh artifacts for an already-downloaded manifest, then stop.
    if artifacts:
        if not os.path.exists("edgesoftware_configuration.xml"):
            print(
                constants.RED.format(
                    "Manifest XML file "
                    "edgesoftware_configuration.xml not found. "
                    "Exiting artifacts download operation."
                )
            )
            sys.exit(-1)
        functions.download_package_artifacts(
            "edgesoftware_configuration.xml",
            recipe_id=None,
            src_dir=None,
            remove_previous=True,
        )
        # Fixed: this path previously called sys.exit(-1) even though the
        # artifacts downloaded successfully, so shell callers saw a failure.
        sys.exit(0)

    # Positional args are re-read from sys.argv to mirror the install
    # command's handling of click's optional-argument parsing.
    if len(sys.argv) == 3:
        print(
            constants.RED.format(
                "Package name or Configuration ID is missing. "
                "Please check download command usage. Run edgesoftware download --help "
            )
        )
        sys.exit(-1)
    if len(sys.argv) > 3 and sys.argv[2] and sys.argv[3]:
        configuration_id = sys.argv[3]

    # Hoisted above the branch: yaml_file is passed to setup_start() on every
    # path, so it must be bound even when a configuration id is provided
    # (previously it was only assigned in the else branch — NameError hazard).
    yaml_file = None
    xml_overwritten = False
    if configuration_id:
        manifest, xml_overwritten = functions.get_config_xml(configuration_id)
    else:
        manifest = None
        if os.path.exists("edgesoftware_configuration.xml"):
            manifest = "edgesoftware_configuration.xml"
    if not manifest:
        print(
            constants.RED.format(
                "Manifest XML file "
                "edgesoftware_configuration.xml not found. "
                "Exiting download operation."
            )
        )
        sys.exit(-1)

    product_key = None
    functions.download_package_artifacts(manifest_file=manifest)
    # check_product_key() returns True only when a Product Key is required
    # for this manifest; prompt interactively in that case.
    is_product_key = functions.check_product_key(manifest_file=manifest)
    if manifest and is_product_key is True:
        print(
            constants.BICYAN.format(
                "Please enter the "
                "Product Key. The Product Key is contained in the email you "
                "received from Intel confirming your download: "
            ),
            end=" ",
        )
        product_key = input()
    if xml_overwritten:
        print(
            constants.YELLOW.format(
                "WARNING: Overwriting Manifest XML file edgesoftware_configuration.xml "
                "in current working directory."
            )
        )
    functions.setup_start(product_key, manifest, yaml_file, download=True)
@click.argument("ingredient_id", nargs=-1)
@click.option(
    "-f", "--file", is_flag=True, help="uninstall from esb_module for export."
)
@click.option("-a", "--all-modules", is_flag=True, help="Uninstall all ingredients.")
@main.command()
def uninstall(ingredient_id, all_modules, file):
    """Uninstall the modules of a package.

    \b
    INGREDIENT_ID is one or more module ids to remove. Either at least one
    id or the --all-modules flag must be supplied.
    """
    # Idiomatic truthiness checks (was: len(ingredient_id) == 0 and
    # all_modules == False): an empty id tuple plus an unset flag means the
    # caller gave us nothing to uninstall.
    if not ingredient_id and not all_modules:
        print("Please check uninstall command usage. Run edgesoftware uninstall --help")
        sys.exit(-1)
    functions.uninstall_ingredient(ingredient_id, all_modules, file)
The Product Key is contained in the email you " + "received from Intel confirming your download: " + ) + ) + functions.pull(name, tag, product_key) + + elif yaml_file: + print( + constants.BICYAN.format( + "Please enter the " + "Product Key. The Product Key is contained in the email you " + "received from Intel confirming your download: " + ), + end=" ", + ) + product_key = input() + functions.pull_docker_compose(yaml_file, product_key) + else: + print( + constants.RED.format( + "Please check docker command usage. Run edgesoftware docker --help " + ) + ) + sys.exit(-1) + + +@click.option( + "-p", + "--pull", + help="Download Helm chart. ", +) +@click.option( + "-u", + "--update-keys", + help="Update Kubernetes secret keys", + is_flag=True, + default=False, + is_eager=True, +) +@click.argument("value", required=False, nargs=-1) +@main.command() +def helm(pull, update_keys, value): + """ + Download Helm charts or update Kubernetes secret keys. + """ + product_key = None + is_product_key = False + helm_chart_id = None + helm_chart_type = None + is_helm_credentials = False + + manifest = None + + if pull and len(sys.argv) > 3: + + if len(sys.argv) > 4: + print( + constants.BICYAN.format( + "Pulling Helm charts : {} ".format(sys.argv[3:]) + ), + end="\n", + ) + + for chart_name in sys.argv[3:]: + helm_list = [] + helm_chart = chart_name.rsplit("-", 1) + if len(helm_chart) == 1: + name = helm_chart[0] + tag = "latest" + else: + name, tag = helm_chart + print( + constants.BICYAN.format( + "Pulling Helm chart : {}-{} ".format(name, tag) + ), + end="\n", + ) + ( + is_product_key, + helm_chart_id, + helm_chart_type, + is_helm_credentials, + ) = functions.check_product_key(helm_chart_name=name, helm_chart_tag=tag) + + if is_product_key is None: + continue + if is_product_key is True: + print( + constants.BICYAN.format( + "Please enter the " + "Product Key. 
The Product Key is contained in the email you " + "received from Intel confirming your download: " + ), + end=" ", + ) + product_key = input() + functions.download_helm_chart( + name, tag, helm_chart_id, helm_chart_type, product_key + ) + if is_helm_credentials and os.path.isdir("{}-{}".format(name, tag)): + helm_list.append( + { + "label": name, + "comp_id": helm_chart_id, + "tag": tag, + } + ) + functions.update_helm_keys(helm_list, product_key, None) + sys.exit(-1) + + elif update_keys: + if value: + if len(sys.argv) > 4: + print( + constants.BICYAN.format( + "Updating Keys for Helm charts : {} ".format(sys.argv[3:]) + ), + end="\n", + ) + + for chart_name in sys.argv[3:]: + helm_chart = chart_name.rsplit("-", 1) + if len(helm_chart) == 1: + name = helm_chart[0] + tag = "latest" + else: + name, tag = helm_chart + print( + constants.BICYAN.format( + "Updating Keys for Helm chart : {}-{} ".format(name, tag) + ), + end="\n", + ) + if not os.path.isdir("{}-{}".format(name, tag)): + print( + constants.RED.format( + "Please download the helm chart {}-{} before updating secret keys.".format( + name, tag + ) + ) + ) + continue + + ( + is_product_key, + helm_chart_id, + helm_chart_type, + is_helm_credentials, + ) = functions.check_product_key( + helm_chart_name=name, helm_chart_tag=tag + ) + if is_product_key is None: + continue + if is_product_key is True: + print( + constants.BICYAN.format( + "Please enter the " + "Product Key. 
The Product Key is contained in the email you " + "received from Intel confirming your download: " + ), + end=" ", + ) + product_key = input() + + if is_helm_credentials: + helm_list = [] + helm_list.append( + { + "label": name, + "comp_id": helm_chart_id, + "tag": tag, + } + ) + functions.update_helm_keys(helm_list, product_key, None) + else: + print( + constants.GREEN.format( + "Helm chart {}-{} does not need secret keys.".format( + name, tag + ) + ), + end="\n", + ) + sys.exit(-1) + + elif os.path.exists("edgesoftware_configuration.xml"): + manifest = "edgesoftware_configuration.xml" + if not manifest: + print( + constants.RED.format( + "Manifest XML file " + "edgesoftware_configuration.xml not found. " + "Exiting update secret keys operation." + ) + ) + sys.exit(-1) + helm_list = functions.get_helm_charts(manifest) + if not helm_list: + print( + constants.RED.format( + "No helm chart found for updating secret keys." + ), + end="\n", + ) + sys.exit(-1) + + product_key = None + is_product_key = True + is_product_key = functions.check_product_key(manifest_file=manifest) + + if manifest and is_product_key is True: + print( + constants.BICYAN.format( + "Please enter the " + "Product Key. The Product Key is contained in the email you " + "received from Intel confirming your download: " + ), + end=" ", + ) + product_key = input() + if helm_list: + functions.update_helm_keys(helm_list, product_key, manifest) + sys.exit(-1) + + else: + print( + constants.RED.format( + "Please check helm command usage. 
Run edgesoftware helm --help " + ) + ) + + +if __name__ == "__main__": + main(prog_name="edgesoftware") diff --git a/edgesoftware/functions.py b/edgesoftware/functions.py new file mode 100644 index 0000000..14f443e --- /dev/null +++ b/edgesoftware/functions.py @@ -0,0 +1,2265 @@ +import importlib.util +import os +import requests +import subprocess +import sys +import oyaml as yaml +import time +import json +import shutil +import tempfile +import signal +import yaml + +from subprocess import run, PIPE, check_output, Popen, DEVNULL, STDOUT +from collections import OrderedDict +from distutils.dir_util import copy_tree +from distutils.version import LooseVersion +from edgesoftware.common import logger +from edgesoftware.common import service_layer_api as api +from edgesoftware.common import utils +from edgesoftware.common import constants +from json.decoder import JSONDecodeError +from prettytable import PrettyTable + +install_status_json = "install_status.json" +output_log = "output.log" +docker_pull_log = "docker.log" +helm_download_log = "helm.log" +log = None +manifest_file = "edgesoftware_configuration.xml" +temp_conf_file = "" +system_info = utils.sys_info(log) +OS_Version = system_info["os_name"] + + +def signal_handling(signum, frame): + print("Operation aborted by user. 
def get_install_status(component_list):
    """
    Get the component status from install_status log file

    :param component_list: list of components
    :returns: A dictionary mapping component name to a colorized status
              string. Returns an empty dict when the status file cannot be
              read (previously the except path fell off the end and
              implicitly returned None, which crashed callers that iterate
              the result).
    """
    output_dir_path = utils.create_output_dir(manifest_file)
    install_status_json_path = os.path.join(output_dir_path, install_status_json)
    component_status = {}
    try:
        with open(install_status_json_path, "r") as file:
            components = json.load(file)
        for component, val in components.items():
            if component in component_list:
                # FAILED renders red; any other recorded state renders green.
                if val["status"] == "FAILED":
                    status = constants.RED.format(val["status"])
                else:
                    status = constants.GREEN.format(val["status"])
                component_status[component] = status
    except Exception as e:
        # Best-effort: a missing or corrupt status file is logged, not fatal.
        log.error("Failed to get install status due to error {}".format(e))
    return component_status
constants.RED.format("FAILED") + ) + component_status.append(status) + elif utils.is_helm(component): + component_id.append(component.get("comp_id")) + helm_chart = "{}-{}".format( + component.get("label"), component.get("tag") + ) + status = ( + constants.GREEN.format("SUCCESS") + if ( + helm_chart in components + and components[helm_chart]["status"] == "SUCCESS" + ) + else constants.RED.format("FAILED") + ) + component_status.append(status) + else: + component_id.append(components[component.get("label")]["id"]) + status = ( + constants.GREEN.format("SUCCESS") + if ( + component.get("label") in components + and components[component.get("label")]["status"] == "SUCCESS" + ) + else constants.RED.format("FAILED") + ) + component_status.append(status) + utils.format_component_name(component_name) + utils.reboot_msg(component_status) + table.add_column("Id", component_id) + table.add_column("Module", component_name) + table.add_column("Status", component_status) + print(table) + except Exception as e: + log.error("Failed to verify installation status due to error {}".format(e)) + + +def verify_uninstall_status(uninstall_dict): + """ + Print uninstall status + :param uninstall_dict: Dictionary with component details + """ + components = uninstall_dict + table = PrettyTable() + try: + component_name = [] + component_id = [] + component_status = [] + for component, val in components.items(): + component_name.append(component) + component_id.append(val["id"]) + if val["status"] == "FAILED": + status = constants.RED.format(val["status"]) + elif val["status"] == "NOT SUPPORTED": + status = constants.YELLOW.format(val["status"]) + else: + status = constants.GREEN.format(val["status"]) + component_status.append(status) + utils.format_component_name(component_name) + table.add_column("Id", component_id) + table.add_column("Module", component_name) + table.add_column("Status", component_status) + print(table) + except Exception as e: + log.error("Failed to verify uninstall 
status") + + +def update_log( + component, state, component_dict, id="custom", type="native", upgrade=False +): + """ + Update log file with installation status + + :param component: Recipe component + :param state: state of the component + :param component_dict: dictionary to be updated with component details + :param id: id associated with components + """ + if upgrade and os.path.exists(temp_conf_file): + output_dir_path = utils.create_output_dir(temp_conf_file) + else: + output_dir_path = utils.create_output_dir(manifest_file) + + install_status_json_path = os.path.join(output_dir_path, install_status_json) + + component_list = {} + try: + if ( + os.path.exists(install_status_json_path) + and os.stat(install_status_json_path).st_size != 0 + ): + with open(install_status_json_path, "r") as file: + component_list = json.load(file) + log.info( + "Updating status in log file for {} as {}".format(component, state) + ) + for key, value in component_list.items(): + if key == component: + if value["status"] != state: + value["status"] = state + component_val = {} + component_val["status"] = state + component_val["id"] = id + component_val["type"] = type + component_list[component] = component_val + component_dict.update(component_list) + + if upgrade and os.path.exists(temp_conf_file): + output_dir_path = utils.create_output_dir(temp_conf_file) + else: + output_dir_path = utils.create_output_dir(manifest_file) + + install_status_json_path = os.path.join(output_dir_path, install_status_json) + with open(install_status_json_path, "w") as file: + json.dump(component_dict, file) + log.info("Successfully updated status in log file for {}".format(component)) + except Exception as e: + log.error("Failed to update log due to error {}".format(e)) + + +def get_component_list(manifest, path, xml=None, product_key=None, download=False): + # Clone components + components_dict = {} + component_list = {} + recipe_id = None + os_id = None + + if not path: + recipe_id = 
utils.get_recipe_details(manifest)["id"] + os_id = utils.get_recipe_details(manifest)["osId"] + + component_list_download = [] + + if manifest: + component_list = utils.get_component_list(manifest) + components_dict["esb_modules"] = component_list + + if xml: + component_list = utils.get_component_list(xmlstring=xml) + components_dict["esb_modules"] = component_list + for comp_id, val in component_list.items(): + component_list_download.append(val["label"]) + + if download and not path: + log.info( + "Modules to be downloaded by package are {}".format(component_list_download) + ) + log.console("Downloading modules...", color_code=constants.CYAN) + utils.download_component(log, product_key, component_list, recipe_id, os_id) + log.console("Downloading modules completed...", color_code=constants.GREEN) + + if path: + try: + with open(path) as f: + data = yaml.load(f, Loader=yaml.FullLoader) + if "esb_modules" in data: + component_list = data.get("esb_modules") + components_dict["esb_modules"] = component_list + if "custom_modules" in data: + component_list = data.get("custom_modules") + components_dict["custom_modules"] = OrderedDict(component_list) + except yaml.YAMLError as e: + log.console("Failed to read custom YAML file {}".format(e), error=True) + sys.exit(-1) + except Exception as e: + log.console("Failed to read custom YAML file {}".format(e), error=True) + sys.exit(-1) + return components_dict + + +def download_modules(log, product_key, manifest, path): + """ + Run the installation scripts + + :param product_key: Product Key associated with the user + :param manifest: Repo manifest file + :param path: YAML file with package details + """ + # FIXME(mkumari): Remove the hardcoded file name + try: + downloaded_modules = [] + output_dir_path = utils.create_output_dir(manifest) + component_list = get_component_list( + manifest, path, xml=None, product_key=product_key, download=True + ) + esb_modules_list = utils.unzip_modules(component_list["esb_modules"], log) + 
esb_common = utils.get_recipe_details(manifest, common=True)["common_id"] + if not utils.is_LR_installed(log): + utils.install_lanternrock( + component_list["esb_modules"][esb_common], manifest, log + ) + + ( + success_ids, + failed_ids, + success_helm_ids, + failed_helm_ids, + ) = api.get_download_status() + if "type" in utils.LR_data and utils.LR_data["type"] == "download": + utils.send_LR_data( + { + "success_ids": success_ids, + "failed_ids": failed_ids, + "success_helm_ids": success_helm_ids, + "failed_helm_ids": failed_helm_ids, + "success_container_ids": [], + "failed_container_ids": [], + }, + log, + ) + utils.send_telemetry_data( + ( + { + "success_ids": success_ids, + "failed_ids": failed_ids, + "successHelmIds": success_helm_ids, + "failedHelmIds": failed_helm_ids, + } + ), + log, + ) + if "esb_modules" in component_list: + for comp_id, val in component_list.get("esb_modules").items(): + downloaded_modules.append(val["label"]) + component_list["esb_modules"] = esb_modules_list + + except Exception as e: + log.console("Failed to download modules: {}".format(e), error=True) + sys.exit(-1) + + +def run_installation(log, product_key, manifest, path, xml, upgrade): + """ + Run the installation scripts + + :param product_key: Product Key associated with the user + :param manifest: Repo manifest file + :param path: YAML file with package details + :param xml: XML string + """ + log.console("Starting installation", color_code=constants.CYAN) + # FIXME(mkumari): Remove the hardcoded file name + file_manifest = "edgesoftware_configuration.xml" + if upgrade and os.path.exists(temp_conf_file): + output_dir_path = utils.create_output_dir(temp_conf_file) + manifest = temp_conf_file + else: + output_dir_path = utils.create_output_dir(file_manifest) + manifest = file_manifest + + state = "FAILED" + component_list = get_component_list(manifest, path, xml, product_key, True) + if "esb_modules" in component_list: + if xml is None: + esb_common = 
utils.get_recipe_details(manifest, common=True)["common_id"] + if xml is None and esb_common in component_list.get("esb_modules"): + try: + ret = utils.install_common( + component_list["esb_modules"][esb_common], manifest, log + ) + if not utils.is_LR_installed(log): + utils.install_lanternrock( + component_list["esb_modules"][esb_common], manifest, log + ) + if not ret: + if upgrade: + log.console("Failed to upgrade", error=True) + sys.exit(-1) + del component_list["esb_modules"][esb_common] + except Exception as e: + log.console( + "Failed to install 'esb_common'. {}" + " Exiting installation.".format(e), + error=True, + ) + sys.exit(-1) + esb_modules_list = utils.unzip_modules(component_list["esb_modules"], log) + component_list["esb_modules"] = esb_modules_list + to_install = [] + if "esb_modules" in component_list: + for comp_id, val in component_list.get("esb_modules").items(): + if utils.is_image(val): + to_install.append( + { + "label": val["label"], + "type": val.get("type"), + "comp_id": comp_id, + "tag": val.get("tag"), + } + ) + elif utils.is_helm(val): + to_install.append( + { + "label": val["label"], + "type": val.get("type"), + "comp_id": comp_id, + "tag": val.get("tag"), + } + ) + + else: + to_install.append({"label": val["label"], "comp_id": comp_id}) + + if "custom_modules" in component_list: + to_install.extend( + list( + {"label": module} + for module in component_list.get("custom_modules").keys() + ) + ) + + log.console( + "Modules to be installed by package" + " are {}".format(list(module.get("label") for module in to_install)), + color_code=constants.CYAN, + ) + + if upgrade: + log.console( + " Uninstalling previously installed Package ".center(50, "-"), + color_code=constants.YELLOW, + ) + uninstall_ingredient(False, True, False, upgrade=True) + log.console( + "Starting modules installation".center(50, "-"), color_code=constants.YELLOW + ) + log.console( + " WARNING: DO NOT INTERRUPT ".center(50, "*"), color_code=constants.YELLOW + ) + + # 
Re-installing esb_common as during upgrade we do a blanket uninstallation + ordered_dict = OrderedDict() + ordered_dict = utils.get_component_list(manifest) + esb_common = utils.get_recipe_details(manifest, common=True)["common_id"] + if esb_common in ordered_dict: + utils.install_common(ordered_dict[esb_common], manifest, log) + # Reininstallation of LR SDK as it is uninstalled along with esb_common + if constants.Operating_system == "Linux": + utils.install_lanternrock(ordered_dict[esb_common], manifest, log) + + success_ids = [] + failed_ids = [] + success_helm_ids = [] + failed_helm_ids = [] + success_container_ids = [] + failed_container_ids = [] + component_update = {} + + for modules, modules_dict in component_list.items(): + for component_id, value in modules_dict.items(): + component = value["label"] + if utils.is_image(value): + try: + ret = False + module_type = "image" + startTime = time.time() + + if path and os.path.isfile( + "{}.tar".format(os.path.join(value["path"], component_id)) + ): + log.console( + "Loading Image {}".format(value["label"]), + color_code=constants.CYAN, + ) + image_path = os.path.join(value["path"], component_id) + ret = utils.load_image( + log, value["label"], value["tag"], image_path + ) + else: + log.console( + "Pulling Image {}".format(value["label"]), + color_code=constants.CYAN, + ) + ret = utils.intel_registry_pull( + value["label"], value["tag"], product_key, log + ) + endTime = time.time() + + if ret == True: + state = "SUCCESS" + log.console( + "Successfully installed {} " + "took {}".format( + component, + utils.print_time(endTime - startTime), + ), + color_code=constants.GREEN, + ) + update_log( + value["label"] + ":" + value["tag"], + state, + component_update, + component_id, + module_type, + upgrade, + ) + success_container_ids.append(component_id) + else: + state = "FAILED" + log.console( + "Failed to install {} took {}".format( + component, + utils.print_time(endTime - startTime), + ), + error=True, + ) + 
update_log( + value["label"] + ":" + value["tag"], + state, + component_update, + component_id, + module_type, + upgrade, + ) + failed_container_ids.append(component_id) + except Exception as e: + state = "FAILED" + log.console( + "Failed to download Image {} due to error " + "{}".format(value["label"], e), + error=True, + ) + update_log( + value["label"] + ":" + value["tag"], + state, + component_update, + component_id, + module_type, + upgrade, + ) + failed_container_ids.append(component_id) + + elif utils.is_helm(value): + try: + ret = None + helm_list = [] + module_type = "helm" + helm_list.append( + { + "label": value["label"], + "comp_id": component_id, + "tag": value["tag"], + "path": value["path"], + } + ) + + ret = update_helm_keys(helm_list, product_key, True) + if ret == True or ret == None: + state = "SUCCESS" + update_log( + value["label"] + "-" + value["tag"], + state, + component_update, + component_id, + module_type, + upgrade, + ) + success_helm_ids.append(component_id) + + else: + state = "FAILED" + update_log( + value["label"] + "-" + value["tag"], + state, + component_update, + component_id, + module_type, + upgrade, + ) + failed_helm_ids.append(component_id) + except Exception as e: + state = "FAILED" + log.console( + "Failed to update keys {}-{} due to error " + "{}".format(value["label"], value["tag"], e), + error=True, + ) + update_log( + value["label"] + "-" + value["tag"], + state, + component_update, + component_id, + module_type, + upgrade, + ) + failed_helm_ids.append(component_id) + else: + try: + src_path = None + module_type = "native" + custom = False + if modules == "custom_modules": + custom = True + abs_src_path = os.path.abspath(value) + src_path = os.path.join(abs_src_path, "esb_install.py") + else: + abs_src_path = os.path.abspath(value["path"]) + zip_location = os.path.join(abs_src_path, component) + if constants.Operating_system == "Linux": + so_file = "esb_install" + ".so" + elif constants.Operating_system == "Windows": + 
so_file = "esb_install" + ".pyd" + install_file = "esb_install" + ".py" + src_path = os.path.join(zip_location, so_file) + if not os.path.exists(src_path): + src_path = os.path.join(zip_location, install_file) + install = False + if src_path and os.path.exists(src_path): + output_dir = os.path.join(output_dir_path, component) + # Importing the ingredient SO files and initiating the + # install process + try: + if custom: + install = True + else: + so_location = os.path.join(value["path"], component_id) + spec = importlib.util.spec_from_file_location( + "esb_install", src_path + ) + module_name = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module_name) + log.info( + "Imported module for {} is {}".format( + component, module_name + ) + ) + + if utils.check_installed(log, component): + installed = module_name.verify_install( + zip_location, output_dir + ) + if installed: + if utils.prompt_reinstall(log, component): + install = True + else: + install = True + else: + install = True + ret = False + if install: + log.console( + "Installing {}".format(component), + color_code=constants.CYAN, + ) + startTime = time.time() + if custom: + if constants.Operating_system == "Linux": + status = subprocess.run(["python3", src_path]) + elif constants.Operating_system == "Windows": + status = subprocess.run(["python", src_path]) + if not status.returncode: + ret = True + else: + ret = module_name.main_install( + zip_location, output_dir + ) + if ret: + success_ids.append(component_id) + else: + failed_ids.append(component_id) + endTime = time.time() + if ret == 2: + log.console( + "Skipping install for {}".format(component), + color_code=constants.CYAN, + ) + continue + elif ret == True: + state = "SUCCESS" + log.console( + "Successfully installed {} " + "took {}".format( + component, + utils.print_time(endTime - startTime), + ), + color_code=constants.GREEN, + ) + else: + state = "FAILED" + log.console( + "Failed to install {} took {}".format( + component, + 
utils.print_time(endTime - startTime), + ), + error=True, + ) + if not custom: + update_log( + component, + state, + component_update, + component_id, + module_type, + upgrade, + ) + else: + update_log(component, state, component_update) + except Exception as e: + state = "FAILED" + failed_ids.append(component_id) + log.console( + "Failed to install {}. {}".format(component, e), + error=True, + ) + if not custom: + update_log( + component, + state, + component_update, + component_id, + module_type, + upgrade, + ) + else: + update_log(component, state, component_update) + else: + state = "FAILED" + failed_ids.append(component_id) + msg = ( + "Failed to find installation file for {} at {}. " + "Check file location and re-enter the path to" + " start installation." + ) + print_msg = "Failed to install {} due to missing file" + log.console( + msg.format(component, src_path), + print_msg.format(component), + error=True, + ) + if not custom: + update_log( + component, + state, + component_update, + component_id, + module_type, + upgrade, + ) + else: + update_log(component, state, component_update) + except Exception as e: + log.console("Failed to install {}".format(e), error=True) + + # api.update_ingredient_count(success_ids, failed_ids, log) + utils.send_LR_data( + ( + { + "success_ids": success_ids, + "failed_ids": failed_ids, + "success_helm_ids": success_helm_ids, + "failed_helm_ids": failed_helm_ids, + "success_container_ids": success_container_ids, + "failed_container_ids": failed_container_ids, + } + ), + log, + ) + utils.send_telemetry_data( + ( + { + "success_ids": success_ids, + "failed_ids": failed_ids, + "successHelmIds": success_helm_ids, + "failedHelmIds": failed_helm_ids, + "successContainerIds": success_container_ids, + "failedContainerIds": failed_container_ids, + } + ), + log, + ) + log.console("Installation of package complete", color_code=constants.GREEN) + + return list(to_install) + + +def setup_start( + product_key, + manifest=None, + path=None, + 
xml=None, + update=False, + upgrade=False, + download=False, +): + """ + Starting setup + + :param product_key: Product Key associated with the user + :param manifest: manifest.xml file + :param path: Path of a YAML file with component list + :param xml: XML string + """ + # FIXME(mkumari): Remove the hardcoded file name + if upgrade and os.path.exists(temp_conf_file): + output_dir_path = utils.create_output_dir(temp_conf_file) + else: + output_dir_path = utils.create_output_dir(manifest_file) + output_log_path = os.path.join(output_dir_path, output_log) + recipe_id = utils.get_recipe_details(manifest_file)["id"] + print(constants.CYAN.format("Starting the setup...")) + + try: + install_status_json_path = os.path.join(output_dir_path, install_status_json) + if not os.path.exists(install_status_json_path): + with open(install_status_json_path, "w"): + pass + # command = ['touch', install_status_json_path] + # ret = run(command, stdout=PIPE, stderr=PIPE) + except Exception as e: + print( + constants.RED.format("Failed to create install_status_json. 
def _print_ingredient_table(ingredients, column_label):
    """Render id/label/version rows of *ingredients* as a PrettyTable.

    :param ingredients: Iterable of dicts with "id", "label", "version" keys
    :param column_label: Heading for the name column ("Module" or "Package")
    """
    ids = [item["id"] for item in ingredients]
    names = [item["label"] for item in ingredients]
    versions = [item["version"] for item in ingredients]
    table = PrettyTable()
    table.add_column("ID", ids)
    table.add_column(column_label, names)
    table.add_column("Version", versions)
    print(table)


def list_packages(default=False, json_out=False, version=False, local=False):
    """
    List installed packages in a recipe

    :param default: If True, lists all the default packages in the recipe
    :param json_out: Return the output in json format
    :param version: If True, lists all the supported recipes
    :param local: If True, lists all the supported modules(in JSON format) present in XML
    """
    # FIXME(mkumari): Remove the hardcoded file name
    output_dir_path = utils.create_output_dir(manifest_file)
    global log
    output_log_path = os.path.join(output_dir_path, output_log)
    log = logger.Logger(output_log_path)
    install_status_json_path = os.path.join(output_dir_path, install_status_json)
    # Hoisted: get_recipe_details() re-parses the manifest XML on every call.
    recipe_details = utils.get_recipe_details(manifest_file)
    # NOTE: the manifest "id" acts as the recipe version identifier for the
    # service-layer APIs below (was previously stored in a local misleadingly
    # named recipe_version).
    recipe_id = recipe_details["id"]
    os_id = recipe_details["osId"]

    if default:
        resp = api.get_components_list(recipe_id, os_id, log)
        if json_out:
            print(resp)
            return resp
        log.console(
            "Modules in the recommended configuration for " "{}".format(recipe_id),
            color_code=constants.CYAN,
        )
        if resp:
            _print_ingredient_table(resp["ingredients"], "Module")
        return

    if version:
        package_type = recipe_details["packageId"]
        resp = api.get_upgrade_list(package_type, os_id, log)
        if json_out:
            print(resp)
            return resp
        log.console(
            "Packages recommended for " "'{}'".format(package_type),
            color_code=constants.CYAN,
        )
        if resp:
            _print_ingredient_table(resp, "Package")
        return

    if local:
        component_list = get_component_list(manifest_file, None, None, None, None)
        recipe_name = recipe_details["label"]
        recipe_version = recipe_details["version"]

        # Drop the internal esb_common helper module from the user-facing list.
        if "esb_modules" in component_list:
            esb_common = utils.get_recipe_details(manifest_file, common=True)[
                "common_id"
            ]
            if esb_common in component_list.get("esb_modules"):
                del component_list["esb_modules"][esb_common]

        to_install = {"recipe_name": recipe_name, "recipe_version": recipe_version}
        to_install.update({"component_list": []})
        if "esb_modules" in component_list:
            for comp_id, val in component_list.get("esb_modules").items():
                to_install["component_list"].append(
                    {val["label"].replace("_", " "): {"status": None, "id": comp_id}}
                )
        log.console(
            "Modules to be installed by package"
            " are {}".format(json.dumps(to_install, indent=2)),
            color_code=constants.CYAN,
        )

        return json.dumps(to_install)

    if not os.path.exists(install_status_json_path):
        log.console("No packages or modules found.", color_code=constants.YELLOW)
        return

    try:
        table = PrettyTable()
        component_name = []
        component_status = []
        component_id = []

        recipe_name = recipe_details["label"]
        recipe_version = recipe_details["version"]
        component_list = {"recipe_name": recipe_name, "recipe_version": recipe_version}
        with open(install_status_json_path, "r") as status_file:
            try:
                component_list.update({"component_list": []})
                component_list["component_list"].append(json.load(status_file))
            except JSONDecodeError:
                # An empty or partially written status file is tolerated; the
                # listing is simply reported without any modules.
                pass
        if json_out:
            print(json.dumps(component_list))
            return json.dumps(component_list)

        for val in component_list["component_list"]:
            for module, module_val in val.items():
                if module_val["status"] == "FAILED":
                    component_status.append(constants.RED.format(module_val["status"]))
                else:
                    component_status.append(
                        constants.GREEN.format(module_val["status"])
                    )
                component_id.append(module_val["id"])
                component_name.append(module)

        utils.format_component_name(component_name)
        table.add_column("ID", component_id)
        table.add_column("Module", component_name)
        table.add_column("Status", component_status)
        print(table)
    except Exception as e:
        msg = "Failed to read {}. {}"
        log.console(
            msg.format(install_status_json_path, e), msg.format(" ", e), error=True
        )
        sys.exit(-1)
utils.format_component_name(component_name) + table.add_column("ID", component_id) + table.add_column("Module", component_name) + table.add_column("Status", component_status) + print(table) + except Exception as e: + msg = "Failed to read {}. {}" + log.console( + msg.format(install_status_json_path, e), msg.format(" ", e), error=True + ) + sys.exit(-1) + + +def update(package): + """ + Updates a module in a package + + :package: List of packages to update + """ + # FIXME(mkumari): Remove the hardcoded file name + is_product_key = True + product_key = None + is_product_key = check_product_key(manifest_file=manifest_file) + if os.path.exists(manifest_file) and is_product_key is True: + print( + constants.BICYAN.format( + "Please enter the " + "Product Key. The Product Key is contained in the email you " + "received from Intel confirming your download: " + ), + end=" ", + ) + product_key = input() + output_dir_path = utils.create_output_dir(manifest_file) + output_log_path = os.path.join(output_dir_path, output_log) + global log + if not log: + log = logger.Logger(output_log_path) + + recipe_version = utils.get_recipe_details(manifest_file)["id"] + log.console( + "Updating {} modules of package {}".format(list(package), recipe_version), + color_code=constants.CYAN, + ) + utils.send_telemetry_data(({"type": "update"}), log) + utils.send_LR_data({"type": "update"}, log) + + resp = api.get_update_components(list(package), recipe_version, product_key, log) + if resp: + try: + setup_start(product_key, xml=resp, update=True) + except Exception as e: + log.console("Failed to update module {}. 
{}".format(package, e), error=True) + try: + components = utils.get_component_list(xmlstring=resp) + utils.update_xml(manifest_file, resp, log) + except Exception as e: + log.console( + "Error updating the {} XML file due to error {}".format( + manifest_file, e + ), + error=True, + ) + sys.exit(-1) + else: + log.console("Failed to update module", error=True) + sys.exit(-1) + + +def print_log(components_id, all_components=False): + """ + Prints the logs. + + :param components: Name of the components + :param all_components: Specify True to print logs for all components + """ + + def print_installer_log(log_file): + if os.path.exists(log_file): + with open(log_file) as fd: + print(constants.CYAN.format("Start of installer log".center(80, "="))) + print(fd.read()) + print(constants.CYAN.format("End of installer log".center(80, "="))) + else: + print(constants.CYAN.format("No log found")) + + # FIXME(mkumari): Remove the hardcoded file name + output_dir_path = utils.create_output_dir(manifest_file) + output_log_path = os.path.join(output_dir_path, output_log) + if not components_id and not all_components: + print_installer_log(output_log_path) + return + global log + log = logger.Logger(output_log_path) + + component_list = {} + component_dict = utils.get_component_list(manifest_file) + if components_id: + for component_id, val in component_dict.items(): + component = val["label"] + if component_id in components_id: + component_list[component] = component_id + if all_components: + for component_id, val in component_dict.items(): + component = val["label"] + component_list[component] = component_id + if "esb_common" in list(component_list.keys()): + del component_list["esb_common"] + print_installer_log(output_log_path) + for component in list(component_list.keys()): + log_path = os.path.join(output_dir_path, component, "install.log") + if os.path.exists(log_path): + with open(log_path) as fd: + print( + constants.CYAN.format( + "Start of log for module {}".center(80, 
def export_package(package_name=None, user_yaml=None):
    """
    Package the recipe and its downloaded artifacts into <package_name>.zip.

    :param package_name: Name of the package; defaults to the recipe label
    :param user_yaml: Optional YAML file with a user-selected component list
    """
    manifest_file = "edgesoftware_configuration.xml"
    configuration_yaml = "edgesoftware_configuration.yaml"
    user_config_file = "config_install.yml"
    config_file = "config.ini"

    output_dir_path = utils.create_output_dir(manifest_file)
    output_log_path = os.path.join(output_dir_path, output_log)
    global log
    if not log:
        log = logger.Logger(output_log_path)

    if os.path.exists(configuration_yaml):
        os.remove(configuration_yaml)
    component_list = get_component_list(manifest_file, user_yaml, download=False)
    if package_name is None:  # BUGFIX: was `package_name == None`
        package_name = utils.get_recipe_details(manifest_file)["label"]

    support_files = [
        sys.argv[0],
        manifest_file,
        configuration_yaml,
        user_config_file,
        config_file,
    ]

    resource_files = download_package_artifacts(manifest_file, None, None, False, True)
    if resource_files:
        support_files = support_files + resource_files

    try:
        temp_dir = tempfile.mkdtemp()
        esb_modules_dir = os.path.join(temp_dir, package_name, "esb_modules")
        custom_modules_dir = os.path.join(temp_dir, package_name, "custom_modules")
        package_module_dir = os.path.join(temp_dir, package_name)
        os.makedirs(esb_modules_dir)
        os.makedirs(custom_modules_dir)
    except Exception as e:
        log.console("Failed to create package. {}".format(e), error=True)
        sys.exit(-1)

    modules_to_zip = {"esb_modules": OrderedDict({}), "custom_modules": OrderedDict({})}
    # BUGFIX: pre-initialize so the KeyboardInterrupt handler below cannot hit
    # a NameError when the interrupt arrives before the first iteration.
    docker_file_name = ""
    try:
        for modules, modules_dict in component_list.items():
            for component_id, value in modules_dict.items():
                try:
                    docker_file_name = ""
                    if utils.is_image(value):
                        utils.save_image(
                            log, value["label"], value["tag"], component_id
                        )
                        docker_file_name = "{}.tar".format(component_id)
                    # NOTE(review): custom_modules entries are treated as plain
                    # path strings below (os.path.abspath(value), value.split),
                    # yet value["label"] is read here for all entries — confirm
                    # the shape of custom module entries against the caller.
                    component = value["label"]
                    src_path = ""
                    src_path_helm = ""
                    custom = False
                    if modules == "custom_modules":
                        custom = True
                        abs_src_path = os.path.abspath(value)
                        src_path = os.path.join(abs_src_path)
                    else:
                        if not utils.is_image(value):
                            abs_src_path = os.path.abspath(value["path"])
                            zip_location = os.path.join(abs_src_path, component_id)
                            src_path = zip_location + ".zip"
                            src_path_helm = zip_location + ".tgz"
                    if (
                        os.path.exists(src_path)
                        or os.path.exists(src_path_helm)
                        or utils.is_image(value)
                    ):

                        if custom:
                            custom_component = os.path.join(
                                custom_modules_dir, component
                            )
                            if os.path.isfile(src_path):
                                shutil.copy(src_path, custom_modules_dir)
                            elif os.path.isfile(src_path_helm):
                                shutil.copy(src_path_helm, custom_modules_dir)
                            elif utils.is_image(value):
                                if os.path.isfile(docker_file_name):
                                    shutil.copy(docker_file_name, custom_modules_dir)
                                    os.remove(docker_file_name)
                            elif os.path.isdir(src_path_helm):
                                # BUGFIX: this branch tests src_path_helm but
                                # previously copied src_path.
                                copy_tree(src_path_helm, custom_modules_dir)
                            elif os.path.isdir(src_path):
                                copy_tree(src_path, custom_modules_dir)
                            else:
                                log.console(
                                    "Failed to add custom module: {}".format(
                                        custom_component
                                    ),
                                    error=True,
                                )
                                sys.exit(-1)

                            if not utils.is_image(value):
                                path = os.path.join(modules, value.split("/")[-1])
                                modules_to_zip[modules].update({component: path})

                        else:
                            if os.path.isfile(src_path):
                                shutil.copy(src_path, esb_modules_dir)
                            elif os.path.isdir(src_path):
                                copy_tree(src_path, esb_modules_dir)
                            elif os.path.isfile(src_path_helm):
                                shutil.copy(src_path_helm, esb_modules_dir)
                            elif utils.is_image(value):
                                if os.path.isfile(docker_file_name):
                                    shutil.copy(docker_file_name, esb_modules_dir)
                                    os.remove(docker_file_name)
                            elif os.path.isdir(src_path_helm):
                                copy_tree(src_path_helm, esb_modules_dir)
                            else:
                                log.console(
                                    "Failed to add esb module: {}".format(
                                        component_id
                                    ),
                                    error=True,
                                )
                                sys.exit(-1)
                            # Record the in-archive location of this module in
                            # the companion YAML so import can locate it.
                            value["path"] = modules
                            if "esb_install" in value:
                                modules_to_zip[modules].update(
                                    {
                                        component_id: {
                                            "path": value["path"],
                                            "label": component,
                                            "esb_install": value["esb_install"],
                                        }
                                    }
                                )
                            elif utils.is_helm(value):
                                modules_to_zip[modules].update(
                                    {
                                        component_id: {
                                            "path": value["path"],
                                            "label": component,
                                            "tag": value["tag"],
                                            "type": "helm",
                                        }
                                    }
                                )
                            elif utils.is_image(value):
                                modules_to_zip[modules].update(
                                    {
                                        component_id: {
                                            "path": value["path"],
                                            "label": component,
                                            "tag": value["tag"],
                                            "type": "image",
                                        }
                                    }
                                )
                            else:
                                modules_to_zip[modules].update(
                                    {
                                        component_id: {
                                            "path": value["path"],
                                            "label": component,
                                        }
                                    }
                                )
                except Exception as e:
                    log.console("Failed to create package. {}".format(e), error=True)
                    sys.exit(-1)

        try:
            with open(configuration_yaml, "w") as yaml_file:
                yaml.dump(modules_to_zip, yaml_file)

            # Prune empty module directories; abort when nothing was staged.
            file_list = []
            for package, module_dirs, module_file in os.walk(package_module_dir):
                for module_dir in module_dirs:
                    if len(os.listdir(os.path.join(package, module_dir))) == 0:
                        shutil.rmtree(os.path.join(package, module_dir))
                if module_file:
                    file_list.append(module_file)
            if len(file_list) == 0:
                try:
                    if os.path.exists(temp_dir) and os.path.exists(configuration_yaml):
                        shutil.rmtree(temp_dir)
                        os.remove(configuration_yaml)
                except Exception as e:
                    log.console(
                        "Failed to remove temporary files. {}".format(e), error=True
                    )
                log.console("No modules found to export.", color_code=constants.YELLOW)
                sys.exit(-1)

            utils.copy_export_deps(support_files, package_module_dir, log)
            shutil.make_archive(package_name, "zip", temp_dir)

            if os.path.exists(temp_dir) and os.path.exists(configuration_yaml):
                shutil.rmtree(temp_dir)
                os.remove(configuration_yaml)
            else:
                log.console("Failed to remove temp files")
            log.console(
                "Successfully created {}.zip".format(package_name),
                color_code=constants.GREEN,
            )
        except shutil.Error as e:
            msg = "Failed to create archive. {}"
            print_msg = "Failed to create archive."
            log.console(msg.format(e), print_msg, error=True)
            sys.exit(-1)
        except Exception as e:
            msg = "Failed to create package. {}"
            print_msg = "Failed to create package."
            log.console(msg.format(e), print_msg, error=True)
            sys.exit(-1)

    except KeyboardInterrupt:
        log.console("Export aborted by user.Exiting" " export operation", error=True)
        zip_file = package_name + ".zip"
        if os.path.exists(zip_file):
            os.remove(zip_file)
        if os.path.exists(docker_file_name):
            os.remove(docker_file_name)
        if os.path.exists(temp_dir) and os.path.exists(configuration_yaml):
            shutil.rmtree(temp_dir)
            os.remove(configuration_yaml)
        sys.exit(-1)
    except Exception as e:
        msg = "Failed to create package. {}"
        print_msg = "Failed to create package."
        log.console(msg.format(e), print_msg, error=True)
        sys.exit(-1)
def uninstall_ingredient(ingredient_id, all_modules, file, upgrade=False):
    """
    Uninstall installed modules of the current recipe.

    :param ingredient_id: Ids of the modules to uninstall
    :param file: Truthy when uninstalling from an exported modules bundle
                 (note: parameter name shadows the builtin, kept for
                 interface compatibility)
    :param all_modules: If True, uninstall every installed module
    :param upgrade: True when called from the upgrade flow (skips telemetry)
    """
    output_dir_path = utils.create_output_dir(manifest_file)
    output_log_path = os.path.join(output_dir_path, output_log)
    install_status_json_path = os.path.join(output_dir_path, install_status_json)
    global log
    log = logger.Logger(output_log_path)
    recipe_name = utils.get_recipe_details(manifest_file)["label"]
    recipe_version = utils.get_recipe_details(manifest_file)["version"]
    recipe_id = utils.get_recipe_details(manifest_file)["id"]
    if not upgrade:
        utils.send_LR_data({"type": "uninstall", "recipe_id": recipe_id}, log)
        utils.send_telemetry_data(({"type": "uninstall", "recipe_id": recipe_id}), log)

    if file:
        install_path = "_".join(["esb", "modules"])
    else:
        install_path = "_".join([recipe_name, recipe_version])
    if not os.path.exists(install_status_json_path):
        log.console("No Modules to uninstall", color_code=constants.YELLOW)
        sys.exit(-1)
    try:
        components = {}
        with open(install_status_json_path, "r") as status_file:
            components_list = json.load(status_file)
        # Uninstall in reverse installation order.
        component_list = {}
        for component in reversed(list(components_list.keys())):
            component_list[component] = components_list[component]

        if not all_modules:
            id_list = [value["id"] for value in component_list.values()]
            for id in ingredient_id:
                if id not in id_list:
                    log.console("component id {} not installed.".format(id), error=True)
        for name, value in component_list.items():
            component_val = {}
            ingredient_path = os.path.join(install_path, name)
            if all_modules or value["id"] in ingredient_id:
                if utils.is_image(value):
                    component_val["id"] = value["id"]
                    component_val["type"] = "image"
                    components[name] = component_val
                # BUGFIX: this was a bare `if`, so for image entries the else
                # branch below also ran and attached a spurious "path" key.
                elif utils.is_helm(value):
                    component_val["id"] = value["id"]
                    component_val["type"] = "helm"
                    components[name] = component_val
                else:
                    component_val["id"] = value["id"]
                    component_val["path"] = ingredient_path
                    components[name] = component_val

        if len(components):
            log.console(
                "Components to be uninstalled are :{}".format(list(components.keys())),
                color_code=constants.CYAN,
            )
        else:
            log.console("No modules to uninstall", color_code=constants.YELLOW)
            if all_modules:
                utils.clean_esb_common(log, output_dir_path)
            sys.exit(-1)

        success_ids = []
        failed_ids = []
        success_helm_ids = []
        failed_helm_ids = []
        success_container_ids = []
        failed_container_ids = []
        uninstall_status = {}

        def status_update(component, component_id, status="FAILED"):
            # Record the per-component outcome for the final summary.
            uninstall_status.update({component: {"id": component_id, "status": status}})

        for component, value in components.items():
            try:
                if utils.is_image(value):
                    startTime = time.time()
                    ret = 0  # initializing with uninstall FAIL status
                    try:
                        ret = utils.remove_docker_image(component, log)
                    except Exception as e:
                        status = "FAILED"
                        log.error("Failed to uninstall module " " {}".format(e))
                        log.console("Failed to uninstall module", error=True)
                        status_update(component, value["id"], status)
                        continue
                    endTime = time.time()
                    if ret == 1:
                        success_container_ids.append(value["id"])
                        status = "SUCCESS"
                        log.console(
                            "Successfully uninstalled {} "
                            "took {}".format(
                                component, utils.print_time(endTime - startTime)
                            ),
                            color_code=constants.GREEN,
                        )
                        utils.update_xml_json(
                            value["id"], component, manifest_file, log
                        )
                        status_update(component, value["id"], status)
                    else:
                        status = "FAILED"
                        failed_container_ids.append(value["id"])
                        log.console(
                            "Failed to uninstall {} took {}".format(
                                component, utils.print_time(endTime - startTime)
                            ),
                            error=True,
                        )
                        status_update(component, value["id"])

                elif utils.is_helm(value):
                    startTime = time.time()
                    ret = utils.remove_helm_keys(component, log)
                    endTime = time.time()
                    if ret:
                        success_helm_ids.append(value["id"])
                        status = "SUCCESS"
                        log.console(
                            "Successfully uninstalled {} "
                            "took {}".format(
                                component, utils.print_time(endTime - startTime)
                            ),
                            color_code=constants.GREEN,
                        )
                        utils.update_xml_json(
                            value["id"], component, manifest_file, log
                        )
                        status_update(component, value["id"], status)
                    else:
                        status = "FAILED"
                        failed_helm_ids.append(value["id"])
                        log.console(
                            "Failed to uninstall {} took {}".format(
                                component, utils.print_time(endTime - startTime)
                            ),
                            error=True,
                        )
                        status_update(component, value["id"])

                elif not utils.is_image(value) and not utils.is_helm(value):
                    # Regular module: load its bundled esb_install module and
                    # delegate to main_uninstall().
                    # NOTE(review): so_file is unbound on any OS other than
                    # Windows/Linux — confirm constants.Operating_system range.
                    if constants.Operating_system == "Windows":
                        so_file = "esb_install" + ".pyd"
                    elif constants.Operating_system == "Linux":
                        so_file = "esb_install" + ".so"
                    install_file = "esb_install" + ".py"
                    abs_src_path = os.path.abspath(value["path"])
                    src_path = os.path.join(abs_src_path, so_file)
                    if not os.path.exists(src_path):
                        src_path = os.path.join(abs_src_path, install_file)
                    output_dir = os.path.join(output_dir_path, component)
                    if src_path and os.path.exists(src_path):
                        try:
                            spec = importlib.util.spec_from_file_location(
                                "esb_install", src_path
                            )
                            module_name = importlib.util.module_from_spec(spec)
                            spec.loader.exec_module(module_name)

                            log.console(
                                "Uninstalling {}".format(component),
                                color_code=constants.CYAN,
                            )
                            startTime = time.time()
                            ret = 0  # initializing with uninstall FAIL status
                            try:
                                if all_modules:
                                    ret = module_name.main_uninstall(
                                        abs_src_path, output_dir, False
                                    )
                                else:
                                    # In case individual uninstallation is not supported
                                    ret = module_name.main_uninstall(
                                        abs_src_path, output_dir, True
                                    )
                            except Exception as e:
                                status = "NOT SUPPORTED"
                                log.error(
                                    "Module does not support uninstall" " {}".format(e)
                                )
                                log.console(
                                    "Module does not support uninstall", error=True
                                )
                                status_update(component, value["id"], status)
                                continue
                            endTime = time.time()
                            if ret == 1:
                                success_ids.append(value["id"])
                                status = "SUCCESS"
                                log.console(
                                    "Successfully uninstalled {} "
                                    "took {}".format(
                                        component,
                                        utils.print_time(endTime - startTime),
                                    ),
                                    color_code=constants.GREEN,
                                )
                                utils.update_xml_json(
                                    value["id"], component, manifest_file, log
                                )
                                status_update(component, value["id"], status)
                            elif ret == 2:
                                status = "NOT SUPPORTED"
                                log.console(
                                    "Individual uninstall of the module is"
                                    " not supported",
                                    error=True,
                                )
                                status_update(component, value["id"], status)
                            else:
                                failed_ids.append(value["id"])
                                log.console(
                                    "Failed to uninstall {} took {}".format(
                                        component,
                                        utils.print_time(endTime - startTime),
                                    ),
                                    error=True,
                                )
                                status_update(component, value["id"])
                        except Exception as e:
                            msg = "Failed to uninstall {}. {}"
                            print_msg = "Failed to uninstall {}"
                            log.console(
                                msg.format(component, e),
                                print_msg.format(component),
                                error=True,
                            )
                            status_update(component, value["id"])
                    else:
                        if utils.is_image(value):
                            failed_container_ids.append(value["id"])
                        elif utils.is_helm(value):
                            failed_helm_ids.append(value["id"])
                        else:
                            failed_ids.append(value["id"])
                        msg = (
                            "Failed to find installation file for {} at {}. "
                            "Please check file location"
                        )
                        print_msg = "Failed to uninstall {} due to missing file"
                        log.console(
                            msg.format(component, src_path),
                            print_msg.format(component),
                            error=True,
                        )
                        status_update(component, value["id"])
            except Exception as e:
                msg = "Failed to uninstall {}"
                print_msg = "Failed to uninstall"
                log.console(msg.format(e), print_msg, error=True)

        if not upgrade:
            utils.send_LR_data(
                {
                    "success_ids": success_ids,
                    "failed_ids": failed_ids,
                    "success_helm_ids": success_helm_ids,
                    "failed_helm_ids": failed_helm_ids,
                    "success_container_ids": success_container_ids,
                    "failed_container_ids": failed_container_ids,
                },
                log,
            )
            utils.send_telemetry_data(
                (
                    {
                        "success_ids": success_ids,
                        "failed_ids": failed_ids,
                        "successHelmIds": success_helm_ids,
                        "failedHelmIds": failed_helm_ids,
                        "successContainerIds": success_container_ids,
                        "failedContainerIds": failed_container_ids,
                    }
                ),
                log,
            )
        if all_modules:
            utils.clean_esb_common(log, output_dir_path)

        log.console("Uninstall Finished", color_code=constants.GREEN)
        verify_uninstall_status(uninstall_status)

    except KeyboardInterrupt as e:
        log.console(
            "Uninstallation aborted by user. Exiting uninstallation", error=True
        )
        sys.exit(-1)
    except JSONDecodeError as e:
        log.info(
            "Failed to load file:{} with error {}".format(install_status_json_path, e)
        )
        log.console("No modules to uninstall", color_code=constants.YELLOW)
        if all_modules:
            utils.clean_esb_common(log, output_dir_path)
        log.console("Uninstall Finished", color_code=constants.GREEN)
    except Exception as e:
        msg = "Error during uninstallation {}"
        print_msg = "Error during uninstallation"
        log.console(msg.format(e), print_msg, error=True)
def upgrade(package):
    """
    Upgrade the installed package to the selected version.

    :param package: id (recipe GUID) of the package to upgrade to
    """
    installed_path = ""
    temp_dir = tempfile.mkdtemp()
    global temp_conf_file
    temp_conf_file = os.path.join(temp_dir, manifest_file)
    product_key = None
    is_product_key = True
    # Flag to check the Product key requirement for the upgrade requested version of the package
    is_product_key_upgraded = True
    version = ""
    data = {}
    download_package_artifacts(
        manifest_file=manifest_file, recipe_id=package, src_dir=temp_dir
    )
    try:
        if not os.path.exists(manifest_file):
            print(constants.RED.format("Failed to find manifest file"))
            sys.exit(-1)

        is_product_key = check_product_key(manifest_file=manifest_file)
        is_product_key_upgraded = check_product_key(recipe=package)

        # If the upgrade requested version has a product key but current version does not.
        if is_product_key is False and is_product_key_upgraded is True:
            print(
                constants.YELLOW.format(
                    "This version of the package requires a product key. "
                    "Please visit Intel® Edge Software Hub and download the product key."
                ),
                end="\n",
            )

        if is_product_key_upgraded is True:
            print(
                constants.BICYAN.format(
                    "Please enter the "
                    "Product Key. The Product Key is contained in the email you "
                    "received from Intel confirming your download: "
                ),
                end=" ",
            )
            product_key = input()
            if not product_key:
                print(
                    constants.YELLOW.format(
                        "Please enter product key." " Exiting upgrade."
                    )
                )
                sys.exit(-1)

        details = utils.get_recipe_details(manifest_file)
        package_id = details.get("packageId")
        os_id = details.get("osId")
        label = details.get("label")
        version = details.get("version")
        package_name = "_".join([label, version])
        if constants.Operating_system == "Windows":
            installed_path = os.path.join("/log/esb-cli", package_name)
        elif constants.Operating_system == "Linux":
            installed_path = os.path.join("/var/log/esb-cli", package_name)
        install_json = os.path.join(installed_path, "install_status.json")

        data.update(
            {
                "recipeId": package,
                "packageId": package_id,
                "productKey": product_key,
                "osId": os_id,
                "order": "installation",
            }
        )
        resp = api.get_upgrade_package(data)
        if not resp:
            print(
                constants.RED.format(
                    "Failed to fetch upgrade details." " Exiting Upgrade"
                )
            )
            sys.exit(-1)

        # Re-flow the raw XML response so each tag ends up on its own line.
        with open(temp_conf_file, "w") as manifest_out:
            pending = ""
            for char in resp.decode("ascii"):
                pending += char
                if char == ">":
                    manifest_out.write(pending)
                    manifest_out.write("\n")
                    pending = ""

        new_version = utils.get_recipe_details(temp_conf_file).get("version")

        if os.path.exists(install_json) and os.path.getsize(install_json):
            if LooseVersion(new_version) == LooseVersion(version):
                print(
                    constants.YELLOW.format(
                        "The selected package version is" " already installed"
                    )
                )
                sys.exit(-1)
            elif LooseVersion(new_version) < LooseVersion(version):
                print(constants.GREEN.format("You are on the latest version"))
                sys.exit(-1)
            else:
                print(constants.CYAN.format("Upgrading to {}".format(package)))
        else:
            print(
                constants.YELLOW.format(
                    "No package installed to run" " upgrade. Exiting upgrade"
                )
            )
            sys.exit(-1)
    except Exception as e:
        print(constants.RED.format("Upgrade failed. Exiting Upgrade.{}".format(e)))
        sys.exit(-1)

    try:
        output_dir_path = utils.create_output_dir(temp_conf_file)
        output_log_path = os.path.join(output_dir_path, output_log)
        tmp_details = utils.get_recipe_details(temp_conf_file)
        tmp_label = tmp_details.get("label")
        tmp_version = tmp_details.get("version")
        tmp_id = tmp_details.get("id")
        tmp_package_name = "_".join([tmp_label, tmp_version])

        # Switch logging over to the new package's log file.
        # NOTE(review): assumes the module-level `log` is already initialized
        # here (clean() would raise otherwise) — confirm against callers.
        global log
        log.clean()
        log = logger.Logger(output_log_path)

        print(
            constants.YELLOW.format(
                "WARNING: Upgrade in progress." " Do not interrupt".center(52, "*")
            )
        )

        utils.send_LR_data({"type": "upgrade", "recipe_id": tmp_id}, log)
        utils.send_telemetry_data(({"type": "upgrade", "recipe_id": tmp_id}), log)
        component_list = setup_start(product_key, temp_conf_file, upgrade=True)
        if component_list:
            utils.upgrade_cleanup(log, installed_path, temp_dir, package_name)
            verify_installation_status(component_list, upgrade=True)
            log.console("Finished upgrade", color_code=constants.GREEN)
        else:
            utils.upgrade_cleanup(
                log, output_dir_path, temp_dir, tmp_package_name, error=True
            )
            log.console("Failed to upgrade", error=True)

    except KeyboardInterrupt as e:
        log.console("Upgrade interrupted by user. Exiting Upgrade", error=True)
        utils.upgrade_cleanup(
            log, output_dir_path, temp_dir, tmp_package_name, error=True
        )
        sys.exit(-1)
    except Exception as e:
        log.console("Upgrade failed. Exiting Upgrade {}".format(e), error=True)
        utils.upgrade_cleanup(
            log, output_dir_path, temp_dir, tmp_package_name, error=True
        )
        sys.exit(-1)
Exiting Upgrade {}".format(e), error=True) + utils.upgrade_cleanup( + log, output_dir_path, temp_dir, tmp_package_name, error=True + ) + sys.exit(-1) + + +def pull(image, tag, product_key=None, compose_log=None): + """ + Pull Docker Image + + :param product_key: Product Key associated with the user + :param image_name: Docker Image to download + """ + global log + if not compose_log: + try: + output_dir_path = utils.create_output_dir() + if constants.Operating_system == "Linux": + subprocess.run(["sudo", "chown", os.environ["USER"], output_dir_path]) + output_log_path = os.path.join(output_dir_path, docker_pull_log) + if not log: + log = logger.Logger(output_log_path) + except Exception as e: + print("Failed to create log file. {}".format(e)) + sys.exit(-1) + else: + log = compose_log + + if not utils.LR_data.get("type") and utils.is_LR_installed(log): + utils.send_LR_data({"product_key": product_key, "type": "docker-pull"}, log) + utils.send_telemetry_data( + ({"product_key": product_key, "type": "docker-pull"}), log + ) + + startTime = time.time() + log.console("Pulling Image {}:{}".format(image, tag)) + ret = utils.intel_registry_pull(image, tag, product_key, log) + endTime = time.time() + if ret == True: + state = "SUCCESS" + log.console( + "Successfully installed {} " + "took {}".format( + image, + utils.print_time(endTime - startTime), + ), + color_code=constants.GREEN, + ) + else: + state = "FAILED" + log.console( + "Failed to install {} took {}".format( + image, + utils.print_time(endTime - startTime), + ), + error=True, + ) + + +def pull_docker_compose(compose_file, product_key=None): + """ + Get image names from docker compose file + + :param compose_file: Path to docker compose file to parse + :param product_key: Product Key associated with the user + """ + try: + output_dir_path = utils.create_output_dir() + output_log_path = os.path.join(output_dir_path, docker_pull_log) + if constants.Operating_system == "Linux": + subprocess.run(["sudo", "chown", 
os.environ["USER"], output_dir_path]) + global log + if not log: + log = logger.Logger(output_log_path) + except Exception as e: + print("Failed to create log file. {}".format(e)) + sys.exit(-1) + + """ + If the docker compose file available, it will parse the file, + and read the value beside image tag. + """ + if os.path.exists(compose_file): + path = os.path.abspath(os.getcwd()) + "/" + compose_file + getImages = [] + try: + with open(path, "r") as f: + docker_compose = yaml.safe_load(f) + for item in docker_compose.items(): + if item[0] == "services": + services = item[1] + for i in services: + if "image" in services[i]: + image = services[i]["image"].split(":") + if len(image) == 1: + name = image[0] + tag = "latest" + else: + name, tag = image + getImages.append([name, tag]) + + except yaml.YAMLError as exc: + msg = "Failed to parse file {}: {}" + print_msg = "Failed to parse file {}" + log.console( + msg.format(compose_file, exc), + print_msg.format(compose_file), + error=True, + ) + + except Exception as e: + log.console( + "Failed to read images from {}: {}".format(compose_file, e), error=True + ) + + # Pull each image availabe in docker compose file + for image in getImages: + name = image[0] + tag = image[1] + pull(name, tag, product_key, log) + else: + log.console("{} not available".format(compose_file), error=True) + return + + +def check_product_key( + manifest_file=None, + image=None, + tag=None, + recipe=None, + helm_chart_name=None, + helm_chart_tag=None, +): + """ + Get if Product key is needed by the Package or Image + + :param manifest_file: XML file to read components + :param image : Image Name of the Image + :param tag : Tag of the Image + :param recipe : Recipe GUID + :param helm_chart_name : Name of the Helm Chart + :param helm_chart_tag : Tag of the Helm chart + """ + recipe_id = None + try: + if manifest_file: + output_dir_path = utils.create_output_dir(manifest_file) + output_log_path = os.path.join(output_dir_path, output_log) + + elif 
helm_chart_name: + output_dir_path = utils.create_output_dir() + output_log_path = os.path.join(output_dir_path, helm_download_log) + else: + output_dir_path = utils.create_output_dir() + output_log_path = os.path.join(output_dir_path, docker_pull_log) + if constants.Operating_system == "Linux": + subprocess.run(["sudo", "chown", os.environ["USER"], output_dir_path]) + global log + if not log: + log = logger.Logger(output_log_path) + + except Exception as e: + print("Failed to create log file. {}".format(e)) + sys.exit(-1) + if manifest_file and recipe is None: + recipe_id = utils.get_recipe_details(manifest_file)["id"] + if recipe: + recipe_id = recipe + return api.check_product_key( + log, + recipe_id=recipe_id, + image=image, + tag=tag, + helm_chart_name=helm_chart_name, + helm_chart_tag=helm_chart_tag, + ) + + +def get_config_xml(configuration_id): + """ + Gets configuration xml file from service layer via api.get_config_xml() + + :param configuration_id: Unique ID for package which user gets from ESH-UI + """ + + xml_overwritten = False + resp = api.get_config_xml(configuration_id) + fetched_xml_file = "edgesoftware_configuration.xml" + if resp and os.path.exists(fetched_xml_file): + xml_overwritten = True + # This part formats downloaded XML file line by line + if resp: + line = "" + with open(fetched_xml_file, "w") as f: + for char in resp.decode("ascii"): + line += char + if char == ">": + f.write(line) + f.write("\n") + line = "" + else: + print( + constants.RED.format( + "Failed to fetch Manifest XML file " + "edgesoftware_configuration.xml. Exiting." + ) + ) + sys.exit(-1) + try: + output_dir_path = utils.create_output_dir(fetched_xml_file) + output_log_path = os.path.join(output_dir_path, output_log) + if constants.Operating_system == "Linux": + subprocess.run(["sudo", "chown", os.environ["USER"], output_dir_path]) + global log + if not log: + log = logger.Logger(output_log_path) + + except Exception as e: + log.error("Failed to create log file. 
{}".format(e)) + sys.exit(-1) + + utils.send_LR_data({"configuration_id": configuration_id}, log) + utils.send_telemetry_data(({"configuration_id": configuration_id}), log) + log.info( + "User config found. Manifest XML file " + "edgesoftware_configuration.xml fetched succesfully." + ) + return fetched_xml_file, xml_overwritten + + +def download_helm_chart(name, tag, helm_chart_id, helm_chart_type, product_key=None): + """ + Download Helm Chart + + Args: + name (string): Helm chart Name. + tag (string): Helm chart Tag. + helm_chart_id (string): Helm id. + helm_chart_type (string): Helm Chart Registry Type + product_key (string, optional): Product key for the chart. Defaults to None. + """ + global log + try: + output_dir_path = utils.create_output_dir() + if constants.Operating_system == "Linux": + subprocess.run(["sudo", "chown", os.environ["USER"], output_dir_path]) + output_log_path = os.path.join(output_dir_path, helm_download_log) + if not log: + log = logger.Logger(output_log_path) + except Exception as e: + print("Failed to create log file. {}".format(e)) + sys.exit(-1) + + if not utils.LR_data.get("type"): + utils.send_LR_data({"type": "helm-pull", "product_key": product_key}, log) + utils.send_telemetry_data(({"type": "helm-pull", "product_key": product_key}), log) + api.fetch_helm( + log, product_key, name, tag, helm_chart_id, None, helm_chart_type, unzip=True + ) + success_helm_chart_names, success_helm_ids = api.get_helm_pull_status() + if utils.is_LR_installed(log) and utils.LR_data.get("type") == "helm-pull": + utils.send_LR_data({"helm_chart": success_helm_chart_names}, log) + utils.send_telemetry_data(({"successHelmIds": success_helm_ids}), log) + + +def get_helm_charts(manifest): + """ + Gets helm charts list from manifest file. 
+ + Args: + manifest (String): Location of XML file + + Returns: + [list]: List of helm charts + """ + # send a ordered array of all helm charts + helm_list = [] + component_list = get_component_list(manifest, None, None, None, None) + if "esb_modules" in component_list: + for comp_id, val in component_list.get("esb_modules").items(): + if utils.is_helm(val): + helm_list.append( + { + "label": val["label"], + "comp_id": comp_id, + "tag": val.get("tag"), + "path": val["path"], + } + ) + + return helm_list + + +def update_helm_keys(helm_list, product_key, is_manifest): + """ + Updates Helm chart keys. Invoked from edgesoftware helm -u install command + + Args: + helm_list (list): list of helm charts + product_key (string): Product Key Value + is_manifest (bool, optional): Manifest File. Defaults to False. + + Returns: + Bool/None: True/False/None + """ + global log + try: + if not log: + if not is_manifest: + output_dir_path = utils.create_output_dir() + if constants.Operating_system == "Linux": + subprocess.run( + ["sudo", "chown", os.environ["USER"], output_dir_path] + ) + output_log_path = os.path.join(output_dir_path, helm_download_log) + log = logger.Logger(output_log_path) + else: + output_dir_path = utils.create_output_dir(manifest_file) + output_log_path = os.path.join(output_dir_path, output_log) + log = logger.Logger(output_log_path) + except Exception as e: + print("Failed to create log file. 
{}".format(e)) + sys.exit(-1) + + for helm_chart in helm_list: + + if is_manifest: + helm_chart_path = os.path.join( + helm_chart["path"], helm_chart["label"] + "-" + helm_chart["tag"] + ) + if not os.path.isdir(helm_chart_path): + log.console( + "Failed to update Helm chart secret keys {}-{}.\nHelm chart directory not found at {}.".format( + helm_chart["label"], helm_chart["tag"], helm_chart_path + ), + color_code=constants.RED, + ) + if isinstance(is_manifest, bool): + return False + continue + + log.console( + "Updating secret keys for Helm chart {}-{}".format( + helm_chart["label"], helm_chart["tag"] + ), + color_code=constants.CYAN, + ) + startTime = time.time() + ret = None + # Since it is called form XML, we don't know the credentials requirement + + if is_manifest: + # Gets the status for all the helm charts in the XML. + _, is_helm_credentials = api.get_helm_registry_credentials( + helm_chart["label"], helm_chart["tag"], log + ) + + if is_helm_credentials: + ret = utils.update_k8s_secret_store( + helm_chart["label"], + helm_chart["tag"], + helm_chart["comp_id"], + product_key, + log, + ) + + elif is_helm_credentials is None: + ret = False + + else: + ret = utils.update_k8s_secret_store( + helm_chart["label"], + helm_chart["tag"], + helm_chart["comp_id"], + product_key, + log, + ) + + endTime = time.time() + + if ret is None: + log.info( + "Helm chart {}-{} does not need secret keys.".format( + helm_chart["label"], helm_chart["tag"] + ) + ) + elif ret: + log.console( + "Successfully updated secret keys for Helm chart {}-{} took {}".format( + helm_chart["label"], + helm_chart["tag"], + utils.print_time(endTime - startTime), + ), + color_code=constants.GREEN, + ) + + else: + log.console( + "Failed to update secret keys for Helm chart {}-{} took {}".format( + helm_chart["label"], + helm_chart["tag"], + utils.print_time(endTime - startTime), + ), + error=True, + ) + if len(helm_list) == 1: + return ret + + +def download_package_artifacts( + manifest_file, 
recipe_id=None, src_dir=None, remove_previous=False, export=False +): + """ + Downloads Package Artifacts + + Args: + manifest_file (string): Manifest File Location + recipe_id (string, optional): Package GUID. Defaults to None. + src_dir (string, optional): Temporary Directory. Defaults to None. + remove_previous (BOOL, optional): Remove Previous Files. Defaults to False. + export (BOOL, optional): If called from export. Defaults to False. + """ + try: + if manifest_file: + output_dir_path = utils.create_output_dir(manifest_file) + output_log_path = os.path.join(output_dir_path, output_log) + if constants.Operating_system == "Linux": + subprocess.run(["sudo", "chown", os.environ["USER"], output_dir_path]) + global log + if not log: + log = logger.Logger(output_log_path) + + except Exception as e: + print("Failed to create log file. {}".format(e)) + sys.exit(-1) + if manifest_file and recipe_id is None and src_dir is None: + recipe_id = utils.get_recipe_details(manifest_file)["id"] + + file_list = api.download_package_artifacts( + log, recipe_id, src_dir, remove_previous, export + ) + if export: + return file_list diff --git a/edgesoftware_configuration.xml b/edgesoftware_configuration.xml new file mode 100644 index 0000000..63e375e --- /dev/null +++ b/edgesoftware_configuration.xml @@ -0,0 +1,6 @@ + +
+ + + + \ No newline at end of file diff --git a/releasenotes/notes/Add-support-to-automate-tests-with-tox-5da4ebedd18316b6.yaml b/releasenotes/notes/Add-support-to-automate-tests-with-tox-5da4ebedd18316b6.yaml new file mode 100644 index 0000000..ca97a49 --- /dev/null +++ b/releasenotes/notes/Add-support-to-automate-tests-with-tox-5da4ebedd18316b6.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add support for tox environment for automating tests such as flake8, + docs, releasenotes, bandit, testing. diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py new file mode 100644 index 0000000..e363675 --- /dev/null +++ b/releasenotes/source/conf.py @@ -0,0 +1,59 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) + + +# -- Project information ----------------------------------------------------- + +project = 'Edge Software Hub' +copyright = '2020' +author = '' + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = ['reno.sphinxext' +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. 
+# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = 'Python 3' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = [] + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'alabaster' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = [] diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst new file mode 100644 index 0000000..ab15744 --- /dev/null +++ b/releasenotes/source/index.rst @@ -0,0 +1,9 @@ +=============================== +Edge Software Hub Release Notes +=============================== + +.. toctree:: + :maxdepth: 1 + + master + diff --git a/releasenotes/source/master.rst b/releasenotes/source/master.rst new file mode 100644 index 0000000..a2551ee --- /dev/null +++ b/releasenotes/source/master.rst @@ -0,0 +1,6 @@ +================== +MVP2 Release Notes +================== + +.. release-notes:: + :branch: master diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 0000000..64cf0e7 --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1,16 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. 
+ +setuptools>=57.4.0 +pyinstaller>=4.4 +Cython>=0.29.24 +bandit>=1.7.0 +flake8>=3.9.2 +pylint>=2.9.6 +pycodestyle +pyflakes>=2.3.1 +black>=21.7b0 +isort>=5.9.3 +autopep8 +pre-commit diff --git a/requirements-test.txt b/requirements-test.txt new file mode 100644 index 0000000..d5b4bf9 --- /dev/null +++ b/requirements-test.txt @@ -0,0 +1,12 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. + +bandit>=1.1.0 # Apache-2.0 +doc8>=0.6.0 # Apache-2.0 +ddt>=1.0.1 # MIT +stestr>=2.0.0 +python-subunit>=1.0.0 # Apache-2.0/BSD +coverage!=4.4,>=4.1 # Apache-2.0 +flake8 +pytest diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..69c2ec3 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,17 @@ +prettytable +oyaml +click +requests +defusedxml +inputimeout +termcolor +wget +pathlib2 +PyYAML>=5.4.1 +psutil +#scp>=0.14.1 +#paramiko>=2.8.0 +ruamel.yaml +pexpect +docker +inquirer diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..d033c6f --- /dev/null +++ b/setup.py @@ -0,0 +1,45 @@ +from setuptools import setup + +with open("README.md", "r", encoding="utf-8") as fh: + long_description = fh.read() + +setup( + name="edgesoftware", + version="1.0.1", + description="A CLI wrapper for management of Intel® Edge Software Hub packages.", + long_description=long_description, + long_description_content_type="text/markdown", + author="Intel Corporation", + author_email="sys_recipes@intel.com", + packages=["edgesoftware", "edgesoftware.common"], + license="Proprietary - Intel", + install_requires=[ + "Click>=7.0", + "requests>=2.27.1", + "oyaml", + "prettytable", + "inputimeout", + "psutil", + "py-cpuinfo", + "wget", + "colorama", + "docker", + "defusedxml", + "tqdm", + "six", + "termcolor", + "pathlib2", + "setuptools>=58.0.0", + "PyYAML>=5.4.1", + "scp", + "paramiko", + "ruamel.yaml", 
+ "pexpect", + "docker", + "inquirer", + "kubernetes", + ], + entry_points={"console_scripts": ["edgesoftware = edgesoftware.edgesoftware:main"]}, + python_requires=">=3.6", +) + diff --git a/test/__init__.py b/test/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/test/functional/__init__.py b/test/functional/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/test/functional/test.py b/test/functional/test.py new file mode 100644 index 0000000..f198a5e --- /dev/null +++ b/test/functional/test.py @@ -0,0 +1,31 @@ +import unittest +import subprocess + +class TestESBCLI(unittest.TestCase): + def setUp(self): + pass + + def test_help_command(self): + ret = subprocess.run(["edgesoftware", "--help"], + stdout = subprocess.DEVNULL) + self.assertEqual(ret.returncode, 0) + + def test_help_command_failure(self): + ret = subprocess.run(["edgesoftware", "help"], + stderr = subprocess.DEVNULL) + self.assertNotEqual(ret.returncode, 0) + + def test_log_command(self): + ret = subprocess.run(["edgesoftware", "log"], + stdout = subprocess.DEVNULL) + self.assertEqual(ret.returncode, 0) + + def test_list_command(self): + ret = subprocess.run(["edgesoftware", "list"], + stdout = subprocess.DEVNULL) + self.assertEqual(ret.returncode, 0) + + +if __name__ == "__main__": + unittest.main() + diff --git a/test/unit/__init__.py b/test/unit/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/test/unit/test_functions.py b/test/unit/test_functions.py new file mode 100644 index 0000000..7352f75 --- /dev/null +++ b/test/unit/test_functions.py @@ -0,0 +1,22 @@ +from edgesoftware import functions +import unittest + + +class TestList(unittest.TestCase): + def setUp(self): + pass + + def tearDown(self): + pass + + def test_list_packages_json(self): + # TODO: Create dummy json file and test with different data + ret = functions.list_packages() + self.assertTrue(ret) + ret = functions.list_packages(json_out=True) + self.assertTrue(ret) + ret = 
functions.list_packages(default=True, json_out=True) + self.assertTrue(ret) + + + diff --git a/test/unit/test_utils.py b/test/unit/test_utils.py new file mode 100644 index 0000000..804d949 --- /dev/null +++ b/test/unit/test_utils.py @@ -0,0 +1,15 @@ +import unittest +from edgesoftware.common import utils + +class TestUtils(unittest.TestCase): + def setUp(self): + pass + + def test_format_component_name_success(self): + test = ["test_add", "aaa", "", "_", "-"] + ret = utils.format_component_name(test) + self.assertEquals(ret, None) + self.assertEquals(test, ["test add", "aaa", "", " ", "-"]) + +if __name__ == "__main__": + unittest.main() diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..f7b65e6 --- /dev/null +++ b/tox.ini @@ -0,0 +1,70 @@ +[tox] +minversion = 3.1.1 +envlist = py3,pep8 +skipsdist = True +ignore_basepython_conflict = True + +[testenv] +basepython = python3 +usedevelop = True +install_command = pip install -U {opts} {packages} +whitelist_externals = bash + find + rm +setenv = + VIRTUAL_ENV={envdir} + TEST_PATH=./test/ + +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +#pytest test/functional/test.py +commands = + find . 
-type f -name "*.py[c|o]" -delete + stestr --test-path=./test/ run {posargs} + +[testenv:bandit] +deps = -r{toxinidir}/test-requirements.txt +commands = bandit -r edgesoftware -x tests -n5 -ll + +[testenv:docs] +deps = + -r{toxinidir}/requirements.txt + -r{toxinidir}/doc/requirements.txt +commands = + sphinx-build -W -b html doc/source doc/build/html + +[testenv:pep8] +commands = + flake8 + # Run security linter + bandit -r edgesoftware -x tests -n5 -ll + +#coverage run -m pytest test/functional/test.py test/unit/test_utils.py +[testenv:cover] +setenv = + {[testenv]setenv} + PYTHON=coverage run --source edgesoftware --parallel-mode +commands = + stestr run {posargs} + coverage combine + coverage html -d cover + coverage xml -o cover/coverage.xml + +[testenv:releasenotes] +deps = + -r{toxinidir}/requirements.txt + -r{toxinidir}/doc/requirements.txt +commands = + sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html + +[testenv:venv] +commands = {posargs} + +[flake8] +# E123, E125 skipped as they are invalid PEP-8. + +show-source = True +ignore = E123,E125 +builtins = _ +exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build