diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml
index 6914a6b4..1d499b45 100644
--- a/.github/workflows/pull_request.yml
+++ b/.github/workflows/pull_request.yml
@@ -14,7 +14,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python: [3.7.13, 3.8, 3.9]
+        python: [3.8, 3.9, 3.12]
     steps:
       - name: Checkout WrapanAPI
         uses: actions/checkout@v3
@@ -35,11 +35,5 @@ jobs:
           pip install --compile --no-cache-dir pycurl
           pip install -U -e .[test]
 
-      # coming soon
-      #- name: Pre Commit Checks
-      #  uses: pre-commit/action@v3.0.0
-      #  with:
-      #    extra_args: --show-diff-on-failure
-
       - name: Run Unit Tests
         run: py.test tests/ -v --cov wrapanapi
diff --git a/.gitignore b/.gitignore
index 0685cdf3..1d138b35 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,7 @@
 .pytest_cache/
 .coverage
 .venv*
+dist/
 .eggs
 *.egg-info
 .env/
@@ -10,4 +11,4 @@
 *.vars.json
 .tox/
 AUTHORS
-ChangeLog
\ No newline at end of file
+ChangeLog
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 00000000..b745f950
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,33 @@
+ci:
+  autofix_prs: false
+repos:
+  - repo: https://github.com/asottile/reorder_python_imports
+    rev: v3.9.0
+    hooks:
+      - id: reorder-python-imports
+        args:
+          - --application-directories=.:wrapanapi
+  - repo: https://github.com/psf/black
+    rev: 23.3.0
+    hooks:
+      - id: black
+        args: [--safe, --quiet, --line-length, "100"]
+        require_serial: true
+  - repo: https://github.com/PyCQA/flake8
+    rev: 6.0.0
+    hooks:
+      - id: flake8
+        args:
+          - --max-line-length=100
+          - --ignore=W503,E203
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.4.0
+    hooks:
+      - id: trailing-whitespace
+      - id: end-of-file-fixer
+      - id: debug-statements
+  - repo: https://github.com/asottile/pyupgrade
+    rev: v3.3.2
+    hooks:
+      - id: pyupgrade
+        args: [--py3-plus, --py38-plus]
diff --git a/LICENSE b/LICENSE
index c6759866..99443a66 100644
--- a/LICENSE
+++ b/LICENSE
@@ -16,4 +16,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
\ No newline at end of file
+THE SOFTWARE.
diff --git a/README.rst b/README.rst
index 2dc32e62..664fb714 100644
--- a/README.rst
+++ b/README.rst
@@ -76,7 +76,7 @@ Each management system is invoked usually with a hostname and some credentials
 .. code-block:: python
 
     from wrapanapi.virtualcenter import VMWareSystem
-    
+
     system = VMWareSystem(hostname='10.0.0.0', username="root", password="password")
     system.list_vm()
@@ -90,7 +90,7 @@ it doesn't support. This behaviour may change in the future as more and more div
     from base import WrapanapiAPIBase
 
     class RHEVMSystem(WrapanapiAPIBase):
-    
+
         _stats_available = {
             'num_vm': lambda self: self.api.get_summary().get_vms().total,
             'num_host': lambda self: len(self.list_host()),
@@ -98,7 +98,7 @@ it doesn't support. This behaviour may change in the future as more and more div
             'num_template': lambda self: len(self.list_template()),
             'num_datastore': lambda self: len(self.list_datastore()),
         }
-    
+
         def __init__(self, hostname, username, password, **kwargs):
             super(RHEVMSystem, self).__init__(kwargs)
@@ -116,5 +116,5 @@ This module was originally developed for assisting in the ManageIQ testing team.
 
 Contributing
 ------------
-The guidelines to follow for this project can be found in the 
+The guidelines to follow for this project can be found in the
 cfme `dev_guide `_.
diff --git a/pyproject.toml b/pyproject.toml
index 9bf96d44..d4d0a4fc 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -75,6 +75,9 @@ dependencies = [
 ]
 
 [project.optional-dependencies]
+dev = [
+    "pre-commit",
+]
 test = [
     "mock",
     "pytest",
diff --git a/tests/__init__.py b/tests/__init__.py
index 40a96afc..e69de29b 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -1 +0,0 @@
-# -*- coding: utf-8 -*-
diff --git a/tests/resources/hawkular/inventory/feeds/328c17e1-c97f-4583-89d8-73feb0cf47f6/resources/WildfyServerOne~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleDSMWTest/data.json b/tests/resources/hawkular/inventory/feeds/328c17e1-c97f-4583-89d8-73feb0cf47f6/resources/WildfyServerOne~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleDSMWTest/data.json
index 9e331681..a9747b15 100644
--- a/tests/resources/hawkular/inventory/feeds/328c17e1-c97f-4583-89d8-73feb0cf47f6/resources/WildfyServerOne~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleDSMWTest/data.json
+++ b/tests/resources/hawkular/inventory/feeds/328c17e1-c97f-4583-89d8-73feb0cf47f6/resources/WildfyServerOne~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleDSMWTest/data.json
@@ -9,4 +9,4 @@
     "Password" : "sa"
   },
   "name" : "configuration"
-}
\ No newline at end of file
+}
diff --git a/tests/resources/hawkular/inventory/feeds/328c17e1-c97f-4583-89d8-73feb0cf47f6/resources/WildfyServerOne~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleXADSMWTest/data.json b/tests/resources/hawkular/inventory/feeds/328c17e1-c97f-4583-89d8-73feb0cf47f6/resources/WildfyServerOne~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleXADSMWTest/data.json
index a851fc80..a7f6afce 100644
--- a/tests/resources/hawkular/inventory/feeds/328c17e1-c97f-4583-89d8-73feb0cf47f6/resources/WildfyServerOne~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleXADSMWTest/data.json
+++ b/tests/resources/hawkular/inventory/feeds/328c17e1-c97f-4583-89d8-73feb0cf47f6/resources/WildfyServerOne~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleXADSMWTest/data.json
@@ -9,4 +9,4 @@
     "Password" : "sa"
   },
   "name" : "configuration"
-}
\ No newline at end of file
+}
diff --git a/tests/resources/hawkular/inventory/feeds/328c17e1-c97f-4583-89d8-73feb0cf47f6/resources/WildfyServerOne~~/WildfyServerOne~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleDS/data.json b/tests/resources/hawkular/inventory/feeds/328c17e1-c97f-4583-89d8-73feb0cf47f6/resources/WildfyServerOne~~/WildfyServerOne~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleDS/data.json
index 5e51ca64..8d02a0fe 100644
--- a/tests/resources/hawkular/inventory/feeds/328c17e1-c97f-4583-89d8-73feb0cf47f6/resources/WildfyServerOne~~/WildfyServerOne~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleDS/data.json
+++ b/tests/resources/hawkular/inventory/feeds/328c17e1-c97f-4583-89d8-73feb0cf47f6/resources/WildfyServerOne~~/WildfyServerOne~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleDS/data.json
@@ -9,4 +9,4 @@
     "Password" : "sa"
   },
   "name" : "configuration"
-}
\ No newline at end of file
+}
diff --git a/tests/resources/hawkular/inventory/feeds/328c17e1-c97f-4583-89d8-73feb0cf47f6/resources/WildfyServerOne~~/WildfyServerOne~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleXADS/data.json b/tests/resources/hawkular/inventory/feeds/328c17e1-c97f-4583-89d8-73feb0cf47f6/resources/WildfyServerOne~~/WildfyServerOne~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleXADS/data.json
index bdaadfe8..3bba0371 100644
--- a/tests/resources/hawkular/inventory/feeds/328c17e1-c97f-4583-89d8-73feb0cf47f6/resources/WildfyServerOne~~/WildfyServerOne~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleXADS/data.json
+++ b/tests/resources/hawkular/inventory/feeds/328c17e1-c97f-4583-89d8-73feb0cf47f6/resources/WildfyServerOne~~/WildfyServerOne~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleXADS/data.json
@@ -9,4 +9,4 @@
     "Password" : "sa"
   },
   "name" : "configuration"
-}
\ No newline at end of file
+}
diff --git a/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resourceTypes.json b/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resourceTypes.json
index e2527b5d..ceb71979 100644
--- a/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resourceTypes.json
+++ b/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resourceTypes.json
@@ -90,4 +90,3 @@
     "id": "Hawkular WildFly Agent"
   }
 ]
-
diff --git a/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resources/Local~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleDSMWTest/data.json b/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resources/Local~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleDSMWTest/data.json
index 5a450099..5d6de4d7 100644
--- a/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resources/Local~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleDSMWTest/data.json
+++ b/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resources/Local~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleDSMWTest/data.json
@@ -9,4 +9,4 @@
     "Password" : "sa"
   },
   "name" : "configuration"
-}
\ No newline at end of file
+}
diff --git a/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resources/Local~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleXADSMWTest/data.json b/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resources/Local~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleXADSMWTest/data.json
index 3d129376..9126a20e 100644
--- a/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resources/Local~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleXADSMWTest/data.json
+++ b/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resources/Local~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleXADSMWTest/data.json
@@ -9,4 +9,4 @@
     "Password" : "sa"
   },
   "name" : "configuration"
-}
\ No newline at end of file
+}
diff --git a/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resources/Local~%2Fsubsystem%3Ddatasources%2Fdata-source%3DKeycloakDSMWTest/data.json b/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resources/Local~%2Fsubsystem%3Ddatasources%2Fdata-source%3DKeycloakDSMWTest/data.json
index 2e287f8e..a78c4f93 100644
--- a/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resources/Local~%2Fsubsystem%3Ddatasources%2Fdata-source%3DKeycloakDSMWTest/data.json
+++ b/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resources/Local~%2Fsubsystem%3Ddatasources%2Fdata-source%3DKeycloakDSMWTest/data.json
@@ -9,4 +9,4 @@
@@ "Password" : "sa" }, "name" : "configuration" -} \ No newline at end of file +} diff --git a/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resources/Local~~/Local~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleDS/data.json b/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resources/Local~~/Local~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleDS/data.json index de440bdd..a1ea8b92 100644 --- a/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resources/Local~~/Local~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleDS/data.json +++ b/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resources/Local~~/Local~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleDS/data.json @@ -9,4 +9,4 @@ "Password" : "sa" }, "name" : "configuration" -} \ No newline at end of file +} diff --git a/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resources/Local~~/Local~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleXADS/data.json b/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resources/Local~~/Local~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleXADS/data.json index 27a7c7c8..e90f03c8 100644 --- a/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resources/Local~~/Local~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleXADS/data.json +++ b/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resources/Local~~/Local~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleXADS/data.json @@ -9,4 +9,4 @@ "Password" : "sa" }, "name" : "configuration" -} \ No newline at end of file +} diff --git a/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resources/Local~~/Local~%2Fsubsystem%3Ddatasources%2Fdata-source%3DKeycloakDS/data.json b/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resources/Local~~/Local~%2Fsubsystem%3Ddatasources%2Fdata-source%3DKeycloakDS/data.json index 0286a559..ad03cea9 100644 --- a/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resources/Local~~/Local~%2Fsubsystem%3Ddatasources%2Fdata-source%3DKeycloakDS/data.json +++ b/tests/resources/hawkular/inventory/feeds/5ea2d44a-6b07-424b-b54e-0b4c798d1353/resources/Local~~/Local~%2Fsubsystem%3Ddatasources%2Fdata-source%3DKeycloakDS/data.json @@ -9,4 +9,4 @@ "Password" : "sa" }, "name" : "configuration" -} \ No newline at end of file +} diff --git a/tests/resources/hawkular/inventory/feeds/c22fb9d6-0ffc-42cd-bf5b-e1d046398fe5/resources/WildfyServerTwo~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleDSMWTest/data.json b/tests/resources/hawkular/inventory/feeds/c22fb9d6-0ffc-42cd-bf5b-e1d046398fe5/resources/WildfyServerTwo~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleDSMWTest/data.json index d57d510e..f7e4af09 100644 --- a/tests/resources/hawkular/inventory/feeds/c22fb9d6-0ffc-42cd-bf5b-e1d046398fe5/resources/WildfyServerTwo~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleDSMWTest/data.json +++ b/tests/resources/hawkular/inventory/feeds/c22fb9d6-0ffc-42cd-bf5b-e1d046398fe5/resources/WildfyServerTwo~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleDSMWTest/data.json @@ -9,4 +9,4 @@ "Password" : "sa" }, "name" : "configuration" -} \ No newline at end of file +} diff --git 
diff --git a/tests/resources/hawkular/inventory/feeds/c22fb9d6-0ffc-42cd-bf5b-e1d046398fe5/resources/WildfyServerTwo~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleXADSMWTest/data.json b/tests/resources/hawkular/inventory/feeds/c22fb9d6-0ffc-42cd-bf5b-e1d046398fe5/resources/WildfyServerTwo~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleXADSMWTest/data.json
index a0b701ad..e349089f 100644
--- a/tests/resources/hawkular/inventory/feeds/c22fb9d6-0ffc-42cd-bf5b-e1d046398fe5/resources/WildfyServerTwo~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleXADSMWTest/data.json
+++ b/tests/resources/hawkular/inventory/feeds/c22fb9d6-0ffc-42cd-bf5b-e1d046398fe5/resources/WildfyServerTwo~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleXADSMWTest/data.json
@@ -9,4 +9,4 @@
     "Password" : "sa"
   },
   "name" : "configuration"
-}
\ No newline at end of file
+}
diff --git a/tests/resources/hawkular/inventory/feeds/c22fb9d6-0ffc-42cd-bf5b-e1d046398fe5/resources/WildfyServerTwo~~/WildfyServerTwo~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleDS/data.json b/tests/resources/hawkular/inventory/feeds/c22fb9d6-0ffc-42cd-bf5b-e1d046398fe5/resources/WildfyServerTwo~~/WildfyServerTwo~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleDS/data.json
index 9788d817..fc415a2f 100644
--- a/tests/resources/hawkular/inventory/feeds/c22fb9d6-0ffc-42cd-bf5b-e1d046398fe5/resources/WildfyServerTwo~~/WildfyServerTwo~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleDS/data.json
+++ b/tests/resources/hawkular/inventory/feeds/c22fb9d6-0ffc-42cd-bf5b-e1d046398fe5/resources/WildfyServerTwo~~/WildfyServerTwo~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleDS/data.json
@@ -9,4 +9,4 @@
     "Password" : "sa"
   },
   "name" : "configuration"
-}
\ No newline at end of file
+}
diff --git a/tests/resources/hawkular/inventory/feeds/c22fb9d6-0ffc-42cd-bf5b-e1d046398fe5/resources/WildfyServerTwo~~/WildfyServerTwo~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleXADS/data.json b/tests/resources/hawkular/inventory/feeds/c22fb9d6-0ffc-42cd-bf5b-e1d046398fe5/resources/WildfyServerTwo~~/WildfyServerTwo~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleXADS/data.json
index 03171600..157eb67f 100644
--- a/tests/resources/hawkular/inventory/feeds/c22fb9d6-0ffc-42cd-bf5b-e1d046398fe5/resources/WildfyServerTwo~~/WildfyServerTwo~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleXADS/data.json
+++ b/tests/resources/hawkular/inventory/feeds/c22fb9d6-0ffc-42cd-bf5b-e1d046398fe5/resources/WildfyServerTwo~~/WildfyServerTwo~%2Fsubsystem%3Ddatasources%2Fdata-source%3DExampleXADS/data.json
@@ -9,4 +9,4 @@
     "Password" : "sa"
   },
   "name" : "configuration"
-}
\ No newline at end of file
+}
diff --git a/tests/resources/hawkular/inventory/feeds/master.eap7%20domain/resources/Host Controller/data.json b/tests/resources/hawkular/inventory/feeds/master.eap7%20domain/resources/Host Controller/data.json
index 6c91154a..e8c57fc6 100644
--- a/tests/resources/hawkular/inventory/feeds/master.eap7%20domain/resources/Host Controller/data.json
+++ b/tests/resources/hawkular/inventory/feeds/master.eap7%20domain/resources/Host Controller/data.json
@@ -5,7 +5,7 @@
     "Server State": "running",
     "Product Name": "WildFly Full",
     "Local Host Name": "skondkar-hawkular.bc.jonqe.lab.eng.bos.redhat.com"
-    
+
   },
   "name": "configuration"
 }
diff --git a/tests/resources/hawkular/inventory/status.json b/tests/resources/hawkular/inventory/status.json
index 87efe74c..c15b67b2 100644
--- a/tests/resources/hawkular/inventory/status.json
+++ b/tests/resources/hawkular/inventory/status.json
@@ -3,4 +3,4 @@
"Built-From-Git-SHA1": "3c8ac6648aa0ec33643ae1b98faadc475d2c6f02", "Inventory-Implementation": "org.hawkular.inventory.impl.tinkerpop.TinkerpopInventory", "Initialized": "true" -} \ No newline at end of file +} diff --git a/tests/resources/hawkular/metrics/status.json b/tests/resources/hawkular/metrics/status.json index 614be2df..d4f888d9 100644 --- a/tests/resources/hawkular/metrics/status.json +++ b/tests/resources/hawkular/metrics/status.json @@ -3,4 +3,4 @@ "Implementation-Version":"0.26.0.Final", "Built-From-Git-SHA1":"fe3ef3ccc36a0c85bcdbf3e96aae8d36a636f7f2", "Cassandra":"up" -} \ No newline at end of file +} diff --git a/tests/test_hawkular.py b/tests/test_hawkular.py index 89aa538a..ae868ef8 100644 --- a/tests/test_hawkular.py +++ b/tests/test_hawkular.py @@ -1,16 +1,17 @@ -# -*- coding: utf-8 -*- """Unit tests for Hawkular client.""" import json import os from random import sample +from unittest.mock import patch from urllib.parse import urlparse import pytest -from mock import patch from wrapanapi.systems import HawkularSystem -from wrapanapi.systems.hawkular import (CanonicalPath, Resource, ResourceData, - ResourceType) +from wrapanapi.systems.hawkular import CanonicalPath +from wrapanapi.systems.hawkular import Resource +from wrapanapi.systems.hawkular import ResourceData +from wrapanapi.systems.hawkular import ResourceType def fake_urlopen(c_client, url, headers, params): @@ -19,23 +20,24 @@ def fake_urlopen(c_client, url, headers, params): the filesystem. """ # Map path from url to a file - parsed_url = urlparse("{}/{}".format(c_client.api_entry, url)).path - if parsed_url.startswith('/hawkular/inventory/traversal') \ - or parsed_url.startswith('/hawkular/inventory/entity'): + parsed_url = urlparse(f"{c_client.api_entry}/{url}").path + if parsed_url.startswith("/hawkular/inventory/traversal") or parsed_url.startswith( + "/hawkular/inventory/entity" + ): # Change parsed url, when we use default one, 'd;configuration' replaced with 'd' - parsed_url = "{}/{}".format(urlparse("{}".format(c_client.api_entry)).path, url) - parsed_url = parsed_url.replace('traversal/', '') - parsed_url = parsed_url.replace('entity/', '') - parsed_url = parsed_url.replace('f;', 'feeds/') - parsed_url = parsed_url.replace('r;', 'resources/', 1) - parsed_url = parsed_url.replace('r;', '') - parsed_url = parsed_url.replace('rt;', 'resourceTypes/') - parsed_url = parsed_url.replace('rl;defines/', '') - parsed_url = parsed_url.replace('type=rt', 'resourceTypes') - parsed_url = parsed_url.replace('type=r', 'resources') - parsed_url = parsed_url.replace('type=f', 'feeds') - parsed_url = parsed_url.replace('d;configuration', 'data') - resource_file = os.path.normpath("tests/resources/{}.json".format(parsed_url)) + parsed_url = "{}/{}".format(urlparse(f"{c_client.api_entry}").path, url) + parsed_url = parsed_url.replace("traversal/", "") + parsed_url = parsed_url.replace("entity/", "") + parsed_url = parsed_url.replace("f;", "feeds/") + parsed_url = parsed_url.replace("r;", "resources/", 1) + parsed_url = parsed_url.replace("r;", "") + parsed_url = parsed_url.replace("rt;", "resourceTypes/") + parsed_url = parsed_url.replace("rl;defines/", "") + parsed_url = parsed_url.replace("type=rt", "resourceTypes") + parsed_url = parsed_url.replace("type=r", "resources") + parsed_url = parsed_url.replace("type=f", "feeds") + parsed_url = parsed_url.replace("d;configuration", "data") + resource_file = os.path.normpath(f"tests/resources/{parsed_url}.json") # Must return a file-like object return 
     return json.load(open(resource_file))
@@ -67,27 +69,28 @@ def provider():
     A stub urlopen() implementation that load json responses from
     the filesystem.
     """
-    if not os.getenv('HAWKULAR_HOSTNAME'):
-        patcher = patch('wrapanapi.clients.rest_client.ContainerClient.get_json', fake_urlopen)
+    if not os.getenv("HAWKULAR_HOSTNAME"):
+        patcher = patch("wrapanapi.clients.rest_client.ContainerClient.get_json", fake_urlopen)
         patcher.start()
-        patcher = patch('wrapanapi.clients.rest_client.ContainerClient.delete_status',
-                        fake_urldelete)
+        patcher = patch(
+            "wrapanapi.clients.rest_client.ContainerClient.delete_status", fake_urldelete
+        )
         patcher.start()
-        patcher = patch('wrapanapi.clients.rest_client.ContainerClient.post_status', fake_urlpost)
+        patcher = patch("wrapanapi.clients.rest_client.ContainerClient.post_status", fake_urlpost)
        patcher.start()
-        patcher = patch('wrapanapi.clients.rest_client.ContainerClient.put_status', fake_urlput)
+        patcher = patch("wrapanapi.clients.rest_client.ContainerClient.put_status", fake_urlput)
         patcher.start()
 
     hwk = HawkularSystem(
-        hostname=os.getenv('HAWKULAR_HOSTNAME', 'localhost'),
-        protocol=os.getenv('HAWKULAR_PROTOCOL', 'http'),
-        port=os.getenv('HAWKULAR_PORT', 8080),
-        username=os.getenv('HAWKULAR_USERNAME', 'jdoe'),
-        password=os.getenv('HAWKULAR_PASSWORD', 'password'),
-        ws_connect=False
+        hostname=os.getenv("HAWKULAR_HOSTNAME", "localhost"),
+        protocol=os.getenv("HAWKULAR_PROTOCOL", "http"),
+        port=os.getenv("HAWKULAR_PORT", 8080),
+        username=os.getenv("HAWKULAR_USERNAME", "jdoe"),
+        password=os.getenv("HAWKULAR_PASSWORD", "password"),
+        ws_connect=False,
     )
     yield hwk
-    if not os.getenv('HAWKULAR_HOSTNAME'):
+    if not os.getenv("HAWKULAR_HOSTNAME"):
         patcher.stop()
@@ -106,23 +109,25 @@ def datasource(provider):
     assert r_data
 
     name_ext = "MWTest"
-    new_datasource = Resource(name="{}{}".format(datasource.name, name_ext),
-                              id="{}{}".format(datasource.id, name_ext),
-                              path=CanonicalPath(
-                                  "{}{}".format(datasource.path.to_string, name_ext)))
+    new_datasource = Resource(
+        name=f"{datasource.name}{name_ext}",
+        id=f"{datasource.id}{name_ext}",
+        path=CanonicalPath(f"{datasource.path.to_string}{name_ext}"),
+    )
     new_datasource.path.resource_id = new_datasource.path.resource_id[1]
 
-    resource_type = ResourceType(id=None, name=None,
-                                 path=CanonicalPath("/rt;Datasource"))
+    resource_type = ResourceType(id=None, name=None, path=CanonicalPath("/rt;Datasource"))
 
     new_datasource_data = ResourceData(name=None, path=None, value=r_data.value)
     new_datasource_data.value.update(
-        {"JNDI Name": "{}{}".format(r_data.value["JNDI Name"], name_ext),
-         "Enabled": "true"
-         }
+        {"JNDI Name": "{}{}".format(r_data.value["JNDI Name"], name_ext), "Enabled": "true"}
     )
 
     _delete_resource(provider, new_datasource)
-    result = _create_resource(provider, resource=new_datasource,
-                              resource_data=new_datasource_data, resource_type=resource_type)
+    result = _create_resource(
+        provider,
+        resource=new_datasource,
+        resource_data=new_datasource_data,
+        resource_type=resource_type,
+    )
     assert result, "Create should be successful"
     r_data = _read_resource_data(provider, new_datasource)
     assert r_data, "Resource data should exist"
@@ -133,7 +138,7 @@
 
 
 def test_list_feed(provider):
-    """ Checks whether any feed is listed """
+    """Checks whether any feed is listed"""
     feeds = provider.inventory.list_feed()
     assert len(feeds) > 0, "No feeds are listed"
     for feed in feeds:
@@ -142,7 +147,7 @@
 
 
 def test_list_resource_type(provider):
-    """ Checks whether any resource type is listed and has attributes """
+    """Checks whether any resource type is listed and has attributes"""
     feeds = provider.inventory.list_feed()
     for feed in feeds:
         res_types = provider.inventory.list_resource_type(feed_id=feed.id)
@@ -154,7 +159,7 @@
 
 
 def test_list_server(provider):
-    """ Checks whether any server is listed and has attributes"""
+    """Checks whether any server is listed and has attributes"""
     servers = provider.inventory.list_server()
     for server in servers:
         assert server.id
@@ -165,7 +170,7 @@
 
 
 def test_list_domain(provider):
-    """ Checks whether any domain is listed and has attributes"""
+    """Checks whether any domain is listed and has attributes"""
     domains = provider.inventory.list_domain()
     for domain in domains:
         assert domain.id
@@ -176,7 +181,7 @@
 
 
 def test_list_server_group(provider):
-    """ Checks whether any group is listed and has attributes"""
+    """Checks whether any group is listed and has attributes"""
     domains = provider.inventory.list_domain()
     for domain in domains:
         server_groups = provider.inventory.list_server_group(domain.path.feed_id)
@@ -189,7 +194,7 @@
 
 
 def test_list_server_deployment(provider):
-    """ Checks whether any deployment is listed and has attributes """
+    """Checks whether any deployment is listed and has attributes"""
     deployments = provider.inventory.list_server_deployment()
     for deployment in deployments:
         assert deployment.id
@@ -199,7 +204,7 @@
 
 
 def test_list_messaging(provider):
-    """ Checks whether any messaging is listed and has attributes """
+    """Checks whether any messaging is listed and has attributes"""
     messagings = provider.inventory.list_messaging()
     for messaging in messagings:
         assert messaging.id
@@ -209,12 +214,13 @@
 
 
 def test_get_config_data(provider):
-    """ Checks whether resource data is provided and has attributes """
+    """Checks whether resource data is provided and has attributes"""
     found = False
     servers = provider.inventory.list_server()
     for server in servers:
-        r_data = provider.inventory.get_config_data(feed_id=server.path.feed_id,
-                                                    resource_id=server.id)
+        r_data = provider.inventory.get_config_data(
+            feed_id=server.path.feed_id, resource_id=server.id
+        )
         if r_data:
             found = True
             assert r_data.name
@@ -224,54 +230,61 @@
 
 
 def test_edit_resource_data(provider, datasource):
-    """ Checks whether resource data is edited """
+    """Checks whether resource data is edited"""
     r_data = _read_resource_data(provider, datasource)
     assert r_data, "Resource data should exist"
-    r_data.value['Enabled'] = "false"
+    r_data.value["Enabled"] = "false"
     result = _update_resource_data(provider, r_data, datasource)
     assert result, "Update should be successful"
     r_data = _read_resource_data(provider, datasource)
     # skip value verification for mocked provider
-    if os.getenv('HAWKULAR_HOSTNAME'):
-        assert r_data.value['Enabled'] == "false"
+    if os.getenv("HAWKULAR_HOSTNAME"):
+        assert r_data.value["Enabled"] == "false"
 
 
 def test_delete_resource(provider, datasource):
-    """ Checks whether resource is deleted """
+    """Checks whether resource is deleted"""
     r_data = _read_resource_data(provider, datasource)
     assert r_data, "Resource data should exist"
     result = _delete_resource(provider, datasource)
     assert result, "Delete should be successful"
     r_data = _read_resource_data(provider, datasource)
     # skip deleted verification for mocked provider
-    if os.getenv('HAWKULAR_HOSTNAME'):
+    if os.getenv("HAWKULAR_HOSTNAME"):
         assert not r_data
 
 
 def _read_resource_data(provider, resource):
-    return provider.inventory.get_config_data(feed_id=resource.path.feed_id,
-                                              resource_id=resource.path.resource_id)
+    return provider.inventory.get_config_data(
+        feed_id=resource.path.feed_id, resource_id=resource.path.resource_id
+    )
 
 
 def _create_resource(provider, resource, resource_data, resource_type):
-    return provider.inventory.create_resource(resource=resource, resource_data=resource_data,
-                                              resource_type=resource_type,
-                                              feed_id=resource.path.feed_id)
+    return provider.inventory.create_resource(
+        resource=resource,
+        resource_data=resource_data,
+        resource_type=resource_type,
+        feed_id=resource.path.feed_id,
+    )
 
 
 def _update_resource_data(provider, resource_data, resource):
-    return provider.inventory.edit_config_data(resource_data=resource_data,
-                                               feed_id=resource.path.feed_id,
-                                               resource_id=resource.path.resource_id)
+    return provider.inventory.edit_config_data(
+        resource_data=resource_data,
+        feed_id=resource.path.feed_id,
+        resource_id=resource.path.resource_id,
+    )
 
 
 def _delete_resource(provider, resource):
-    return provider.inventory.delete_resource(feed_id=resource.path.feed_id,
-                                              resource_id=resource.path.resource_id)
+    return provider.inventory.delete_resource(
+        feed_id=resource.path.feed_id, resource_id=resource.path.resource_id
+    )
 
 
 def test_list_server_datasource(provider):
-    """ Checks whether any datasource is listed and has attributes """
+    """Checks whether any datasource is listed and has attributes"""
     found = False
     datasources = provider.inventory.list_server_datasource()
     if len(datasources) > 0:
@@ -280,12 +293,13 @@ def test_list_server_datasource(provider):
             assert datasource.id
             assert datasource.name
             assert datasource.path
-    assert found | provider.inventory._stats_available['num_datasource'](provider.inventory) > 0,\
-        "No any datasource is listed for any of feeds, but they exists"
+    assert (
+        found | provider.inventory._stats_available["num_datasource"](provider.inventory) > 0
+    ), "No any datasource is listed for any of feeds, but they exists"
 
 
 def test_path(provider):
-    """ Checks whether path returned correctly """
+    """Checks whether path returned correctly"""
     feeds = provider.inventory.list_feed()
     for feed in feeds:
         assert feed.path
@@ -299,47 +313,47 @@ def test_path(provider):
 
 
 def test_num_server(provider):
-    """ Checks whether number of servers is returned correct """
+    """Checks whether number of servers is returned correct"""
     servers_count = 0
     feeds = provider.inventory.list_feed()
     for feed in feeds:
         servers_count += len(provider.inventory.list_server(feed_id=feed.id))
-    num_server = provider.inventory._stats_available['num_server'](provider.inventory)
+    num_server = provider.inventory._stats_available["num_server"](provider.inventory)
     assert num_server == servers_count, "Number of servers is wrong"
 
 
 def test_num_deployment(provider):
-    """ Checks whether number of deployments is returned correct """
+    """Checks whether number of deployments is returned correct"""
     deployments_count = 0
     feeds = provider.inventory.list_feed()
     for feed in feeds:
         deployments_count += len(provider.inventory.list_server_deployment(feed_id=feed.id))
-    num_deployment = provider.inventory._stats_available['num_deployment'](provider.inventory)
+    num_deployment = provider.inventory._stats_available["num_deployment"](provider.inventory)
     assert num_deployment == deployments_count, "Number of deployments is wrong"
 
 
 def test_num_datasource(provider):
-    """ Checks whether number of datasources is returned correct """
+    """Checks whether number of datasources is returned correct"""
     datasources_count = 0
     feeds = provider.inventory.list_feed()
     for feed in feeds:
         datasources_count += len(provider.inventory.list_server_datasource(feed_id=feed.id))
-    num_datasource = provider.inventory._stats_available['num_datasource'](provider.inventory)
+    num_datasource = provider.inventory._stats_available["num_datasource"](provider.inventory)
     assert num_datasource == datasources_count, "Number of datasources is wrong"
 
 
 def test_num_messaging(provider):
-    """ Checks whether number of messagings is returned correct """
+    """Checks whether number of messagings is returned correct"""
     messagings_count = 0
     feeds = provider.inventory.list_feed()
     for feed in feeds:
         messagings_count += len(provider.inventory.list_messaging(feed_id=feed.id))
-    num_messaging = provider.inventory._stats_available['num_messaging'](provider.inventory)
+    num_messaging = provider.inventory._stats_available["num_messaging"](provider.inventory)
     assert num_messaging == messagings_count, "Number of messagings is wrong"
 
 
 def test_list_event(provider):
-    """ Checks whether is any event listed """
+    """Checks whether is any event listed"""
     events = provider.alert.list_event()
     if len(events) > 0:
         event = events[0]
diff --git a/tests/test_vm_and_template_systems.py b/tests/test_vm_and_template_systems.py
index cd0375f2..4dd08d53 100644
--- a/tests/test_vm_and_template_systems.py
+++ b/tests/test_vm_and_template_systems.py
@@ -6,33 +6,34 @@
 
 If running within a cfme venv, disable the cfme plugins like so:
 $ pytest test_vm_and_template_systems.py -p no:cfme -s
 """
-
 import datetime
 import logging
 
-import pytest
 import fauxfactory
+import pytest
 from wait_for import wait_for
 
 import wrapanapi
 from wrapanapi import VmState
 from wrapanapi.entities import StackMixin
-from wrapanapi.systems.ec2 import EC2Image, EC2Instance, StackStates
 from wrapanapi.exceptions import MultipleItemsError
+from wrapanapi.systems.ec2 import EC2Image
+from wrapanapi.systems.ec2 import EC2Instance
+from wrapanapi.systems.ec2 import StackStates
 
-log = logging.getLogger('wrapanapi.tests.test_vm_and_template_systems')
+log = logging.getLogger("wrapanapi.tests.test_vm_and_template_systems")
 logging.basicConfig(level=logging.INFO)
 
-PROVIDER_KEYS_LIST = ['ec2west']
+PROVIDER_KEYS_LIST = ["ec2west"]
 # 'rhos11', 'vsphere65-nested', 'scvmm', 'azure', 'gce_central', 'ec2west', 'rhv41'
 # TODO test against all provider keys
 
 
 @pytest.fixture(params=PROVIDER_KEYS_LIST)
 def provider_crud(request):
-    providers = pytest.importorskip('cfme.utils.providers')
+    providers = pytest.importorskip("cfme.utils.providers")
     log.info("Using provider key: %s", request.param)
     return providers.get_crud(request.param)
@@ -41,23 +42,26 @@
 def test_template(provider_crud):
     deploy_args = {}
     try:
-        template_name = provider_crud.data['templates']['small_template']['name']
-        deploy_args.update({'template': template_name})
+        template_name = provider_crud.data["templates"]["small_template"]["name"]
+        deploy_args.update({"template": template_name})
     except KeyError:
-        raise KeyError('small_template not defined for Provider {} in cfme_data.yaml'
-                       .format(provider_crud.data['name']))
+        raise KeyError(
+            "small_template not defined for Provider {} in cfme_data.yaml".format(
+                provider_crud.data["name"]
+            )
+        )
 
     log.info(
-        "Using template %s on provider %s",
-        deploy_args['template'], provider_crud.data['name']
+ "Using template %s on provider %s", deploy_args["template"], provider_crud.data["name"] ) - deploy_args.update(vm_name='TBD') + deploy_args.update(vm_name="TBD") deploy_args.update(provider_crud.deployment_helper(deploy_args)) log.debug("Deploy args: %s", deploy_args) if isinstance(provider_crud.mgmt, wrapanapi.systems.AzureSystem): template = provider_crud.mgmt.get_template( - template_name, container=deploy_args['template_container']) + template_name, container=deploy_args["template_container"] + ) else: template = provider_crud.mgmt.get_template(template_name) @@ -68,13 +72,16 @@ def test_template(provider_crud): def test_vm(provider_crud, test_template): deploy_args = {} try: - template_name = provider_crud.data['templates']['small_template']['name'] - deploy_args.update({'template': template_name}) + template_name = provider_crud.data["templates"]["small_template"]["name"] + deploy_args.update({"template": template_name}) except KeyError: - raise KeyError('small_template not defined for Provider {} in cfme_data.yaml' - .format(provider_crud.data['name'])) + raise KeyError( + "small_template not defined for Provider {} in cfme_data.yaml".format( + provider_crud.data["name"] + ) + ) - vm_name = 'test-{}'.format(fauxfactory.gen_alphanumeric(6)).lower() + vm_name = f"test-{fauxfactory.gen_alphanumeric(6)}".lower() log.info("Deploying VM %s", vm_name) deploy_args.update(vm_name=vm_name) @@ -92,7 +99,6 @@ def test_vm(provider_crud, test_template): def test_sanity(provider_crud, test_template, test_vm): - template = test_template vm = test_vm mgmt = provider_crud.mgmt @@ -109,17 +115,17 @@ def test_sanity(provider_crud, test_template, test_vm): if isinstance(vm, EC2Instance): log.info("Testing ec2 instance tags") - vm.set_tag('key1', 'somedata') - assert vm.get_tag_value('key1') == 'somedata' - vm.unset_tag('key1', 'somedata') - assert vm.get_tag_value('key1') is None + vm.set_tag("key1", "somedata") + assert vm.get_tag_value("key1") == "somedata" + vm.unset_tag("key1", "somedata") + assert vm.get_tag_value("key1") is None if isinstance(template, EC2Image): log.info("Testing ec2 image tags") - template.set_tag('key1', 'somedata') - assert template.get_tag_value('key1') == 'somedata' - template.unset_tag('key1', 'somedata') - assert template.get_tag_value('key1') is None + template.set_tag("key1", "somedata") + assert template.get_tag_value("key1") == "somedata" + template.unset_tag("key1", "somedata") + assert template.get_tag_value("key1") is None log.info("Listing VMs") vms = mgmt.list_vms() @@ -168,7 +174,7 @@ def test_sanity(provider_crud, test_template, test_vm): assert not vm.ip try: - new_name = 'test-{}'.format(fauxfactory.gen_alphanumeric(6)).lower() + new_name = f"test-{fauxfactory.gen_alphanumeric(6)}".lower() vm.rename(new_name) assert vm.name == new_name except NotImplementedError: diff --git a/wrapanapi/__init__.py b/wrapanapi/__init__.py index 7d49cc8c..4fadfb82 100644 --- a/wrapanapi/__init__.py +++ b/wrapanapi/__init__.py @@ -1,5 +1,6 @@ # Imports for convenience - +from .entities.vm import VmState +from .systems.container.rhopenshift import Openshift from .systems.ec2 import EC2System from .systems.google import GoogleCloudSystem from .systems.hawkular import HawkularSystem @@ -13,13 +14,21 @@ from .systems.scvmm import SCVMMSystem from .systems.vcloud import VmwareCloudSystem from .systems.virtualcenter import VMWareSystem -from .systems.container.rhopenshift import Openshift - -from .entities.vm import VmState __all__ = [ - 'EC2System', 'GoogleCloudSystem', 
-    'EC2System', 'GoogleCloudSystem', 'HawkularSystem',
-    'LenovoSystem', 'AzureSystem', 'NuageSystem', 'OpenstackSystem',
-    'OpenstackInfraSystem', 'RedfishSystem', 'RHEVMSystem', 'SCVMMSystem',
-    'VmwareCloudSystem', 'VMWareSystem', 'Openshift', 'VmState'
+    "EC2System",
+    "GoogleCloudSystem",
+    "HawkularSystem",
+    "LenovoSystem",
+    "AzureSystem",
+    "NuageSystem",
+    "OpenstackSystem",
+    "OpenstackInfraSystem",
+    "RedfishSystem",
+    "RHEVMSystem",
+    "SCVMMSystem",
+    "VmwareCloudSystem",
+    "VMWareSystem",
+    "Openshift",
+    "VmState",
 ]
diff --git a/wrapanapi/clients/__init__.py b/wrapanapi/clients/__init__.py
index fcad1a52..79207210 100644
--- a/wrapanapi/clients/__init__.py
+++ b/wrapanapi/clients/__init__.py
@@ -1,4 +1,5 @@
 from .rest_client import ContainerClient
-from .websocket_client import WebsocketClient, HawkularWebsocketClient
+from .websocket_client import HawkularWebsocketClient
+from .websocket_client import WebsocketClient
 
-__all__ = ['ContainerClient', 'WebsocketClient', 'HawkularWebsocketClient']
+__all__ = ["ContainerClient", "WebsocketClient", "HawkularWebsocketClient"]
diff --git a/wrapanapi/clients/rest_client.py b/wrapanapi/clients/rest_client.py
index d3462dc3..8627c26d 100644
--- a/wrapanapi/clients/rest_client.py
+++ b/wrapanapi/clients/rest_client.py
@@ -1,7 +1,8 @@
-import requests
-import os
 import json
 import logging
+import os
+
+import requests
 
 from wrapanapi.exceptions import RestClientException
 
@@ -10,17 +11,17 @@
 class BearerTokenAuth(requests.auth.AuthBase):
     """Attaches a bearer token to the given request object"""
+
     def __init__(self, token):
         self.token = token
 
     def __call__(self, r):
-        r.headers['Authorization'] = 'Bearer {}'.format(self.token)
+        r.headers["Authorization"] = f"Bearer {self.token}"
         return r
 
 
-class ContainerClient(object):
-
-    def __init__(self, hostname, auth, protocol="https", port=6443, entry='api/v1', verify=False):
+class ContainerClient:
+    def __init__(self, hostname, auth, protocol="https", port=6443, entry="api/v1", verify=False):
         """Simple REST API client for container management systems
 
         Args:
@@ -32,22 +33,22 @@ def __init__(self, hostname, auth, protocol="https", port=6443, entry='api/v1',
             verify: 'True' if we want to verify SSL, 'False' otherwise
         """
         self._logger = logging.getLogger(__name__)
-        self.api_entry = "{}://{}:{}/{}".format(protocol, hostname, port, entry)
+        self.api_entry = f"{protocol}://{hostname}:{port}/{entry}"
         self.verify = verify
         if type(auth) in (list, set, tuple):
             self.auth = auth
         elif isinstance(auth, str):
             self.auth = BearerTokenAuth(auth)
         else:
-            raise RestClientException('Invalid auth object')
+            raise RestClientException("Invalid auth object")
 
     def entity_path(self, entity_type, name=None, namespace=None):
         """Processing the entity path according to the type, name and namespace"""
-        path = '{}s'.format(entity_type)
+        path = f"{entity_type}s"
         if namespace is not None:
-            path = os.path.join('namespaces/{}'.format(namespace), path)
+            path = os.path.join(f"namespaces/{namespace}", path)
         if name is not None:
-            path = os.path.join(path, '{}'.format(name))
+            path = os.path.join(path, f"{name}")
         return path
 
     def get(self, entity_type, name=None, namespace=None, convert=None):
@@ -80,8 +81,15 @@ def post(self, entity_type, data, name=None, namespace=None, convert=None):
             json_content = convert(json_content)
         return (r.status_code, json_content)
 
-    def patch(self, entity_type, data, name=None, namespace=None, convert=None,
-              headers={'Content-Type': 'application/strategic-merge-patch+json'}):
+    def patch(
+        self,
+        entity_type,
+        data,
+        name=None,
+        namespace=None,
+        convert=None,
+        headers={"Content-Type": "application/strategic-merge-patch+json"},
+    ):
         """Sends a PATCH request to an entity specified by the method parameters"""
         path = self.entity_path(entity_type, name, namespace)
         r = self.raw_patch(path, data, headers)
@@ -116,34 +124,47 @@ def delete_status(self, path, headers=None):
         return r.ok
 
     def raw_get(self, path, headers=None, params=None):
-        self._logger.debug('GET %s;', path)
+        self._logger.debug("GET %s;", path)
         return requests.get(
             os.path.join(self.api_entry, path),
             auth=self.auth,
             verify=self.verify,
             headers=headers,
-            params=params)
+            params=params,
+        )
 
     def raw_put(self, path, data, headers=None):
-        self._logger.debug('PUT %s; data=%s;', path, data)
+        self._logger.debug("PUT %s; data=%s;", path, data)
         return requests.put(
-            os.path.join(self.api_entry, path), auth=self.auth, verify=self.verify,
-            headers=headers, data=json.dumps(data))
+            os.path.join(self.api_entry, path),
+            auth=self.auth,
+            verify=self.verify,
+            headers=headers,
+            data=json.dumps(data),
+        )
 
     def raw_post(self, path, data, headers=None):
-        self._logger.debug('POST %s; data=%s;', path, data)
+        self._logger.debug("POST %s; data=%s;", path, data)
         return requests.post(
-            os.path.join(self.api_entry, path), auth=self.auth, verify=self.verify,
-            headers=headers, data=json.dumps(data))
+            os.path.join(self.api_entry, path),
+            auth=self.auth,
+            verify=self.verify,
+            headers=headers,
+            data=json.dumps(data),
+        )
 
     def raw_patch(self, path, data, headers=None):
-        self._logger.debug('PATCH %s; data=%s;', path, data)
+        self._logger.debug("PATCH %s; data=%s;", path, data)
         return requests.patch(
-            os.path.join(self.api_entry, path), auth=self.auth, verify=self.verify,
-            headers=headers, data=json.dumps(data))
+            os.path.join(self.api_entry, path),
+            auth=self.auth,
+            verify=self.verify,
+            headers=headers,
+            data=json.dumps(data),
+        )
 
     def raw_delete(self, path, headers=None):
-        self._logger.debug('DELETE %s;', path)
+        self._logger.debug("DELETE %s;", path)
         return requests.delete(
-            os.path.join(self.api_entry, path), auth=self.auth, verify=self.verify,
-            headers=headers)
+            os.path.join(self.api_entry, path), auth=self.auth, verify=self.verify, headers=headers
+        )
diff --git a/wrapanapi/clients/websocket_client.py b/wrapanapi/clients/websocket_client.py
index b00e7d57..855ac551 100644
--- a/wrapanapi/clients/websocket_client.py
+++ b/wrapanapi/clients/websocket_client.py
@@ -1,22 +1,24 @@
 import base64
 import json
+
 import websocket
 
 
-class WebsocketClient(object):
-    def __init__(self, url, username=None, password=None, headers={}, enable_trace=False,
-                 timeout=60):
+class WebsocketClient:
+    def __init__(
+        self, url, username=None, password=None, headers={}, enable_trace=False, timeout=60
+    ):
         """Simple Web socket client for wrapanapi
-        Args:
-            url: String with the hostname or IP address of the server with port
-                (e.g. 'ws://10.11.12.13:8080/hawkular/command-gateway/ui/ws')
-            username: username for basic auth (optional)
-            password: password for basic auth
-            headers: When you want to pass specified header use this
-            enable_trace: Enable trace on web socket client
-            timeout: receive timeout in seconds
-        """
+        Args:
+            url: String with the hostname or IP address of the server with port
+                (e.g. 'ws://10.11.12.13:8080/hawkular/command-gateway/ui/ws')
+            username: username for basic auth (optional)
+            password: password for basic auth
+            headers: When you want to pass specified header use this
+            enable_trace: Enable trace on web socket client
+            timeout: receive timeout in seconds
+        """
         self.url = url
         self.username = username
         self.password = password
@@ -32,8 +34,8 @@ def connect(self):
         else:
             websocket.enableTrace(self.enable_trace)
         if self.username:
-            base64_creds = base64.b64encode("{}:{}".format(self.username, self.password))
-            self.headers.update({"Authorization": "Basic {}".format(base64_creds)})
+            base64_creds = base64.b64encode(f"{self.username}:{self.password}")
+            self.headers.update({"Authorization": f"Basic {base64_creds}"})
         self.ws = websocket.create_connection(self.url, header=self.headers)
         self.ws.settimeout(self.timeout)
@@ -77,46 +79,60 @@ def send(self, payload, binary_stream=False):
 
     def receive(self):
         """Returns available message on received queue. If there is no message.
-         waits till timeout"""
+        waits till timeout"""
         self._check_connection()
         return self.ws.recv()
 
 
 class HawkularWebsocketClient(WebsocketClient):
     """This client extended from normal websocket client. designed to hawkular specific"""
-    def __init__(self, url, username=None, password=None, headers={}, enable_trace=False,
-                 timeout=60):
+
+    def __init__(
+        self, url, username=None, password=None, headers={}, enable_trace=False, timeout=60
+    ):
         """Creates hawkular web socket client. for arguments refer 'WebsocketClient'"""
-        super(HawkularWebsocketClient, self).__init__(url=url, username=username, password=password,
-                                                      headers=headers, enable_trace=enable_trace,
-                                                      timeout=timeout)
+        super().__init__(
+            url=url,
+            username=username,
+            password=password,
+            headers=headers,
+            enable_trace=enable_trace,
+            timeout=timeout,
+        )
         self.session_id = None
 
     def connect(self):
         """Create connection with hawkular web socket server"""
-        super(HawkularWebsocketClient, self).connect()
+        super().connect()
        response = self.hwk_receive()
-        if 'WelcomeResponse' in response:
-            self.session_id = response['WelcomeResponse']['sessionId']
-            return response['WelcomeResponse']
+        if "WelcomeResponse" in response:
+            self.session_id = response["WelcomeResponse"]["sessionId"]
+            return response["WelcomeResponse"]
         else:
-            raise RuntimeWarning("Key 'WelcomeResponse' not found on response: {}".format(response))
+            raise RuntimeWarning(f"Key 'WelcomeResponse' not found on response: {response}")
         return response
 
     def hwk_receive(self):
         """parse recevied message and returns as dictionary value"""
         payload = self.receive()
-        data = payload.split('=', 1)
+        data = payload.split("=", 1)
         if len(data) != 2:
-            raise IndentationError("Unknown payload format! {}".format(payload))
{payload}") response = {data[0]: json.loads(data[1])} - if 'GenericErrorResponse' in response: - raise Exception("Hawkular server sent failure message: {}" - .format(response['GenericErrorResponse'])) + if "GenericErrorResponse" in response: + raise Exception( + "Hawkular server sent failure message: {}".format(response["GenericErrorResponse"]) + ) return response - def hwk_invoke_operation(self, payload, operation_name="ExecuteOperation", binary_content=None, - binary_file_location=None, wait_for_response=True): + def hwk_invoke_operation( + self, + payload, + operation_name="ExecuteOperation", + binary_content=None, + binary_file_location=None, + wait_for_response=True, + ): """Runs hawkular specific operations Args: payload: payload to server. only string @@ -126,9 +142,9 @@ def hwk_invoke_operation(self, payload, operation_name="ExecuteOperation", binar wait_for_response: When executing a command, wait for the response. default: True """ - _payload = "{}Request={}".format(operation_name, json.dumps(payload)) + _payload = f"{operation_name}Request={json.dumps(payload)}" if binary_file_location: - binary_content = open(binary_file_location, 'rb').read() + binary_content = open(binary_file_location, "rb").read() if binary_content: self.send(_payload + binary_content, binary_stream=True) else: diff --git a/wrapanapi/entities/__init__.py b/wrapanapi/entities/__init__.py index eeae7fc0..08986470 100644 --- a/wrapanapi/entities/__init__.py +++ b/wrapanapi/entities/__init__.py @@ -1,18 +1,36 @@ """ wrapanapi.entities """ - -from .template import Template, TemplateMixin -from .vm import Vm, VmState, VmMixin from .instance import Instance +from .network import Network +from .network import NetworkMixin from .physical_container import PhysicalContainer -from .stack import Stack, StackMixin -from .server import Server, ServerState -from .network import Network, NetworkMixin -from .volume import Volume, VolumeMixin +from .server import Server +from .server import ServerState +from .stack import Stack +from .stack import StackMixin +from .template import Template +from .template import TemplateMixin +from .vm import Vm +from .vm import VmMixin +from .vm import VmState +from .volume import Volume +from .volume import VolumeMixin __all__ = [ - 'Template', 'TemplateMixin', 'Vm', 'VmState', 'VmMixin', 'Instance', - 'PhysicalContainer', 'Server', 'ServerState', 'Stack', 'StackMixin', - 'Network', 'NetworkMixin', 'Volume', 'VolumeMixin' + "Template", + "TemplateMixin", + "Vm", + "VmState", + "VmMixin", + "Instance", + "PhysicalContainer", + "Server", + "ServerState", + "Stack", + "StackMixin", + "Network", + "NetworkMixin", + "Volume", + "VolumeMixin", ] diff --git a/wrapanapi/entities/base.py b/wrapanapi/entities/base.py index acee7aa2..d6ae644b 100644 --- a/wrapanapi/entities/base.py +++ b/wrapanapi/entities/base.py @@ -3,11 +3,13 @@ Provides method/class definitions for handling any entity on a provider """ -from abc import ABCMeta, abstractmethod, abstractproperty +from abc import ABCMeta +from abc import abstractmethod +from abc import abstractproperty from reprlib import aRepr -from wrapanapi.utils import LoggerMixin from wrapanapi.exceptions import NotFoundError +from wrapanapi.utils import LoggerMixin class Entity(LoggerMixin, metaclass=ABCMeta): @@ -18,6 +20,7 @@ class Entity(LoggerMixin, metaclass=ABCMeta): Provides properties/methods that should be applicable across all entities on all systems. 
""" + def __init__(self, system, raw=None, **kwargs): """ Constructor for an entity @@ -73,8 +76,8 @@ def _log_id(self): """ string = "" for key, val in self._identifying_attrs.items(): - string = "{}{}={} ".format(string, key, val) - return "<{}>".format(string.strip()) + string = f"{string}{key}={val} " + return f"<{string.strip()}>" def __eq__(self, other): """ @@ -90,8 +93,9 @@ def __eq__(self, other): if not isinstance(other, self.__class__): return False try: - return (self.system == other.system and - self._identifying_attrs == other._identifying_attrs) + return ( + self.system == other.system and self._identifying_attrs == other._identifying_attrs + ) except AttributeError: return False @@ -105,13 +109,10 @@ def __repr__(self): > """ # Show object type for system and raw - params_repr = ( - "system=<{sys_obj_cls}> raw=<{raw_obj_mod}.{raw_obj_cls}>" - .format( - sys_obj_cls=self.system.__class__.__name__, - raw_obj_mod=self._raw.__class__.__module__, - raw_obj_cls=self._raw.__class__.__name__ - ) + params_repr = "system=<{sys_obj_cls}> raw=<{raw_obj_mod}.{raw_obj_cls}>".format( + sys_obj_cls=self.system.__class__.__name__, + raw_obj_mod=self._raw.__class__.__module__, + raw_obj_cls=self._raw.__class__.__name__, ) # Show kwarg key/value for each unique kwarg @@ -119,13 +120,10 @@ def __repr__(self): a_repr.maxstring = 100 a_repr.maxother = 100 for key, val in self._identifying_attrs.items(): - params_repr = ( - "{existing_params_repr}, kwargs['{kwarg_key}']={kwarg_val}" - .format( - existing_params_repr=params_repr, - kwarg_key=key, - kwarg_val=a_repr.repr(val), - ) + params_repr = "{existing_params_repr}, kwargs['{kwarg_key}']={kwarg_val}".format( + existing_params_repr=params_repr, + kwarg_key=key, + kwarg_val=a_repr.repr(val), ) return "<{mod_name}.{class_name} {params_repr}>".format( @@ -167,8 +165,7 @@ def get_all_subclasses(cls): Return all subclasses that inherit from this class """ for subclass in cls.__subclasses__(): - for nested_subclass in subclass.get_all_subclasses(): - yield nested_subclass + yield from subclass.get_all_subclasses() yield subclass @abstractmethod @@ -246,7 +243,7 @@ def raw(self, value): self._raw = value -class EntityMixin(object): +class EntityMixin: """ Usually an Entity also provides a mixin which defines methods/properties that should be defined by a wrapanapi.systems.System that manages that type of entity @@ -259,5 +256,6 @@ class EntityMixin(object): However, methods for operating on a retrieved entity should be defined in the Entity class """ + # There may be some common methods/properties that apply at the base level in future... pass diff --git a/wrapanapi/entities/instance.py b/wrapanapi/entities/instance.py index 3e490bff..6664c4ee 100644 --- a/wrapanapi/entities/instance.py +++ b/wrapanapi/entities/instance.py @@ -3,8 +3,8 @@ Instances which run on cloud providers """ - -from abc import ABCMeta, abstractproperty +from abc import ABCMeta +from abc import abstractproperty from .vm import Vm @@ -14,6 +14,7 @@ class Instance(Vm, metaclass=ABCMeta): Adds a few additional properties/methods pertaining to VMs hosted on a cloud platform. 
""" + @abstractproperty def type(self): """ diff --git a/wrapanapi/entities/network.py b/wrapanapi/entities/network.py index 4f4ae446..b112ed0a 100644 --- a/wrapanapi/entities/network.py +++ b/wrapanapi/entities/network.py @@ -3,17 +3,20 @@ Networks """ +from abc import ABCMeta +from abc import abstractmethod -from abc import ABCMeta, abstractmethod - -from wrapanapi.entities.base import Entity, EntityMixin -from wrapanapi.exceptions import MultipleItemsError, NotFoundError +from wrapanapi.entities.base import Entity +from wrapanapi.entities.base import EntityMixin +from wrapanapi.exceptions import MultipleItemsError +from wrapanapi.exceptions import NotFoundError class Network(Entity, metaclass=ABCMeta): """ Defines methods/properties pertaining to networks """ + @abstractmethod def get_details(self): """ @@ -32,6 +35,7 @@ class NetworkMixin(EntityMixin, metaclass=ABCMeta): """ Defines methods for systems that support networks """ + @abstractmethod def create_network(self, **kwargs): """ diff --git a/wrapanapi/entities/physical_container.py b/wrapanapi/entities/physical_container.py index 00f37149..a853572a 100644 --- a/wrapanapi/entities/physical_container.py +++ b/wrapanapi/entities/physical_container.py @@ -1,4 +1,3 @@ -# coding: utf-8 """ wrapanapi.entities.physical_container diff --git a/wrapanapi/entities/server.py b/wrapanapi/entities/server.py index 1f62e049..d688570c 100644 --- a/wrapanapi/entities/server.py +++ b/wrapanapi/entities/server.py @@ -3,29 +3,31 @@ Implements classes and methods related to actions performed on (physical) servers """ - -from abc import ABCMeta, abstractmethod +from abc import ABCMeta +from abc import abstractmethod from wrapanapi.entities.base import Entity -class ServerState(object): +class ServerState: """ Represents a state for a server on the provider system. Implementations of ``Server`` should map to these states """ - ON = 'ServerState.On' - OFF = 'ServerState.Off' - POWERING_ON = 'ServerState.PoweringOn' - POWERING_OFF = 'ServerState.PoweringOff' - UNKNOWN = 'ServerState.Unknown' + + ON = "ServerState.On" + OFF = "ServerState.Off" + POWERING_ON = "ServerState.PoweringOn" + POWERING_OFF = "ServerState.PoweringOff" + UNKNOWN = "ServerState.Unknown" @classmethod def valid_states(cls): return [ - var_val for var_val in vars(cls).values() - if isinstance(var_val, str) and var_val.startswith('ServerState.') + var_val + for var_val in vars(cls).values() + if isinstance(var_val, str) and var_val.startswith("ServerState.") ] @@ -33,6 +35,7 @@ class Server(Entity, metaclass=ABCMeta): """ Represents a single server on a management system. """ + # Implementations must define a dict which maps API states returned by the # system to a ServerState. Example: # {'On': ServerState.ON, 'Off': ServerState.OFF} @@ -45,13 +48,17 @@ def __init__(self, *args, **kwargs): Since abc has no 'abstract class property' concept, this is the approach taken. 
""" state_map = self.state_map - if (not state_map or not isinstance(state_map, dict) or - not all(value in ServerState.valid_states() for value in state_map.values())): + if ( + not state_map + or not isinstance(state_map, dict) + or not all(value in ServerState.valid_states() for value in state_map.values()) + ): raise NotImplementedError( - "property '{}' not properly implemented in class '{}'" - .format('state_map', self.__class__.__name__) + "property '{}' not properly implemented in class '{}'".format( + "state_map", self.__class__.__name__ + ) ) - super(Server, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def _api_state_to_serverstate(self, api_state): """ @@ -62,7 +69,8 @@ def _api_state_to_serverstate(self, api_state): except KeyError: self.logger.warn( "Unmapped Server state '%s' received from system, mapped to '%s'", - api_state, ServerState.UNKNOWN + api_state, + ServerState.UNKNOWN, ) return ServerState.UNKNOWN diff --git a/wrapanapi/entities/stack.py b/wrapanapi/entities/stack.py index dd1d5b56..035b53a0 100644 --- a/wrapanapi/entities/stack.py +++ b/wrapanapi/entities/stack.py @@ -3,17 +3,20 @@ Orchestration stacks """ +from abc import ABCMeta +from abc import abstractmethod -from abc import ABCMeta, abstractmethod - -from wrapanapi.entities.base import Entity, EntityMixin -from wrapanapi.exceptions import MultipleItemsError, NotFoundError +from wrapanapi.entities.base import Entity +from wrapanapi.entities.base import EntityMixin +from wrapanapi.exceptions import MultipleItemsError +from wrapanapi.exceptions import NotFoundError class Stack(Entity, metaclass=ABCMeta): """ Defines methods/properties pertaining to stacks """ + @abstractmethod def get_details(self): """ @@ -32,6 +35,7 @@ class StackMixin(EntityMixin, metaclass=ABCMeta): """ Defines methods for systems that support stacks """ + @abstractmethod def list_stacks(self, **kwargs): """ diff --git a/wrapanapi/entities/template.py b/wrapanapi/entities/template.py index 0673c3dc..8aff7b9b 100644 --- a/wrapanapi/entities/template.py +++ b/wrapanapi/entities/template.py @@ -3,17 +3,20 @@ Methods/classes pertaining to performing actions on a template """ +from abc import ABCMeta +from abc import abstractmethod -from abc import ABCMeta, abstractmethod - -from wrapanapi.entities.base import Entity, EntityMixin -from wrapanapi.exceptions import MultipleItemsError, NotFoundError +from wrapanapi.entities.base import Entity +from wrapanapi.entities.base import EntityMixin +from wrapanapi.exceptions import MultipleItemsError +from wrapanapi.exceptions import NotFoundError class Template(Entity, metaclass=ABCMeta): """ Represents a template on a system """ + @abstractmethod def deploy(self, vm_name, timeout, **kwargs): """ @@ -27,6 +30,7 @@ class TemplateMixin(EntityMixin, metaclass=ABCMeta): """ Defines methods a wrapanapi.systems.System that manages Templates should have """ + @abstractmethod def get_template(self, name, **kwargs): """ diff --git a/wrapanapi/entities/vm.py b/wrapanapi/entities/vm.py index 6fee2908..c61e7237 100644 --- a/wrapanapi/entities/vm.py +++ b/wrapanapi/entities/vm.py @@ -4,39 +4,46 @@ Methods/classes pertaining to performing actions on a VM/instance """ import time -from abc import ABCMeta, abstractmethod, abstractproperty +from abc import ABCMeta +from abc import abstractmethod +from abc import abstractproperty from cached_property import cached_property_with_ttl -from wait_for import wait_for, TimedOutError +from wait_for import TimedOutError +from wait_for import wait_for from 
wrapanapi.const import CACHED_PROPERTY_TTL -from wrapanapi.exceptions import MultipleItemsError, NotFoundError -from wrapanapi.entities.base import Entity, EntityMixin +from wrapanapi.entities.base import Entity +from wrapanapi.entities.base import EntityMixin +from wrapanapi.exceptions import MultipleItemsError +from wrapanapi.exceptions import NotFoundError -class VmState(object): +class VmState: """ Represents a state for a VM/instance on the provider system. Implementations of ``Vm`` should map to these states """ - RUNNING = 'VmState.RUNNING' - STOPPED = 'VmState.STOPPED' - PAUSED = 'VmState.PAUSED' - SUSPENDED = 'VmState.SUSPENDED' - DELETED = 'VmState.DELETED' - STARTING = 'VmState.STARTING' - STOPPING = 'VmState.STOPPING' - ERROR = 'VmState.ERROR' - UNKNOWN = 'VmState.UNKNOWN' - SHELVED = 'VmState.SHELVED' - SHELVED_OFFLOADED = 'VmState.SHELVED_OFFLOADED' + + RUNNING = "VmState.RUNNING" + STOPPED = "VmState.STOPPED" + PAUSED = "VmState.PAUSED" + SUSPENDED = "VmState.SUSPENDED" + DELETED = "VmState.DELETED" + STARTING = "VmState.STARTING" + STOPPING = "VmState.STOPPING" + ERROR = "VmState.ERROR" + UNKNOWN = "VmState.UNKNOWN" + SHELVED = "VmState.SHELVED" + SHELVED_OFFLOADED = "VmState.SHELVED_OFFLOADED" @classmethod def valid_states(cls): return [ - var_val for var_val in vars(cls).values() - if isinstance(var_val, str) and var_val.startswith('VmState.') + var_val + for var_val in vars(cls).values() + if isinstance(var_val, str) and var_val.startswith("VmState.") ] @@ -44,6 +51,7 @@ class Vm(Entity, metaclass=ABCMeta): """ Represents a single VM/instance on a management system. """ + # Implementations must define a dict which maps API states returned by the # system to a VmState. Example: # {'running': VmState.RUNNING, 'shutdown': VmState.STOPPED} @@ -55,14 +63,18 @@ def __init__(self, *args, **kwargs): Since abc has no 'abstract class property' concept, this is the approach taken. """ - state_map = getattr(self, 'state_map') - if (not state_map or not isinstance(state_map, dict) or - not all(value in VmState.valid_states() for value in state_map.values())): + state_map = getattr(self, "state_map") + if ( + not state_map + or not isinstance(state_map, dict) + or not all(value in VmState.valid_states() for value in state_map.values()) + ): raise NotImplementedError( - "property '{}' not properly implemented in class '{}'" - .format('state_map', self.__class__.__name__) + "property '{}' not properly implemented in class '{}'".format( + "state_map", self.__class__.__name__ + ) ) - super(Vm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def _api_state_to_vmstate(self, api_state): """ @@ -73,7 +85,8 @@ def _api_state_to_vmstate(self, api_state): except KeyError: self.logger.warn( "Unmapped VM state '%s' received from system, mapped to '%s'", - api_state, VmState.UNKNOWN + api_state, + VmState.UNKNOWN, ) return VmState.UNKNOWN @@ -174,7 +187,7 @@ def creation_time(self): Returns creation time of VM/instance """ - def wait_for_state(self, state, timeout='6m', delay=15): + def wait_for_state(self, state, timeout="6m", delay=15): """ Waits for a VM to be in the desired state @@ -187,24 +200,36 @@ def wait_for_state(self, state, timeout='6m', delay=15): if state not in valid_states: self.logger.error( "Invalid desired state. 
Valid states for %s: %s", - self.__class__.__name__, valid_states + self.__class__.__name__, + valid_states, ) - raise ValueError('Invalid desired state') + raise ValueError("Invalid desired state") wait_for( lambda: self.state == state, timeout=timeout, delay=delay, - message="wait for vm {} to reach state '{}'".format(self._log_id, state)) + message=f"wait for vm {self._log_id} to reach state '{state}'", + ) - def _handle_transition(self, in_desired_state, in_state_requiring_prep, in_actionable_state, - do_prep, do_action, state, timeout, delay): + def _handle_transition( + self, + in_desired_state, + in_state_requiring_prep, + in_actionable_state, + do_prep, + do_action, + state, + timeout, + delay, + ): """ Handles state transition for ensure_state() method See that docstring below for explanation of the args. Each arg here is a callable except for 'state', 'timeout' and 'delay' """ + def _transition(): if in_desired_state(): # Hacking around some race conditions -- double check that desired state is steady @@ -216,24 +241,30 @@ def _transition(): elif in_state_requiring_prep(): self.logger.info( "VM %s in state requiring prep. current state: %s, ensuring state: %s)", - self._log_id, self.state, state + self._log_id, + self.state, + state, ) do_prep() return False elif in_actionable_state(): self.logger.info( "VM %s in actionable state. current state: %s, ensuring state: %s)", - self._log_id, self.state, state + self._log_id, + self.state, + state, ) do_action() return False return wait_for( - _transition, timeout=timeout, delay=delay, - message="ensure vm {} reaches state '{}'".format(self._log_id, state) + _transition, + timeout=timeout, + delay=delay, + message=f"ensure vm {self._log_id} reaches state '{state}'", ) - def ensure_state(self, state, timeout='6m', delay=5): + def ensure_state(self, state, timeout="6m", delay=5): """ Perform the actions required to get the VM to the desired state. @@ -266,9 +297,10 @@ def ensure_state(self, state, timeout='6m', delay=5): if state not in valid_states: self.logger.error( "Invalid desired state. 
Valid states for %s: %s", - self.__class__.__name__, valid_states + self.__class__.__name__, + valid_states, ) - raise ValueError('Invalid desired state') + raise ValueError("Invalid desired state") if state == VmState.RUNNING: return self._handle_transition( @@ -277,45 +309,51 @@ def ensure_state(self, state, timeout='6m', delay=5): in_actionable_state=lambda: self.is_stopped or self.is_suspended or self.is_paused, do_prep=lambda: None, do_action=self.start, - state=state, timeout=timeout, delay=delay + state=state, + timeout=timeout, + delay=delay, ) elif state == VmState.STOPPED: return self._handle_transition( in_desired_state=lambda: self.is_stopped, - in_state_requiring_prep=lambda: (self.is_suspended or - self.is_paused or - self.is_starting), + in_state_requiring_prep=lambda: ( + self.is_suspended or self.is_paused or self.is_starting + ), in_actionable_state=lambda: self.is_running, do_prep=self.start, do_action=self.stop, - state=state, timeout=timeout, delay=delay + state=state, + timeout=timeout, + delay=delay, ) elif state == VmState.SUSPENDED: if not self.system.can_suspend: - raise ValueError( - 'System {} is unable to suspend'.format(self.system.__class__.__name__)) + raise ValueError(f"System {self.system.__class__.__name__} is unable to suspend") return self._handle_transition( in_desired_state=lambda: self.is_suspended, in_state_requiring_prep=lambda: self.is_stopped or self.is_paused, in_actionable_state=lambda: self.is_running, do_prep=self.start, do_action=self.suspend, - state=state, timeout=timeout, delay=delay + state=state, + timeout=timeout, + delay=delay, ) elif state == VmState.PAUSED: if not self.system.can_pause: - raise ValueError( - 'System {} is unable to pause'.format(self.system.__class__.__name__)) + raise ValueError(f"System {self.system.__class__.__name__} is unable to pause") return self._handle_transition( in_desired_state=lambda: self.is_paused, in_state_requiring_prep=lambda: self.is_stopped or self.is_suspended, in_actionable_state=lambda: self.is_running, do_prep=self.start, do_action=self.pause, - state=state, timeout=timeout, delay=delay + state=state, + timeout=timeout, + delay=delay, ) else: - raise ValueError("Invalid desired state '{}'".format(state)) + raise ValueError(f"Invalid desired state '{state}'") @property def in_steady_state(self): @@ -338,11 +376,12 @@ def wait_for_steady_state(self, timeout=None, delay=5): lambda: self.in_steady_state, timeout=timeout if timeout else self.system.steady_wait_time, delay=delay, - message="VM/Instance '{}' in steady state".format(self._log_id) + message=f"VM/Instance '{self._log_id}' in steady state", ) except TimedOutError: self.logger.exception( - "VM %s stuck in '%s' while waiting for steady state.", self._log_id, self.state) + "VM %s stuck in '%s' while waiting for steady state.", self._log_id, self.state + ) raise @abstractmethod @@ -373,7 +412,7 @@ def rename(self, name): """ Rename VM/instance. Not supported on all platforms. 
""" - raise NotImplementedError('rename not implemented.') + raise NotImplementedError("rename not implemented.") def suspend(self): """ @@ -383,7 +422,7 @@ def suspend(self): Returns: True if vm action has been initiated properly """ - raise NotImplementedError('suspend not implemented.') + raise NotImplementedError("suspend not implemented.") def pause(self): """ @@ -393,7 +432,7 @@ def pause(self): Returns: True if vm action has been initiated properly """ - raise NotImplementedError('pause not implemented.') + raise NotImplementedError("pause not implemented.") def clone(self, vm_name, **kwargs): """ @@ -405,13 +444,14 @@ def clone(self, vm_name, **kwargs): vm_name: The name of the new VM Returns: VM object for the new VM """ - raise NotImplementedError('clone not implemented.') + raise NotImplementedError("clone not implemented.") def get_hardware_configuration(self): """Return hardware configuration of the VM.""" raise NotImplementedError( - 'Provider {} does not implement get_hardware_configuration' - .format(type(self.system).__name__) + "Provider {} does not implement get_hardware_configuration".format( + type(self.system).__name__ + ) ) @@ -419,6 +459,7 @@ class VmMixin(EntityMixin, metaclass=ABCMeta): """ Defines methods or properties a wrapanapi.systems.System that manages Vm's should have """ + # Implementations must define whether this system can suspend (True/False) can_suspend = None # Implementations must define whether this system can pause (True/False) @@ -432,13 +473,14 @@ def __init__(self, *args, **kwargs): Since abc has no 'abstract class property' concept, this is the approach taken. """ - required_props = ['can_suspend', 'can_pause'] + required_props = ["can_suspend", "can_pause"] for prop in required_props: prop_value = getattr(self, prop) if not isinstance(prop_value, bool): raise NotImplementedError( - "property '{}' must be implemented in class '{}'" - .format(prop, self.__class__.__name__) + "property '{}' must be implemented in class '{}'".format( + prop, self.__class__.__name__ + ) ) @abstractproperty diff --git a/wrapanapi/entities/volume.py b/wrapanapi/entities/volume.py index 1e786c73..424a2aee 100644 --- a/wrapanapi/entities/volume.py +++ b/wrapanapi/entities/volume.py @@ -3,19 +3,20 @@ Volumes """ -from __future__ import absolute_import -import six +from abc import ABCMeta +from abc import abstractmethod -from abc import ABCMeta, abstractmethod +from wrapanapi.entities.base import Entity +from wrapanapi.entities.base import EntityMixin +from wrapanapi.exceptions import MultipleItemsError +from wrapanapi.exceptions import NotFoundError -from wrapanapi.entities.base import Entity, EntityMixin -from wrapanapi.exceptions import MultipleItemsError, NotFoundError - -class Volume(six.with_metaclass(ABCMeta, Entity)): +class Volume(Entity, metaclass=ABCMeta): """ Defines methods/properties pertaining to volume """ + @abstractmethod def get_details(self): """ @@ -30,10 +31,11 @@ def get_details(self): """ -class VolumeMixin(six.with_metaclass(ABCMeta, EntityMixin)): +class VolumeMixin(EntityMixin, metaclass=ABCMeta): """ Defines methods for systems that support volumes """ + @abstractmethod def create_volume(self, **kwargs): """ diff --git a/wrapanapi/exceptions.py b/wrapanapi/exceptions.py index 850bf675..08d9d9a5 100644 --- a/wrapanapi/exceptions.py +++ b/wrapanapi/exceptions.py @@ -1,5 +1,6 @@ class ActionNotSupported(Exception): """Raised when an action is not supported.""" + pass @@ -11,37 +12,42 @@ class NotFoundError(Exception): """ General exception when 
raised when something is not found """ + pass class VMInstanceNotFound(NotFoundError): """Raised if a VM or instance is not found.""" + def __init__(self, vm_name): self.vm_name = vm_name def __str__(self): - return 'Could not find a VM/instance named %s.' % self.vm_name + return "Could not find a VM/instance named %s." % self.vm_name class ItemNotFound(NotFoundError): """Raised if an item is not found.""" + def __init__(self, name, item_type): self.name = name self.item_type = item_type def __str__(self): - return 'Could not find a {} named {}.'.format(self.item_type, self.name) + return f"Could not find a {self.item_type} named {self.name}." class VMNotFoundViaIP(NotFoundError): """ Raised if a specific VM cannot be found. """ + pass class ForwardingRuleNotFound(NotFoundError): """Raised if a Forwarding Rule for loadbalancers not found.""" + def __init__(self, forwarding_rule_name): self.vm_name = forwarding_rule_name @@ -52,22 +58,24 @@ class ImageNotFoundError(NotFoundError): class LabelNotFoundException(NotFoundError): """Raised when trying to delete a label which doesn't exist""" + def __init__(self, label_key): self._label_key = label_key def __str__(self): - return 'Could not delete label "{}" (label does not exist).'.format( - self._label_key) + return f'Could not delete label "{self._label_key}" (label does not exist).' class DatastoreNotFoundError(NotFoundError): """Raised if a datastore(s) or datastore clusters(s) are not found""" + def __init__(self, item_type): self.item_type = item_type def __str__(self): return 'Could not find any "{}" available for provisioning, check the status'.format( - self.item_type) + self.item_type + ) class NetworkNotFoundError(NotFoundError): @@ -84,16 +92,18 @@ class VolumeNotFoundError(NotFoundError): class InvalidValueException(ValueError): """Raises when invalid value provided. E.g. invalid OpenShift project name""" + pass class KeystoneVersionNotSupported(Exception): """Raised when inappropriate version of Keystone is provided for Openstack system""" + def __init__(self, ver): self.version = ver def __str__(self): - return "Provided version of Keystone is not supported: {}".format(self.version) + return f"Provided version of Keystone is not supported: {self.version}" class NoMoreFloatingIPs(Exception): @@ -122,39 +132,44 @@ class RestClientException(Exception): class RequestFailedException(Exception): """Raised if some request returned unexpected status code""" + pass class ResourceAlreadyExistsException(Exception): """Raised when trying to create a resource that already exists""" + pass class UncreatableResourceException(Exception): """Raised when trying to create uncreatable resource""" + def __init__(self, resource): self.resource = resource def __str__(self): - return '{} is not creatable resource' + return "{} is not creatable resource" class VMInstanceNotCloned(Exception): """Raised if a VM or instance is not cloned.""" + def __init__(self, template): self.template = template def __str__(self): - return 'Could not clone %s' % self.template + return "Could not clone %s" % self.template class VMInstanceNotSuspended(Exception): """Raised if a VM or instance is not able to be suspended.""" + def __init__(self, vm_name): self.vm_name = vm_name def __str__(self): - return 'Could not suspend %s because it\'s not running.' % self.vm_name + return "Could not suspend %s because it's not running." 
% self.vm_name class HostNotRemoved(Exception): @@ -167,11 +182,13 @@ class VMError(Exception): class VMCreationDateError(Exception): """Raised when we cannot determine a creation date for a VM""" + pass class VMInstanceNotStopped(Exception): """Raised if a VM or instance is not in stopped state.""" + def __init__(self, vm_name, action="action"): self.vm_name = vm_name self.action = action diff --git a/wrapanapi/systems/__init__.py b/wrapanapi/systems/__init__.py index ff9553ae..fa0adf4f 100644 --- a/wrapanapi/systems/__init__.py +++ b/wrapanapi/systems/__init__.py @@ -1,4 +1,3 @@ - from .ec2 import EC2System from .google import GoogleCloudSystem from .hawkular import HawkularSystem @@ -14,7 +13,17 @@ from .virtualcenter import VMWareSystem __all__ = [ - 'EC2System', 'GoogleCloudSystem', 'HawkularSystem', 'LenovoSystem', - 'AzureSystem', 'NuageSystem', 'OpenstackSystem', 'OpenstackInfraSystem', 'RedfishSystem', - 'RHEVMSystem', 'SCVMMSystem', 'VmwareCloudSystem', 'VMWareSystem' + "EC2System", + "GoogleCloudSystem", + "HawkularSystem", + "LenovoSystem", + "AzureSystem", + "NuageSystem", + "OpenstackSystem", + "OpenstackInfraSystem", + "RedfishSystem", + "RHEVMSystem", + "SCVMMSystem", + "VmwareCloudSystem", + "VMWareSystem", ] diff --git a/wrapanapi/systems/base.py b/wrapanapi/systems/base.py index 9f2d6b61..6a2148cb 100644 --- a/wrapanapi/systems/base.py +++ b/wrapanapi/systems/base.py @@ -1,15 +1,17 @@ -# coding: utf-8 """Backend management system classes Used to communicate with providers without using CFME facilities """ -from abc import ABCMeta, abstractmethod, abstractproperty +from abc import ABCMeta +from abc import abstractmethod +from abc import abstractproperty from wrapanapi.utils import LoggerMixin class System(LoggerMixin, metaclass=ABCMeta): """Represents any system that wrapanapi interacts with.""" + # This should be defined by implementors of System _stats_available = {} @@ -66,11 +68,12 @@ def stats(self, *requested_stats): Returns: A dict of stats. 
""" if not self._stats_available: - raise Exception('{} has empty self._stats_available dictionary'.format( - self.__class__.__name__)) + raise Exception(f"{self.__class__.__name__} has empty self._stats_available dictionary") - return {stat: self._stats_available[stat](self) - for stat in requested_stats or self._stats_available.keys()} + return { + stat: self._stats_available[stat](self) + for stat in requested_stats or self._stats_available.keys() + } def disconnect(self): """Disconnects the API from mgmt system""" @@ -78,4 +81,5 @@ def disconnect(self): def usage_and_quota(self): raise NotImplementedError( - 'Provider {} does not implement usage_and_quota'.format(type(self).__name__)) + f"Provider {type(self).__name__} does not implement usage_and_quota" + ) diff --git a/wrapanapi/systems/container/__init__.py b/wrapanapi/systems/container/__init__.py index 11b812d0..74d91ea9 100644 --- a/wrapanapi/systems/container/__init__.py +++ b/wrapanapi/systems/container/__init__.py @@ -1,4 +1,3 @@ - from .rhopenshift import Openshift -__all__ = ['Openshift'] +__all__ = ["Openshift"] diff --git a/wrapanapi/systems/container/rhopenshift.py b/wrapanapi/systems/container/rhopenshift.py index 4c388b6b..1e02e20a 100644 --- a/wrapanapi/systems/container/rhopenshift.py +++ b/wrapanapi/systems/container/rhopenshift.py @@ -1,17 +1,20 @@ import copy import json import string -import yaml from collections.abc import Iterable -from functools import partial, wraps +from functools import partial +from functools import wraps from random import choice import inflection +import yaml from kubernetes import client as kubeclient from kubernetes.client.rest import ApiException -from miq_version import TemplateName, Version +from miq_version import TemplateName +from miq_version import Version from openshift import client as ociclient -from wait_for import TimedOutError, wait_for +from wait_for import TimedOutError +from wait_for import wait_for from wrapanapi.systems.base import System @@ -55,17 +58,19 @@ def reconnect(decorator): def decorate(cls): for attr in cls.__dict__: - if callable(getattr(cls, attr)) and not attr.startswith('_'): + if callable(getattr(cls, attr)) and not attr.startswith("_"): setattr(cls, attr, decorator(getattr(cls, attr))) return cls + return decorate def unauthenticated_error_handler(method): """Fixes issue with 401 error by restoring connection. - Sometimes connection to openshift api endpoint gets expired and openshift returns 401. - As a result tasks in some applications like sprout fail. + Sometimes connection to openshift api endpoint gets expired and openshift returns 401. + As a result tasks in some applications like sprout fail. """ + @wraps(method) def wrap(*args, **kwargs): attempts = 3 @@ -73,11 +78,12 @@ def wrap(*args, **kwargs): try: return method(*args, **kwargs) except ApiException as e: - if e.reason == 'Unauthorized': + if e.reason == "Unauthorized": args[0]._connect() else: raise e return method(*args, **kwargs) + return wrap @@ -86,7 +92,7 @@ def is_iterable(item): def drop_item(struct, key): - """ It's needed to workaround one bug in processing openshift template. + """It's needed to workaround one bug in processing openshift template. 
It just recursively drops passed key from passed struct :param struct: dict or dicts with lists and etc @@ -106,68 +112,80 @@ def drop_item(struct, key): @reconnect(unauthenticated_error_handler) class Openshift(System): - _stats_available = { - 'num_container': lambda self: len(self.list_container()), - 'num_pod': lambda self: len(self.list_pods()), - 'num_service': lambda self: len(self.list_service()), - 'num_replication_controller': - lambda self: len(self.list_replication_controller()), - 'num_image': lambda self: len(self.list_image_id()), - 'num_node': lambda self: len(self.list_node()), - 'num_image_registry': lambda self: len(self.list_image_registry()), - 'num_project': lambda self: len(self.list_project()), - 'num_route': lambda self: len(self.list_route()), - 'num_template': lambda self: len(self.list_template()) + "num_container": lambda self: len(self.list_container()), + "num_pod": lambda self: len(self.list_pods()), + "num_service": lambda self: len(self.list_service()), + "num_replication_controller": lambda self: len(self.list_replication_controller()), + "num_image": lambda self: len(self.list_image_id()), + "num_node": lambda self: len(self.list_node()), + "num_image_registry": lambda self: len(self.list_image_registry()), + "num_project": lambda self: len(self.list_project()), + "num_route": lambda self: len(self.list_route()), + "num_template": lambda self: len(self.list_template()), } stream2template_tags_mapping51012 = { - 'cloudforms47-cfme-openshift-httpd': {'tag': 'HTTPD_IMG_TAG', 'url': 'HTTPD_IMG_NAME'}, - 'cloudforms47-cfme-openshift-app': {'tag': 'BACKEND_APPLICATION_IMG_TAG', - 'url': 'BACKEND_APPLICATION_IMG_NAME'}, - 'cloudforms47-cfme-openshift-app-ui': {'tag': 'FRONTEND_APPLICATION_IMG_TAG', - 'url': 'FRONTEND_APPLICATION_IMG_NAME'}, - 'cloudforms47-cfme-openshift-embedded-ansible': {'tag': 'ANSIBLE_IMG_TAG', - 'url': 'ANSIBLE_IMG_NAME'}, - 'cloudforms47-cfme-openshift-memcached': {'tag': 'MEMCACHED_IMG_TAG', - 'url': 'MEMCACHED_IMG_NAME'}, - 'cloudforms47-cfme-openshift-postgresql': {'tag': 'POSTGRESQL_IMG_TAG', - 'url': 'POSTGRESQL_IMG_NAME'}, + "cloudforms47-cfme-openshift-httpd": {"tag": "HTTPD_IMG_TAG", "url": "HTTPD_IMG_NAME"}, + "cloudforms47-cfme-openshift-app": { + "tag": "BACKEND_APPLICATION_IMG_TAG", + "url": "BACKEND_APPLICATION_IMG_NAME", + }, + "cloudforms47-cfme-openshift-app-ui": { + "tag": "FRONTEND_APPLICATION_IMG_TAG", + "url": "FRONTEND_APPLICATION_IMG_NAME", + }, + "cloudforms47-cfme-openshift-embedded-ansible": { + "tag": "ANSIBLE_IMG_TAG", + "url": "ANSIBLE_IMG_NAME", + }, + "cloudforms47-cfme-openshift-memcached": { + "tag": "MEMCACHED_IMG_TAG", + "url": "MEMCACHED_IMG_NAME", + }, + "cloudforms47-cfme-openshift-postgresql": { + "tag": "POSTGRESQL_IMG_TAG", + "url": "POSTGRESQL_IMG_NAME", + }, } stream2template_tags_mapping_rest = { - 'cfme-openshift-httpd': {'tag': 'HTTPD_IMG_TAG', 'url': 'HTTPD_IMG_NAME'}, - 'cfme-openshift-app': {'tag': 'BACKEND_APPLICATION_IMG_TAG', - 'url': 'BACKEND_APPLICATION_IMG_NAME'}, - 'cfme-openshift-app-ui': {'tag': 'FRONTEND_APPLICATION_IMG_TAG', - 'url': 'FRONTEND_APPLICATION_IMG_NAME'}, - 'cfme-openshift-embedded-ansible': {'tag': 'ANSIBLE_IMG_TAG', 'url': 'ANSIBLE_IMG_NAME'}, - 'cfme-openshift-memcached': {'tag': 'MEMCACHED_IMG_TAG', 'url': 'MEMCACHED_IMG_NAME'}, - 'cfme-openshift-postgresql': {'tag': 'POSTGRESQL_IMG_TAG', 'url': 'POSTGRESQL_IMG_NAME'}, + "cfme-openshift-httpd": {"tag": "HTTPD_IMG_TAG", "url": "HTTPD_IMG_NAME"}, + "cfme-openshift-app": { + "tag": "BACKEND_APPLICATION_IMG_TAG", + 
"url": "BACKEND_APPLICATION_IMG_NAME", + }, + "cfme-openshift-app-ui": { + "tag": "FRONTEND_APPLICATION_IMG_TAG", + "url": "FRONTEND_APPLICATION_IMG_NAME", + }, + "cfme-openshift-embedded-ansible": {"tag": "ANSIBLE_IMG_TAG", "url": "ANSIBLE_IMG_NAME"}, + "cfme-openshift-memcached": {"tag": "MEMCACHED_IMG_TAG", "url": "MEMCACHED_IMG_NAME"}, + "cfme-openshift-postgresql": {"tag": "POSTGRESQL_IMG_TAG", "url": "POSTGRESQL_IMG_NAME"}, } scc_user_mapping = ( - {'scc': 'anyuid', 'user': 'cfme-anyuid'}, - {'scc': 'anyuid', 'user': 'cfme-orchestrator'}, - {'scc': 'anyuid', 'user': 'cfme-httpd'}, - {'scc': 'privileged', 'user': 'cfme-privileged'}, + {"scc": "anyuid", "user": "cfme-anyuid"}, + {"scc": "anyuid", "user": "cfme-orchestrator"}, + {"scc": "anyuid", "user": "cfme-httpd"}, + {"scc": "privileged", "user": "cfme-privileged"}, ) - default_namespace = 'openshift' - required_project_pods = ('httpd', 'memcached', 'postgresql', - 'cloudforms', 'cloudforms-backend') - not_required_project_pods = ('cloudforms-backend', 'ansible') + default_namespace = "openshift" + required_project_pods = ("httpd", "memcached", "postgresql", "cloudforms", "cloudforms-backend") + not_required_project_pods = ("cloudforms-backend", "ansible") - def __init__(self, hostname, protocol="https", port=8443, debug=False, - verify_ssl=False, **kwargs): - super(Openshift, self).__init__(kwargs) + def __init__( + self, hostname, protocol="https", port=8443, debug=False, verify_ssl=False, **kwargs + ): + super().__init__(kwargs) self.hostname = hostname self.protocol = protocol self.port = port - self.username = kwargs.get('username', '') - self.password = kwargs.get('password', '') - self.base_url = kwargs.get('base_url', None) - self.token = kwargs.get('token', '') + self.username = kwargs.get("username", "") + self.password = kwargs.get("password", "") + self.base_url = kwargs.get("base_url", None) + self.token = kwargs.get("token", "") self.auth = self.token if self.token else (self.username, self.password) self.debug = debug self.verify_ssl = verify_ssl @@ -179,18 +197,19 @@ def _identifying_attrs(self): Return a dict with key, value pairs for each kwarg that is used to uniquely identify this system. 
""" - return {'hostname': self.hostname, 'port': self.port} + return {"hostname": self.hostname, "port": self.port} def _connect(self): - url = '{proto}://{host}:{port}'.format(proto=self.protocol, host=self.hostname, - port=self.port) + url = "{proto}://{host}:{port}".format( + proto=self.protocol, host=self.hostname, port=self.port + ) - token = 'Bearer {token}'.format(token=self.token) + token = f"Bearer {self.token}" config = ociclient.Configuration() config.host = url config.verify_ssl = self.verify_ssl config.debug = self.debug - config.api_key['authorization'] = token + config.api_key["authorization"] = token self.ociclient = ociclient self.kclient = kubeclient @@ -202,9 +221,10 @@ def _connect(self): self.batch_api = self.kclient.BatchV1Api(api_client=self.kapi_client) # for job api def info(self): - url = '{proto}://{host}:{port}'.format(proto=self.protocol, host=self.hostname, - port=self.port) - return "rhopenshift {}".format(url) + url = "{proto}://{host}:{port}".format( + proto=self.protocol, host=self.hostname, port=self.port + ) + return f"rhopenshift {url}" def list_route(self, namespace=None): """Returns list of routes""" @@ -238,8 +258,11 @@ def list_template(self, namespace=None): def list_image_stream_images(self): """Returns list of images (Docker registry only)""" - return [item for item in self.o_api.list_image().items - if item.docker_image_reference is not None] + return [ + item + for item in self.o_api.list_image().items + if item.docker_image_reference is not None + ] def list_deployment_config(self, namespace=None): """Returns list of deployment configs""" @@ -274,11 +297,11 @@ def cluster_info(self): """Returns information about the cluster - number of CPUs and memory in GB""" aggregate_cpu, aggregate_mem = 0, 0 for node in self.list_node(): - aggregate_cpu += int(node.status.capacity['cpu']) + aggregate_cpu += int(node.status.capacity["cpu"]) # converting KiB to GB. 
1KiB = 1.024E-6 GB - aggregate_mem += int(round(int(node.status.capacity['memory'][:-2]) * 0.00000102400)) + aggregate_mem += int(round(int(node.status.capacity["memory"][:-2]) * 0.00000102400)) - return {'cpu': aggregate_cpu, 'memory': aggregate_mem} + return {"cpu": aggregate_cpu, "memory": aggregate_mem} def list_persistent_volume(self): """Returns list of persistent volumes""" @@ -308,7 +331,7 @@ def list_image_id(self, namespace=None): for pod in pods: for status in pod.status.container_statuses: statuses.append(status) - return sorted(set([status.image_id for status in statuses])) + return sorted({status.image_id for status in statuses}) def list_image_registry(self, namespace=None): """Returns list of image registries (derived from pods)""" @@ -318,7 +341,7 @@ def list_image_registry(self, namespace=None): for status in pod.status.container_statuses: statuses.append(status) # returns only the image registry name, without the port number in case of local registry - return sorted(set([status.image.split('/')[0].split(':')[0] for status in statuses])) + return sorted({status.image.split("/")[0].split(":")[0] for status in statuses}) def expose_db_ip(self, namespace): """Creates special service in appliance project (namespace) which makes internal appliance @@ -335,7 +358,7 @@ def expose_db_ip(self, namespace): return self.get_ip_address(namespace) - def deploy_template(self, template, tags=None, password='smartvm', **kwargs): + def deploy_template(self, template, tags=None, password="smartvm", **kwargs): """Deploy a VM from a template Args: @@ -358,41 +381,42 @@ def deploy_template(self, template, tags=None, password='smartvm', **kwargs): version = Version(TemplateName.parse_template(template).version) - if version >= '5.10.12': + if version >= "5.10.12": tags_mapping = self.stream2template_tags_mapping51012 else: tags_mapping = self.stream2template_tags_mapping_rest - prepared_tags = {tag['tag']: 'latest' for tag in tags_mapping.values()} + prepared_tags = {tag["tag"]: "latest" for tag in tags_mapping.values()} if tags: not_found_tags = [t for t in tags.keys() if t not in list(tags_mapping.keys())] if not_found_tags: - raise ValueError("Some passed tags {t} don't exist".format(t=not_found_tags)) + raise ValueError(f"Some passed tags {not_found_tags} don't exist") for tag, value in tags.items(): - prepared_tags[tags_mapping[tag]['url']] = value['url'] - prepared_tags[tags_mapping[tag]['tag']] = value['tag'] + prepared_tags[tags_mapping[tag]["url"]] = value["url"] + prepared_tags[tags_mapping[tag]["tag"]] = value["tag"] # create project # assuming this is cfme installation and generating project name proj_id = "".join(choice(string.digits + string.ascii_lowercase) for _ in range(6)) # for sprout - if 'vm_name' in kwargs: - proj_name = kwargs['vm_name'] + if "vm_name" in kwargs: + proj_name = kwargs["vm_name"] else: - proj_name = "{t}-project-{proj_id}".format(t=template, proj_id=proj_id) + proj_name = f"{template}-project-{proj_id}" - template_params = kwargs.pop('template_params', {}) - running_pods = kwargs.pop('running_pods', ()) - proj_url = "{proj}.{base_url}".format(proj=proj_id, base_url=self.base_url) + template_params = kwargs.pop("template_params", {}) + running_pods = kwargs.pop("running_pods", ()) + proj_url = f"{proj_id}.{self.base_url}" self.logger.info("unique id %s, project name %s", proj_id, proj_name) - default_progress_callback = partial(self._progress_log_callback, self.logger, template, - proj_name) - progress_callback = kwargs.get('progress_callback', 
default_progress_callback) + default_progress_callback = partial( + self._progress_log_callback, self.logger, template, proj_name + ) + progress_callback = kwargs.get("progress_callback", default_progress_callback) self.create_project(name=proj_name, description=template) - progress_callback("Created Project `{}`".format(proj_name)) + progress_callback(f"Created Project `{proj_name}`") # grant rights according to scc self.logger.info("granting rights to project %s sa", proj_name) @@ -400,12 +424,12 @@ def deploy_template(self, template, tags=None, password='smartvm', **kwargs): self.logger.info("granting required rights to project's service accounts") for mapping in scc_user_mapping: - self.append_sa_to_scc(scc_name=mapping['scc'], namespace=proj_name, sa=mapping['user']) + self.append_sa_to_scc(scc_name=mapping["scc"], namespace=proj_name, sa=mapping["user"]) progress_callback("Added service accounts to appropriate scc") self.logger.info("project sa created via api have no some mandatory roles. adding them") self._restore_missing_project_role_bindings(namespace=proj_name) - progress_callback("Added all necessary role bindings to project `{}`".format(proj_name)) + progress_callback(f"Added all necessary role bindings to project `{proj_name}`") # creating common service with external ip ext_ip = self.expose_db_ip(proj_name) @@ -416,16 +440,18 @@ def deploy_template(self, template, tags=None, password='smartvm', **kwargs): self.create_config_map(namespace=proj_name, **yaml.safe_load(image_repo_cm)) # creating pods and etc - processing_params = {'DATABASE_PASSWORD': password, - 'APPLICATION_DOMAIN': proj_url} + processing_params = {"DATABASE_PASSWORD": password, "APPLICATION_DOMAIN": proj_url} processing_params.update(prepared_tags) # updating template parameters processing_params.update(template_params) - self.logger.info(("processing template and passed params in order to " - "prepare list of required project entities")) - template_entities = self.process_template(name=template, namespace=self.default_namespace, - parameters=processing_params) + self.logger.info( + "processing template and passed params in order to " + "prepare list of required project entities" + ) + template_entities = self.process_template( + name=template, namespace=self.default_namespace, parameters=processing_params + ) self.logger.debug("template entities:\n %r", template_entities) progress_callback("Template has been processed") self.create_template_entities(namespace=proj_name, entities=template_entities) @@ -434,14 +460,18 @@ def deploy_template(self, template, tags=None, password='smartvm', **kwargs): self.logger.info("verifying that all created entities are up and running") progress_callback("Waiting for all pods to be ready and running") try: - wait_for(self.is_vm_running, timeout=600, - func_kwargs={'vm_name': proj_name, 'running_pods': running_pods}) + wait_for( + self.is_vm_running, + timeout=600, + func_kwargs={"vm_name": proj_name, "running_pods": running_pods}, + ) self.logger.info("all pods look up and running") progress_callback("Everything has been deployed w/o errors") - return {'url': proj_url, - 'external_ip': ext_ip, - 'project': proj_name, - } + return { + "url": proj_url, + "external_ip": ext_ip, + "project": proj_name, + } except TimedOutError: self.logger.error("deployment failed. 
Please check failed pods details") # todo: return and print all failed pod details @@ -460,13 +490,13 @@ def create_template_entities(self, namespace, entities): Returns: None """ self.logger.debug("passed template entities:\n %r", entities) - kinds = set([e['kind'] for e in entities]) + kinds = {e["kind"] for e in entities} entity_names = {e: inflection.underscore(e) for e in kinds} - proc_names = {k: 'create_{e}'.format(e=p) for k, p in entity_names.items()} + proc_names = {k: f"create_{p}" for k, p in entity_names.items()} for entity in entities: - if entity['kind'] in kinds: - procedure = getattr(self, proc_names[entity['kind']], None) + if entity["kind"] in kinds: + procedure = getattr(self, proc_names[entity["kind"]], None) obtained_entity = procedure(namespace=namespace, **entity) self.logger.debug(obtained_entity) else: @@ -484,7 +514,7 @@ def start_vm(self, vm_name): for pod in self.get_required_pods(vm_name): self.scale_entity(name=pod, namespace=vm_name, replicas=1) else: - raise ValueError("Project with name {n} doesn't exist".format(n=vm_name)) + raise ValueError(f"Project with name {vm_name} doesn't exist") def stop_vm(self, vm_name): """Stops a vm. @@ -498,7 +528,7 @@ def stop_vm(self, vm_name): for pod in self.get_required_pods(vm_name): self.scale_entity(name=pod, namespace=vm_name, replicas=0) else: - raise ValueError("Project with name {n} doesn't exist".format(n=vm_name)) + raise ValueError(f"Project with name {vm_name} doesn't exist") def delete_vm(self, vm_name): """Deletes a vm. @@ -536,13 +566,13 @@ def _update_template_parameters(template, **params): new_parameters = template.parameters for new_param, new_value in params.items(): for index, old_param in enumerate(new_parameters): - if old_param['name'] == new_param: + if old_param["name"] == new_param: old_param = new_parameters.pop(index) - if 'generate' in old_param: - old_param['generate'] = None - old_param['_from'] = None + if "generate" in old_param: + old_param["generate"] = None + old_param["_from"] = None - old_param['value'] = new_value + old_param["value"] = new_value new_parameters.append(old_param) template.parameters = new_parameters return template @@ -557,8 +587,9 @@ def process_template(self, name, namespace, parameters=None): Return: list of objects stored in template """ # workaround for bug https://github.com/openshift/openshift-restclient-python/issues/60 - raw_response = self.o_api.read_namespaced_template(name=name, namespace=namespace, - _preload_content=False) + raw_response = self.o_api.read_namespaced_template( + name=name, namespace=namespace, _preload_content=False + ) raw_data = json.loads(raw_response.data) return self.process_raw_template(body=raw_data, namespace=namespace, parameters=parameters) @@ -579,38 +610,39 @@ def process_raw_template(self, body, namespace, parameters=None): updated_data = self.rename_structure(body) read_template = self.ociclient.V1Template(**updated_data) if parameters: - updated_template = self._update_template_parameters(template=read_template, - **parameters) + updated_template = self._update_template_parameters( + template=read_template, **parameters + ) else: updated_template = read_template - raw_response = self.o_api.create_namespaced_processed_template(namespace=namespace, - body=updated_template, - _preload_content=False) + raw_response = self.o_api.create_namespaced_processed_template( + namespace=namespace, body=updated_template, _preload_content=False + ) raw_data = json.loads(raw_response.data) # above api call started adding wrong apiVersion 
to template what entails deployment errors - updated_data = drop_item(raw_data, 'apiVersion') + updated_data = drop_item(raw_data, "apiVersion") processed_template = self.ociclient.V1Template(**updated_data) return processed_template.objects def rename_structure(self, struct): """Fixes inconsistency in input/output data of openshift python client methods - Args: - struct: data to process and rename - Return: updated data + Args: + struct: data to process and rename + Return: updated data """ if is_iterable(struct): new_struct = {} if isinstance(struct, dict): # we shouldn't rename something under data or spec for key, value in struct.items(): - if key in ('spec', 'data', 'string_data', 'annotations'): + if key in ("spec", "data", "string_data", "annotations"): # these keys and data should be left intact new_struct[key] = value continue else: val = struct.get(key) - if key != 'stringData': + if key != "stringData": # all this data should be processed and updated # except stringData. this key has to be renamed but its contents # should be left intact @@ -630,7 +662,7 @@ def create_config_map(self, namespace, **kwargs): Return: data if entity was created w/o errors """ conf_map = self.kclient.V1ConfigMap(**kwargs) - conf_map_name = conf_map.to_dict()['metadata']['name'] + conf_map_name = conf_map.to_dict()["metadata"]["name"] self.logger.info("creating config map %s", conf_map_name) output = self.k_api.create_namespaced_config_map(namespace=namespace, body=conf_map) self.wait_config_map_exist(namespace=namespace, name=conf_map_name) @@ -645,11 +677,11 @@ def replace_config_map(self, namespace, **kwargs): Return: data if entity was created w/o errors """ conf_map = self.kclient.V1ConfigMap(**kwargs) - conf_map_name = conf_map.to_dict()['metadata']['name'] + conf_map_name = conf_map.to_dict()["metadata"]["name"] self.logger.info("replacing config map %s", conf_map_name) - output = self.k_api.replace_namespaced_config_map(namespace=namespace, - name=conf_map_name, - body=conf_map) + output = self.k_api.replace_namespaced_config_map( + namespace=namespace, name=conf_map_name, body=conf_map + ) return output def create_stateful_set(self, namespace, **kwargs): @@ -661,7 +693,7 @@ def create_stateful_set(self, namespace, **kwargs): Return: data if entity was created w/o errors """ st = self.kclient.V1beta1StatefulSet(**kwargs) - st_name = st.to_dict()['metadata']['name'] + st_name = st.to_dict()["metadata"]["name"] self.logger.info("creating stateful set %s", st_name) api = self.kclient.AppsV1beta1Api(api_client=self.kapi_client) output = api.create_namespaced_stateful_set(namespace=namespace, body=st) @@ -677,7 +709,7 @@ def create_service(self, namespace, **kwargs): Return: data if entity was created w/o errors """ service = self.kclient.V1Service(**kwargs) - service_name = service.to_dict()['metadata']['name'] + service_name = service.to_dict()["metadata"]["name"] self.logger.info("creating service %s", service_name) output = self.k_api.create_namespaced_service(namespace=namespace, body=service) self.wait_service_exist(namespace=namespace, name=service_name) @@ -692,7 +724,7 @@ def create_endpoints(self, namespace, **kwargs): Return: data if entity was created w/o errors """ endpoints = self.kclient.V1Endpoints(**kwargs) - endpoints_name = endpoints.to_dict()['metadata']['name'] + endpoints_name = endpoints.to_dict()["metadata"]["name"] self.logger.info("creating endpoints %s", endpoints_name) output = self.k_api.create_namespaced_endpoints(namespace=namespace, body=endpoints) 
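        # [Editor's aside, annotation only -- not lines of the patch] This
        # create-then-probe sequence is the contract every create_* method in
        # this file follows: build the client model object from kwargs, create
        # it through the API, then block on the matching wait_*_exist() helper
        # before returning the raw API response.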
self.wait_endpoints_exist(namespace=namespace, name=endpoints_name) @@ -707,7 +739,7 @@ def create_route(self, namespace, **kwargs): Return: data if entity was created w/o errors """ route = self.ociclient.V1Route(**kwargs) - route_name = route.to_dict()['metadata']['name'] + route_name = route.to_dict()["metadata"]["name"] self.logger.info("creating route %s", route_name) output = self.o_api.create_namespaced_route(namespace=namespace, body=route) self.wait_route_exist(namespace=namespace, name=route_name) @@ -722,7 +754,7 @@ def create_service_account(self, namespace, **kwargs): Return: data if entity was created w/o errors """ sa = self.kclient.V1ServiceAccount(**kwargs) - sa_name = sa.to_dict()['metadata']['name'] + sa_name = sa.to_dict()["metadata"]["name"] self.logger.info("creating service account %s", sa_name) output = self.k_api.create_namespaced_service_account(namespace=namespace, body=sa) self.wait_service_account_exist(namespace=namespace, name=sa_name) @@ -739,17 +771,16 @@ def create_role_binding(self, namespace, **kwargs): ObjectRef = self.kclient.V1ObjectReference # noqa auth_api = self.ociclient.AuthorizationOpenshiftIoV1Api(api_client=self.oapi_client) # there is some version mismatch in api. so, it would be better to remove version - kwargs.pop('api_version', None) - role_binding_name = kwargs['metadata']['name'] + kwargs.pop("api_version", None) + role_binding_name = kwargs["metadata"]["name"] # role and subjects data should be turned into objects before passing them to RoleBinding - role_name = kwargs.pop('role_ref')['name'] + role_name = kwargs.pop("role_ref")["name"] role = ObjectRef(name=role_name) - subjects = [ObjectRef(namespace=namespace, **subj) for subj in kwargs.pop('subjects')] + subjects = [ObjectRef(namespace=namespace, **subj) for subj in kwargs.pop("subjects")] role_binding = self.ociclient.V1RoleBinding(role_ref=role, subjects=subjects, **kwargs) self.logger.debug("creating role binding %s in project %s", role_binding_name, namespace) - output = auth_api.create_namespaced_role_binding(namespace=namespace, - body=role_binding) + output = auth_api.create_namespaced_role_binding(namespace=namespace, body=role_binding) self.wait_role_binding_exist(namespace=namespace, name=role_binding_name) return output @@ -762,7 +793,7 @@ def create_image_stream(self, namespace, **kwargs): Return: data if entity was created w/o errors """ image_stream = self.ociclient.V1ImageStream(**kwargs) - is_name = image_stream.to_dict()['metadata']['name'] + is_name = image_stream.to_dict()["metadata"]["name"] self.logger.info("creating image stream %s", is_name) output = self.o_api.create_namespaced_image_stream(namespace=namespace, body=image_stream) self.wait_image_stream_exist(namespace=namespace, name=is_name) @@ -777,7 +808,7 @@ def create_secret(self, namespace, **kwargs): Return: data if entity was created w/o errors """ secret = self.kclient.V1Secret(**kwargs) - secret_name = secret.to_dict()['metadata']['name'] + secret_name = secret.to_dict()["metadata"]["name"] self.logger.info("creating secret %s", secret_name) output = self.k_api.create_namespaced_secret(namespace=namespace, body=secret) self.wait_secret_exist(namespace=namespace, name=secret_name) @@ -792,11 +823,10 @@ def create_deployment_config(self, namespace, **kwargs): Return: data if entity was created w/o errors """ dc = self.ociclient.V1DeploymentConfig(**kwargs) - dc_name = dc.to_dict()['metadata']['name'] + dc_name = dc.to_dict()["metadata"]["name"] self.logger.info("creating deployment config %s", 
dc_name) output = self.o_api.create_namespaced_deployment_config(namespace=namespace, body=dc) - self.wait_deployment_config_exist(namespace=namespace, - name=dc_name) + self.wait_deployment_config_exist(namespace=namespace, name=dc_name) return output def create_persistent_volume_claim(self, namespace, **kwargs): @@ -808,12 +838,12 @@ def create_persistent_volume_claim(self, namespace, **kwargs): Return: data if entity was created w/o errors """ pv_claim = self.kclient.V1PersistentVolumeClaim(**kwargs) - pv_claim_name = pv_claim.to_dict()['metadata']['name'] + pv_claim_name = pv_claim.to_dict()["metadata"]["name"] self.logger.info("creating persistent volume claim %s", pv_claim_name) - output = self.k_api.create_namespaced_persistent_volume_claim(namespace=namespace, - body=pv_claim) - self.wait_persistent_volume_claim_exist(namespace=namespace, - name=pv_claim_name) + output = self.k_api.create_namespaced_persistent_volume_claim( + namespace=namespace, body=pv_claim + ) + self.wait_persistent_volume_claim_exist(namespace=namespace, name=pv_claim_name) return output def create_project(self, name, description=None): @@ -825,9 +855,9 @@ def create_project(self, name, description=None): Return: data if entity was created w/o errors """ proj = self.ociclient.V1Project() - proj.metadata = {'name': name, 'annotations': {}} + proj.metadata = {"name": name, "annotations": {}} if description: - proj.metadata['annotations'] = {'openshift.io/description': description} + proj.metadata["annotations"] = {"openshift.io/description": description} self.logger.info("creating new project with name %s", name) output = self.o_api.create_project(body=proj) self.wait_project_exist(name=name) @@ -842,12 +872,12 @@ def run_job(self, namespace, body): Return: True/False """ body = self.rename_structure(body) - job_name = body['metadata']['name'] + job_name = body["metadata"]["name"] self.batch_api.create_namespaced_job(namespace=namespace, body=body) return self.wait_job_finished(namespace, job_name) - def wait_job_finished(self, namespace, name, wait='15m'): + def wait_job_finished(self, namespace, name, wait="15m"): """Waits for job to accomplish Args: @@ -856,17 +886,18 @@ def wait_job_finished(self, namespace, name, wait='15m'): wait: stop waiting after "wait" time Return: True/False """ + def job_wait_accomplished(): try: - job = self.batch_api.read_namespaced_job_status(name=name, - namespace=namespace) + job = self.batch_api.read_namespaced_job_status(name=name, namespace=namespace) # todo: replace with checking final statuses return bool(job.status.succeeded) except KeyError: return False + return wait_for(job_wait_accomplished, timeout=wait)[0] - def wait_persistent_volume_claim_status(self, namespace, name, status, wait='1m'): + def wait_persistent_volume_claim_status(self, namespace, name, status, wait="1m"): """Waits until pvc gets some particular status. For example: Bound. 
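# ---------------------------------------------------------------------------
# [Editor's aside, illustrative only -- not part of the patch] wait_job_finished()
# above and the wait_*_exist() helpers below all lean on one polling idiom from
# the wait_for library: pass a predicate plus func_kwargs and poll until the
# predicate returns something truthy or the timeout (seconds, or a string such
# as "15m") expires. A minimal self-contained sketch of that idiom follows;
# check_succeeded and its job_name argument are hypothetical stand-ins for the
# real read_namespaced_* API calls:
from wait_for import TimedOutError, wait_for


def check_succeeded(job_name):
    # Stand-in for e.g. batch_api.read_namespaced_job_status(...).status.succeeded
    return job_name == "demo-job"


try:
    # wait_for(...)[0] is the predicate's final return value, which is why the
    # methods in this module end with `return wait_for(...)[0]`.
    finished = wait_for(
        check_succeeded, timeout="30s", delay=5, func_kwargs={"job_name": "demo-job"}
    )[0]
except TimedOutError:
    finished = False
# ---------------------------------------------------------------------------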
@@ -877,10 +908,12 @@ def wait_persistent_volume_claim_status(self, namespace, name, status, wait='1m' wait: stop waiting after "wait" time Return: True/False """ + def pvc_wait_status(): try: - pvc = self.k_api.read_namespaced_persistent_volume_claim(name=name, - namespace=namespace) + pvc = self.k_api.read_namespaced_persistent_volume_claim( + name=name, namespace=namespace + ) return pvc.status.phase == status except KeyError: return False @@ -895,8 +928,11 @@ def wait_project_exist(self, name, wait=60): wait: entity should appear for this time then - True, otherwise False Return: True/False """ - return wait_for(self._does_exist, timeout=wait, - func_kwargs={'func': self.o_api.read_project, 'name': name})[0] + return wait_for( + self._does_exist, + timeout=wait, + func_kwargs={"func": self.o_api.read_project, "name": name}, + )[0] def wait_config_map_exist(self, namespace, name, wait=60): """Checks whether Config Map exists within some time. @@ -907,10 +943,15 @@ def wait_config_map_exist(self, namespace, name, wait=60): wait: entity should appear for this time then - True, otherwise False Return: True/False """ - return wait_for(self._does_exist, timeout=wait, - func_kwargs={'func': self.k_api.read_namespaced_config_map, - 'name': name, - 'namespace': namespace})[0] + return wait_for( + self._does_exist, + timeout=wait, + func_kwargs={ + "func": self.k_api.read_namespaced_config_map, + "name": name, + "namespace": namespace, + }, + )[0] def wait_stateful_set_exist(self, namespace, name, wait=900): """Checks whether StatefulSet exists within some time. @@ -923,10 +964,11 @@ def wait_stateful_set_exist(self, namespace, name, wait=900): """ api = self.kclient.AppsV1beta1Api(api_client=self.kapi_client) read_st = api.read_namespaced_stateful_set - return wait_for(self._does_exist, timeout=wait, - func_kwargs={'func': read_st, - 'name': name, - 'namespace': namespace})[0] + return wait_for( + self._does_exist, + timeout=wait, + func_kwargs={"func": read_st, "name": name, "namespace": namespace}, + )[0] def wait_service_exist(self, namespace, name, wait=60): """Checks whether Service exists within some time. @@ -937,10 +979,15 @@ def wait_service_exist(self, namespace, name, wait=60): wait: entity should appear for this time then - True, otherwise False Return: True/False """ - return wait_for(self._does_exist, timeout=wait, - func_kwargs={'func': self.k_api.read_namespaced_service, - 'name': name, - 'namespace': namespace})[0] + return wait_for( + self._does_exist, + timeout=wait, + func_kwargs={ + "func": self.k_api.read_namespaced_service, + "name": name, + "namespace": namespace, + }, + )[0] def wait_endpoints_exist(self, namespace, name, wait=60): """Checks whether Endpoints exists within some time. @@ -951,10 +998,15 @@ def wait_endpoints_exist(self, namespace, name, wait=60): wait: entity should appear for this time then - True, otherwise False Return: True/False """ - return wait_for(self._does_exist, timeout=wait, - func_kwargs={'func': self.k_api.read_namespaced_endpoints, - 'name': name, - 'namespace': namespace})[0] + return wait_for( + self._does_exist, + timeout=wait, + func_kwargs={ + "func": self.k_api.read_namespaced_endpoints, + "name": name, + "namespace": namespace, + }, + )[0] def wait_route_exist(self, namespace, name, wait=60): """Checks whether Route exists within some time. 
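# ---------------------------------------------------------------------------
# [Editor's aside, illustrative only -- not part of the patch] Each wait_*_exist()
# helper reformatted in these hunks feeds a different read_namespaced_* callable
# into the same existence probe, self._does_exist (whose body is truncated by a
# later hunk boundary). From the call sites it must behave roughly like this
# standalone sketch; the breadth of the caught exception is an assumption:
def does_exist(func, **kwargs):
    try:
        func(**kwargs)  # e.g. k_api.read_namespaced_service(name=..., namespace=...)
        return True
    except Exception:  # the real method presumably narrows this to the client's ApiException
        return False
# ---------------------------------------------------------------------------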
@@ -965,10 +1017,15 @@ def wait_route_exist(self, namespace, name, wait=60): wait: entity should appear for this time then - True, otherwise False Return: True/False """ - return wait_for(self._does_exist, timeout=wait, - func_kwargs={'func': self.o_api.read_namespaced_route, - 'name': name, - 'namespace': namespace})[0] + return wait_for( + self._does_exist, + timeout=wait, + func_kwargs={ + "func": self.o_api.read_namespaced_route, + "name": name, + "namespace": namespace, + }, + )[0] def wait_service_account_exist(self, namespace, name, wait=60): """Checks whether Service Account exists within some time. @@ -979,10 +1036,15 @@ def wait_service_account_exist(self, namespace, name, wait=60): wait: entity should appear for this time then - True, otherwise False Return: True/False """ - return wait_for(self._does_exist, timeout=wait, - func_kwargs={'func': self.k_api.read_namespaced_service_account, - 'name': name, - 'namespace': namespace})[0] + return wait_for( + self._does_exist, + timeout=wait, + func_kwargs={ + "func": self.k_api.read_namespaced_service_account, + "name": name, + "namespace": namespace, + }, + )[0] def wait_image_stream_exist(self, namespace, name, wait=60): """Checks whether Image Stream exists within some time. @@ -993,10 +1055,15 @@ def wait_image_stream_exist(self, namespace, name, wait=60): wait: entity should appear for this time then - True, otherwise False Return: True/False """ - return wait_for(self._does_exist, timeout=wait, - func_kwargs={'func': self.o_api.read_namespaced_image_stream, - 'name': name, - 'namespace': namespace})[0] + return wait_for( + self._does_exist, + timeout=wait, + func_kwargs={ + "func": self.o_api.read_namespaced_image_stream, + "name": name, + "namespace": namespace, + }, + )[0] def wait_role_binding_exist(self, namespace, name, wait=60): """Checks whether RoleBinding exists within some time. @@ -1008,10 +1075,15 @@ def wait_role_binding_exist(self, namespace, name, wait=60): Return: True/False """ auth_api = self.ociclient.AuthorizationOpenshiftIoV1Api(api_client=self.oapi_client) - return wait_for(self._does_exist, timeout=wait, - func_kwargs={'func': auth_api.read_namespaced_role_binding, - 'name': name, - 'namespace': namespace})[0] + return wait_for( + self._does_exist, + timeout=wait, + func_kwargs={ + "func": auth_api.read_namespaced_role_binding, + "name": name, + "namespace": namespace, + }, + )[0] def wait_secret_exist(self, namespace, name, wait=90): """Checks whether Secret exists within some time. @@ -1022,10 +1094,15 @@ def wait_secret_exist(self, namespace, name, wait=90): wait: entity should appear for this time then - True, otherwise False Return: True/False """ - return wait_for(self._does_exist, timeout=wait, - func_kwargs={'func': self.k_api.read_namespaced_secret, - 'name': name, - 'namespace': namespace})[0] + return wait_for( + self._does_exist, + timeout=wait, + func_kwargs={ + "func": self.k_api.read_namespaced_secret, + "name": name, + "namespace": namespace, + }, + )[0] def wait_persistent_volume_claim_exist(self, namespace, name, wait=60): """Checks whether Persistent Volume Claim exists within some time. 
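# ---------------------------------------------------------------------------
# [Editor's aside, illustrative only -- not part of the patch] Beyond the call
# re-wrapping in these wait_* hunks, the mechanical rewrites throughout the file
# are quote normalization (black-style) and str.format()-to-f-string conversion
# (pyupgrade-style). The same rewrite in miniature:
name, namespace = "router", "default"
old_style = "waiting for {n} in {s}".format(n=name, s=namespace)  # before
new_style = f"waiting for {name} in {namespace}"                  # after
assert old_style == new_style  # identical output, tidier source
# ---------------------------------------------------------------------------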
@@ -1036,10 +1113,15 @@ def wait_persistent_volume_claim_exist(self, namespace, name, wait=60): wait: entity should appear for this time then - True, otherwise False Return: True/False """ - return wait_for(self._does_exist, timeout=wait, - func_kwargs={'func': self.k_api.read_namespaced_persistent_volume_claim, - 'name': name, - 'namespace': namespace})[0] + return wait_for( + self._does_exist, + timeout=wait, + func_kwargs={ + "func": self.k_api.read_namespaced_persistent_volume_claim, + "name": name, + "namespace": namespace, + }, + )[0] def wait_deployment_config_exist(self, namespace, name, wait=600): """Checks whether Deployment Config exists within some time. @@ -1051,10 +1133,11 @@ def wait_deployment_config_exist(self, namespace, name, wait=600): Return: True/False """ read_dc = self.o_api.read_namespaced_deployment_config - return wait_for(self._does_exist, timeout=wait, - func_kwargs={'func': read_dc, - 'name': name, - 'namespace': namespace})[0] + return wait_for( + self._does_exist, + timeout=wait, + func_kwargs={"func": read_dc, "name": name, "namespace": namespace}, + )[0] def wait_template_exist(self, namespace, name, wait=60): """Checks whether Template exists within some time. @@ -1065,10 +1148,15 @@ def wait_template_exist(self, namespace, name, wait=60): wait: entity should appear for this time then - True, otherwise False Return: True/False """ - return wait_for(self._does_exist, timeout=wait, - func_kwargs={'func': self.o_api.read_namespaced_template, - 'name': name, - 'namespace': namespace})[0] + return wait_for( + self._does_exist, + timeout=wait, + func_kwargs={ + "func": self.o_api.read_namespaced_template, + "name": name, + "namespace": namespace, + }, + )[0] def _does_exist(self, func, **kwargs): try: @@ -1088,48 +1176,46 @@ def _restore_missing_project_role_bindings(self, namespace): """ # adding builder role binding auth_api = self.ociclient.AuthorizationOpenshiftIoV1Api(api_client=self.oapi_client) - builder_role = self.kclient.V1ObjectReference(name='system:image-builder') - builder_sa = self.kclient.V1ObjectReference(name='builder', - kind='ServiceAccount', - namespace=namespace) - builder_role_binding_name = self.kclient.V1ObjectMeta(name='builder-binding') - builder_role_binding = self.ociclient.V1RoleBinding(role_ref=builder_role, - subjects=[builder_sa], - metadata=builder_role_binding_name) + builder_role = self.kclient.V1ObjectReference(name="system:image-builder") + builder_sa = self.kclient.V1ObjectReference( + name="builder", kind="ServiceAccount", namespace=namespace + ) + builder_role_binding_name = self.kclient.V1ObjectMeta(name="builder-binding") + builder_role_binding = self.ociclient.V1RoleBinding( + role_ref=builder_role, subjects=[builder_sa], metadata=builder_role_binding_name + ) auth_api.create_namespaced_role_binding(namespace=namespace, body=builder_role_binding) # adding deployer role binding - deployer_role = self.kclient.V1ObjectReference(name='system:deployer') - deployer_sa = self.kclient.V1ObjectReference(name='deployer', - kind='ServiceAccount', - namespace=namespace) - deployer_role_binding_name = self.kclient.V1ObjectMeta(name='deployer-binding') - deployer_role_binding = self.ociclient.V1RoleBinding(role_ref=deployer_role, - subjects=[deployer_sa], - metadata=deployer_role_binding_name) + deployer_role = self.kclient.V1ObjectReference(name="system:deployer") + deployer_sa = self.kclient.V1ObjectReference( + name="deployer", kind="ServiceAccount", namespace=namespace + ) + deployer_role_binding_name = 
self.kclient.V1ObjectMeta(name="deployer-binding") + deployer_role_binding = self.ociclient.V1RoleBinding( + role_ref=deployer_role, subjects=[deployer_sa], metadata=deployer_role_binding_name + ) auth_api.create_namespaced_role_binding(namespace=namespace, body=deployer_role_binding) # adding admin role binding - admin_role = self.kclient.V1ObjectReference(name='admin') - admin_user = self.kclient.V1ObjectReference(name='admin', - kind='User', - namespace=namespace) - admin_role_binding_name = self.kclient.V1ObjectMeta(name='admin-binding') - admin_role_binding = self.ociclient.V1RoleBinding(role_ref=admin_role, - subjects=[admin_user], - metadata=admin_role_binding_name) + admin_role = self.kclient.V1ObjectReference(name="admin") + admin_user = self.kclient.V1ObjectReference(name="admin", kind="User", namespace=namespace) + admin_role_binding_name = self.kclient.V1ObjectMeta(name="admin-binding") + admin_role_binding = self.ociclient.V1RoleBinding( + role_ref=admin_role, subjects=[admin_user], metadata=admin_role_binding_name + ) auth_api.create_namespaced_role_binding(namespace=namespace, body=admin_role_binding) # adding image-puller role binding - puller_role = self.kclient.V1ObjectReference(name='system:image-puller') - group_name = 'system:serviceaccounts:{proj}'.format(proj=namespace) - puller_group = self.kclient.V1ObjectReference(name=group_name, - kind='SystemGroup', - namespace=namespace) - role_binding_name = self.kclient.V1ObjectMeta(name='image-puller-binding') - puller_role_binding = self.ociclient.V1RoleBinding(role_ref=puller_role, - subjects=[puller_group], - metadata=role_binding_name) + puller_role = self.kclient.V1ObjectReference(name="system:image-puller") + group_name = f"system:serviceaccounts:{namespace}" + puller_group = self.kclient.V1ObjectReference( + name=group_name, kind="SystemGroup", namespace=namespace + ) + role_binding_name = self.kclient.V1ObjectMeta(name="image-puller-binding") + puller_role_binding = self.ociclient.V1RoleBinding( + role_ref=puller_role, subjects=[puller_group], metadata=role_binding_name + ) auth_api.create_namespaced_role_binding(namespace=namespace, body=puller_role_binding) def delete_project(self, name, wait=300): @@ -1144,11 +1230,13 @@ def delete_project(self, name, wait=300): if self.does_project_exist(name=name): self.o_api.delete_project(name=name) try: - wait_for(lambda name: not self.does_project_exist(name=name), timeout=wait, - func_kwargs={'name': name}) + wait_for( + lambda name: not self.does_project_exist(name=name), + timeout=wait, + func_kwargs={"name": name}, + ) except TimedOutError: - raise TimedOutError('project {n} was not removed within {w} sec'.format(n=name, - w=wait)) + raise TimedOutError(f"project {name} was not removed within {wait} sec") def scale_entity(self, namespace, name, replicas, wait=60): """Allows to scale up/down entities. 
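For reference, the delete_project contract shown above (poll until the project is gone, raise TimedOutError otherwise) can be exercised like this; a sketch only, with a hypothetical project name and an assumed connected `system`:

    from wait_for import TimedOutError

    try:
        system.delete_project(name="demo-proj", wait=120)
    except TimedOutError:
        # the project was still present after 120 sec
        raise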
@@ -1166,12 +1254,14 @@ def scale_entity(self, namespace, name, replicas, wait=60):
         scale_val = self.kclient.V1Scale(spec=self.kclient.V1ScaleSpec(replicas=replicas))
         if self.is_deployment_config(name=name, namespace=namespace):
-            self.o_api.patch_namespaced_deployment_config_scale(name=name, namespace=namespace,
-                                                                body=scale_val)
+            self.o_api.patch_namespaced_deployment_config_scale(
+                name=name, namespace=namespace, body=scale_val
+            )

             def check_scale_value():
-                got_scale = self.o_api.read_namespaced_deployment_config_scale(name=name,
-                                                                               namespace=namespace)
+                got_scale = self.o_api.read_namespaced_deployment_config_scale(
+                    name=name, namespace=namespace
+                )
                 return int(got_scale.spec.replicas or 0)

         elif self.is_stateful_set(name=name, namespace=namespace):
@@ -1184,9 +1274,11 @@ def check_scale_value():
             def check_scale_value():
                 got_scale = st_api.read_namespaced_stateful_set(name=name, namespace=namespace)
                 return int(got_scale.spec.replicas or 0)
+
         else:
-            raise ValueError("This name %s is not found among "
                             "deployment configs or stateful sets", name)
+            raise ValueError(
+                f"This name {name} is not found among deployment configs or stateful sets"
+            )

         self.logger.info("scaling entity %s to %s replicas", name, replicas)
         wait_for(check_scale_value, timeout=wait, fail_condition=lambda val: val != replicas)
@@ -1212,9 +1304,9 @@ def create_scc(self, body):
         Returns: security context constraint object
         """
         raw_scc = self.rename_structure(body)
-        if raw_scc.get('api_version') == 'v1':
+        if raw_scc.get("api_version") == "v1":
             # there is inconsistency between api and some scc files. v1 is not accepted by api now
-            raw_scc.pop('api_version')
+            raw_scc.pop("api_version")
         scc = self.ociclient.V1SecurityContextConstraints(**raw_scc)
         return self.security_api.create_security_context_constraints(body=scc)
@@ -1227,22 +1319,16 @@ def append_sa_to_scc(self, scc_name, namespace, sa):
             sa: service account's name
         Returns: updated security context constraint object
         """
-        user = 'system:serviceaccount:{proj}:{usr}'.format(proj=namespace,
-                                                           usr=sa)
+        user = f"system:serviceaccount:{namespace}:{sa}"
         if self.get_scc(scc_name).users is None:
             # ocp 3.6 has None for users if there is no sa in it
-            update_scc_cmd = [
-                {"op": "add",
-                 "path": "/users",
-                 "value": [user]}]
+            update_scc_cmd = [{"op": "add", "path": "/users", "value": [user]}]
         else:
-            update_scc_cmd = [
-                {"op": "add",
-                 "path": "/users/-",
-                 "value": user}]
+            update_scc_cmd = [{"op": "add", "path": "/users/-", "value": user}]
         self.logger.debug("adding user %r to scc %r", user, scc_name)
-        return self.security_api.patch_security_context_constraints(name=scc_name,
-                                                                     body=update_scc_cmd)
+        return self.security_api.patch_security_context_constraints(
+            name=scc_name, body=update_scc_cmd
+        )

     def remove_sa_from_scc(self, scc_name, namespace, sa):
         """Removes Service Account from respective Security Constraint

         Args:
             scc_name: security context constraint name
             namespace: project(namespace) name
             sa: service account's name
         Returns: updated security context constraint object
         """
-        user = 'system:serviceaccount:{proj}:{usr}'.format(proj=namespace,
-                                                           usr=sa)
+        user = f"system:serviceaccount:{namespace}:{sa}"
         # json patch's remove works only with indexes. so we have to figure out index
         try:
-            index = next(val[0] for val in enumerate(self.get_scc(scc_name).users)
-                         if val[1] == user)
+            index = next(
+                val[0] for val in enumerate(self.get_scc(scc_name).users) if val[1] == user
+            )
         except StopIteration:
-            raise ValueError("No such sa {} in scc {}".format(user, scc_name))
-        update_scc_cmd = [
-            {"op": "remove",
-             "path": "/users/{}".format(index)}]
+            raise ValueError(f"No such sa {user} in scc {scc_name}")
+        update_scc_cmd = [{"op": "remove", "path": f"/users/{index}"}]
         self.logger.debug("removing user %r from scc %s with index %s", user, scc_name, index)
-        return self.security_api.patch_security_context_constraints(name=scc_name,
-                                                                     body=update_scc_cmd)
+        return self.security_api.patch_security_context_constraints(
+            name=scc_name, body=update_scc_cmd
+        )

     def is_vm_running(self, vm_name, running_pods=()):
         """Emulates check is vm(appliance) up and running
@@ -1350,8 +1435,9 @@ def is_vm_stopped(self, vm_name):
         """
         pods = self.k_api.list_namespaced_pod(namespace=vm_name).items
         if pods:
-            self.logger.info(("some pods are still "
                              "running: {}").format([pod.metadata.name for pod in pods]))
+            self.logger.info(
+                "some pods are still running: %s", [pod.metadata.name for pod in pods]
+            )
         return not bool(pods)

     def wait_vm_running(self, vm_name, num_sec=900):
@@ -1384,8 +1470,9 @@ def current_ip_address(self, vm_name):
         Return: ip address or None
         """
         try:
-            common_svc = self.k_api.read_namespaced_service(name='common-service',
-                                                            namespace=vm_name)
+            common_svc = self.k_api.read_namespaced_service(
+                name="common-service", namespace=vm_name
+            )
             return common_svc.spec.external_i_ps[0]
         except Exception:
             return None
@@ -1406,9 +1493,11 @@ def in_steady_state(self, vm_name):
             vm_name: VM name
         Returns: True/False
         """
-        return (self.is_vm_running(vm_name)
-                or self.is_vm_stopped(vm_name)
-                or self.is_vm_suspended(vm_name))
+        return (
+            self.is_vm_running(vm_name)
+            or self.is_vm_stopped(vm_name)
+            or self.is_vm_suspended(vm_name)
+        )

     @property
     def can_rename(self):
@@ -1433,7 +1522,7 @@ def get_appliance_version(self, vm_name):
         """
         try:
             proj = self.o_api.read_project(vm_name)
-            description = proj.metadata.annotations['openshift.io/description']
+            description = proj.metadata.annotations["openshift.io/description"]
             return Version(TemplateName.parse_template(description).version)
         except (ApiException, KeyError, ValueError):
             try:
@@ -1441,7 +1530,7 @@
             except ValueError:
                 return None

-    def delete_template(self, template_name, namespace='openshift'):
+    def delete_template(self, template_name, namespace="openshift"):
         """Deletes template

         Args:
@@ -1450,16 +1539,19 @@ def delete_template(self, template_name, namespace='openshift'):
         Returns: result of delete operation
         """
         options = self.kclient.V1DeleteOptions()
-        return self.o_api.delete_namespaced_template(name=template_name, namespace=namespace,
-                                                     body=options)
+        return self.o_api.delete_namespaced_template(
+            name=template_name, namespace=namespace, body=options
+        )

     def get_meta_value(self, instance, key):
         raise NotImplementedError(
-            'Provider {} does not implement get_meta_value'.format(type(self).__name__))
+            f"Provider {type(self).__name__} does not implement get_meta_value"
+        )

     def set_meta_value(self, instance, key):
         raise NotImplementedError(
-            'Provider {} does not implement get_meta_value'.format(type(self).__name__))
+            f"Provider {type(self).__name__} does not implement set_meta_value"
+        )

     def vm_status(self, vm_name):
         """Returns current vm/appliance
state @@ -1469,8 +1561,8 @@ def vm_status(self, vm_name): Returns: up/down or exception if vm doesn't exist """ if not self.does_vm_exist(vm_name): - raise ValueError("Vm {} doesn't exist".format(vm_name)) - return 'up' if self.is_vm_running(vm_name) else 'down' + raise ValueError(f"Vm {vm_name} doesn't exist") + return "up" if self.is_vm_running(vm_name) else "down" def vm_creation_time(self, vm_name): """Returns time when vm/appliance was created @@ -1480,15 +1572,14 @@ def vm_creation_time(self, vm_name): Return: datetime obj """ if not self.does_vm_exist(vm_name): - raise ValueError("Vm {} doesn't exist".format(vm_name)) + raise ValueError(f"Vm {vm_name} doesn't exist") projects = self.o_api.list_project().items project = next(proj for proj in projects if proj.metadata.name == vm_name) return project.metadata.creation_timestamp @staticmethod def _progress_log_callback(logger, source, destination, progress): - logger.info("Provisioning progress {}->{}: {}".format( - source, destination, str(progress))) + logger.info(f"Provisioning progress {source}->{destination}: {str(progress)}") def vm_hardware_configuration(self, vm_name): """Collects project's cpu and ram usage @@ -1497,26 +1588,25 @@ def vm_hardware_configuration(self, vm_name): vm_name: openshift's data Returns: collected data """ - hw_config = {'ram': 0, - 'cpu': 0} + hw_config = {"ram": 0, "cpu": 0} if not self.does_vm_exist(vm_name): return hw_config proj_pods = self.k_api.list_namespaced_pod(vm_name) for pod in proj_pods.items: for container in pod.spec.containers: - cpu = container.resources.requests['cpu'] - hw_config['cpu'] += float(cpu[:-1]) / 1000 if cpu.endswith('m') else float(cpu) - - ram = container.resources.requests['memory'] - if ram.endswith('Mi'): - hw_config['ram'] += float(ram[:-2]) - elif ram.endswith('Gi'): - hw_config['ram'] += float(ram[:-2]) * 1024 - elif ram.endswith('Ki'): - hw_config['ram'] += float(ram[:-2]) / 1024 + cpu = container.resources.requests["cpu"] + hw_config["cpu"] += float(cpu[:-1]) / 1000 if cpu.endswith("m") else float(cpu) + + ram = container.resources.requests["memory"] + if ram.endswith("Mi"): + hw_config["ram"] += float(ram[:-2]) + elif ram.endswith("Gi"): + hw_config["ram"] += float(ram[:-2]) * 1024 + elif ram.endswith("Ki"): + hw_config["ram"] += float(ram[:-2]) / 1024 else: - hw_config['ram'] += ram + hw_config["ram"] += ram return hw_config def usage_and_quota(self): @@ -1527,13 +1617,13 @@ def usage_and_quota(self): # todo: finish this method later return { # RAM - 'ram_used': used_ram, - 'ram_total': installed_ram, - 'ram_limit': None, + "ram_used": used_ram, + "ram_total": installed_ram, + "ram_limit": None, # CPU - 'cpu_used': used_cpu, - 'cpu_total': installed_cpu, - 'cpu_limit': None, + "cpu_used": used_cpu, + "cpu_total": installed_cpu, + "cpu_limit": None, } def get_required_pods(self, vm_name): @@ -1546,7 +1636,7 @@ def get_required_pods(self, vm_name): return self.required_project_pods def get_ip_address(self, vm_name, timeout=600): - """ Returns the IP address for the selected appliance. + """Returns the IP address for the selected appliance. Args: vm_name: The name of the vm to obtain the IP for. @@ -1554,11 +1644,13 @@ def get_ip_address(self, vm_name, timeout=600): Returns: A string containing the first found IP that isn't the device. 
""" try: - ip_address, tc = wait_for(lambda: self.current_ip_address(vm_name), - fail_condition=None, - delay=5, - timeout=timeout, - message="get_ip_address from openshift") + ip_address, tc = wait_for( + lambda: self.current_ip_address(vm_name), + fail_condition=None, + delay=5, + timeout=timeout, + message="get_ip_address from openshift", + ) except TimedOutError: ip_address = None return ip_address @@ -1574,9 +1666,10 @@ def get_appliance_tags(self, name): Returns: dict with tags and urls """ try: - read_data = self.k_api.read_namespaced_config_map(name='image-repo-data', - namespace=name) - return json.loads(read_data.data['tags']) + read_data = self.k_api.read_namespaced_config_map( + name="image-repo-data", namespace=name + ) + return json.loads(read_data.data["tags"]) except ApiException: return {} @@ -1621,7 +1714,7 @@ def find_job_pods(self, namespace, name): """ pods = [] for pod in self.list_pods(namespace=namespace): - if pod.metadata.labels.get('job-name', '') == name: + if pod.metadata.labels.get("job-name", "") == name: pods.append(pod) return pods @@ -1644,8 +1737,9 @@ def delete_pod(self, namespace, name, options=None): options: delete options like force delete and etc Returns: Pod """ - return self.k_api.delete_namespaced_pod(namespace=namespace, name=name, - body=options or self.kclient.V1DeleteOptions()) + return self.k_api.delete_namespaced_pod( + namespace=namespace, name=name, body=options or self.kclient.V1DeleteOptions() + ) def is_pod_running(self, namespace, name): """Checks whether pod is running @@ -1661,8 +1755,9 @@ def is_pod_running(self, namespace, name): dc = self.o_api.read_namespaced_deployment_config(name=name, namespace=namespace) status = dc.status.ready_replicas elif self.is_stateful_set(name=name, namespace=namespace): - pods = self.k_api.list_namespaced_pod(namespace=namespace, - label_selector='name={n}'.format(n=name)) + pods = self.k_api.list_namespaced_pod( + namespace=namespace, label_selector=f"name={name}" + ) pod_stats = [pod.status.container_statuses[-1].ready for pod in pods.items] status = all(pod_stats) else: @@ -1721,9 +1816,6 @@ def run_command(self, namespace, name, cmd, **kwargs): """ # there are some limitations and this code isn't robust enough due to # https://github.com/kubernetes-client/python/issues/58 - return self.k_api.connect_post_namespaced_pod_exec(namespace=namespace, - name=name, - command=cmd, - stdout=True, - stderr=True, - **kwargs) + return self.k_api.connect_post_namespaced_pod_exec( + namespace=namespace, name=name, command=cmd, stdout=True, stderr=True, **kwargs + ) diff --git a/wrapanapi/systems/ec2.py b/wrapanapi/systems/ec2.py index f8ec74b9..3f15b27e 100644 --- a/wrapanapi/systems/ec2.py +++ b/wrapanapi/systems/ec2.py @@ -1,20 +1,26 @@ -# coding: utf-8 - import base64 -import boto3 import os import re +import boto3 +from boto3 import client as boto3client +from boto3 import resource as boto3resource from botocore.config import Config from botocore.exceptions import ClientError -from boto3 import ( - resource as boto3resource, - client as boto3client -) - -from wrapanapi.entities import (Instance, Network, NetworkMixin, Stack, StackMixin, - Template, TemplateMixin, VmMixin, VmState, Volume) -from wrapanapi.exceptions import (ActionTimedOutError, MultipleItemsError, NotFoundError) + +from wrapanapi.entities import Instance +from wrapanapi.entities import Network +from wrapanapi.entities import NetworkMixin +from wrapanapi.entities import Stack +from wrapanapi.entities import StackMixin +from 
wrapanapi.entities import Template +from wrapanapi.entities import TemplateMixin +from wrapanapi.entities import VmMixin +from wrapanapi.entities import VmState +from wrapanapi.entities import Volume +from wrapanapi.exceptions import ActionTimedOutError +from wrapanapi.exceptions import MultipleItemsError +from wrapanapi.exceptions import NotFoundError from wrapanapi.systems.base import System @@ -25,15 +31,16 @@ def _regions(regionmodule, regionname): return None -class _SharedMethodsMixin(object): +class _SharedMethodsMixin: """ - Mixin class that holds properties/methods EC2Entities share. - This should be listed first in the child class inheritance to satisfy - the methods required by the Entity abstract base class + Mixin class that holds properties/methods EC2Entities share. + This should be listed first in the child class inheritance to satisfy + the methods required by the Entity abstract base class """ + @property def _identifying_attrs(self): - return {'uuid': self._uuid} + return {"uuid": self._uuid} @property def uuid(self): @@ -50,39 +57,42 @@ def get_details(self): return self.raw def rename(self, new_name): - self.logger.info("setting name of %s %s to %s", self.__class__.__name__, self.uuid, - new_name) - self.raw.create_tags(Tags=[{'Key': 'Name', 'Value': new_name}]) + self.logger.info( + "setting name of %s %s to %s", self.__class__.__name__, self.uuid, new_name + ) + self.raw.create_tags(Tags=[{"Key": "Name", "Value": new_name}]) self.refresh() # update raw return new_name -class _TagMixin(object): +class _TagMixin: def set_tag(self, key, value): - self.system.ec2_connection.create_tags(Resources=[self.uuid], - Tags=[{"Key": key, "Value": value}]) + self.system.ec2_connection.create_tags( + Resources=[self.uuid], Tags=[{"Key": key, "Value": value}] + ) def get_tag_value(self, key): self.refresh() if self.raw.tags: for tag in self.raw.tags: - if tag.get('Key') == key: - return tag.get('Value') + if tag.get("Key") == key: + return tag.get("Value") return None def unset_tag(self, key, value): - self.system.ec2_connection.delete_tags(Resources=[self.uuid], - Tags=[{"Key": key, "Value": value}]) + self.system.ec2_connection.delete_tags( + Resources=[self.uuid], Tags=[{"Key": key, "Value": value}] + ) class EC2Instance(_TagMixin, _SharedMethodsMixin, Instance): state_map = { - 'pending': VmState.STARTING, - 'stopping': VmState.STOPPING, - 'shutting-down': VmState.STOPPING, - 'running': VmState.RUNNING, - 'stopped': VmState.STOPPED, - 'terminated': VmState.DELETED + "pending": VmState.STARTING, + "stopping": VmState.STOPPING, + "shutting-down": VmState.STOPPING, + "running": VmState.RUNNING, + "stopped": VmState.STOPPED, + "terminated": VmState.DELETED, } def __init__(self, system, raw=None, **kwargs): @@ -95,18 +105,18 @@ def __init__(self, system, raw=None, **kwargs): uuid: unique ID of instance """ - self._uuid = raw.id if raw else kwargs.get('uuid') + self._uuid = raw.id if raw else kwargs.get("uuid") if not self._uuid: raise ValueError("missing required kwarg: 'uuid'") - super(EC2Instance, self).__init__(system, raw, **kwargs) + super().__init__(system, raw, **kwargs) self._api = self.system.ec2_connection @property def name(self): - tag_value = self.get_tag_value('Name') - return getattr(self.raw, 'name', None) or tag_value if tag_value else self.raw.id + tag_value = self.get_tag_value("Name") + return getattr(self.raw, "name", None) or tag_value if tag_value else self.raw.id def _get_state(self): self.refresh() @@ -119,7 +129,7 @@ def ip(self): @property def 
all_ips(self): - """ Wrapping self.ip to meet abstractproperty requirement + """Wrapping self.ip to meet abstractproperty requirement Returns: (list) the addresses assigned to the machine """ @@ -217,29 +227,52 @@ def restart(self): def change_type(self, instance_type): try: - self.raw.modify_attribute(InstanceType={'Value': instance_type}) + self.raw.modify_attribute(InstanceType={"Value": instance_type}) return True except Exception: return False -class StackStates(object): - ACTIVE = ['CREATE_COMPLETE', 'ROLLBACK_COMPLETE', 'CREATE_FAILED', - 'UPDATE_ROLLBACK_COMPLETE'] - COMPLETE = ['CREATE_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE'] - FAILED = ['ROLLBACK_COMPLETE', 'CREATE_FAILED', 'ROLLBACK_FAILED', 'DELETE_FAILED', - 'UPDATE_ROLLBACK_FAILED'] - DELETED = ['DELETE_COMPLETE'] - IN_PROGRESS = ['CREATE_IN_PROGRESS', 'ROLLBACK_IN_PROGRESS', 'DELETE_IN_PROGRESS', - 'UPDATE_IN_PROGRESS', 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS', - 'UPDATE_ROLLBACK_IN_PROGRESS', 'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS', - 'REVIEW_IN_PROGRESS'] - ALL = ['CREATE_IN_PROGRESS', 'CREATE_FAILED', 'CREATE_COMPLETE', 'ROLLBACK_IN_PROGRESS', - 'ROLLBACK_FAILED', 'ROLLBACK_COMPLETE', 'DELETE_IN_PROGRESS', 'DELETE_FAILED', - 'DELETE_COMPLETE', 'UPDATE_IN_PROGRESS', 'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS', - 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_IN_PROGRESS', 'UPDATE_ROLLBACK_FAILED', - 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS', 'UPDATE_ROLLBACK_COMPLETE', - 'REVIEW_IN_PROGRESS'] +class StackStates: + ACTIVE = ["CREATE_COMPLETE", "ROLLBACK_COMPLETE", "CREATE_FAILED", "UPDATE_ROLLBACK_COMPLETE"] + COMPLETE = ["CREATE_COMPLETE", "UPDATE_ROLLBACK_COMPLETE"] + FAILED = [ + "ROLLBACK_COMPLETE", + "CREATE_FAILED", + "ROLLBACK_FAILED", + "DELETE_FAILED", + "UPDATE_ROLLBACK_FAILED", + ] + DELETED = ["DELETE_COMPLETE"] + IN_PROGRESS = [ + "CREATE_IN_PROGRESS", + "ROLLBACK_IN_PROGRESS", + "DELETE_IN_PROGRESS", + "UPDATE_IN_PROGRESS", + "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS", + "UPDATE_ROLLBACK_IN_PROGRESS", + "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS", + "REVIEW_IN_PROGRESS", + ] + ALL = [ + "CREATE_IN_PROGRESS", + "CREATE_FAILED", + "CREATE_COMPLETE", + "ROLLBACK_IN_PROGRESS", + "ROLLBACK_FAILED", + "ROLLBACK_COMPLETE", + "DELETE_IN_PROGRESS", + "DELETE_FAILED", + "DELETE_COMPLETE", + "UPDATE_IN_PROGRESS", + "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS", + "UPDATE_COMPLETE", + "UPDATE_ROLLBACK_IN_PROGRESS", + "UPDATE_ROLLBACK_FAILED", + "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS", + "UPDATE_ROLLBACK_COMPLETE", + "REVIEW_IN_PROGRESS", + ] class CloudFormationStack(_TagMixin, _SharedMethodsMixin, Stack): @@ -252,11 +285,11 @@ def __init__(self, system, raw=None, **kwargs): raw: raw dict for this stack returned by boto CloudFormation.Client.describe_stacks() uuid: the stack ID """ - self._uuid = raw.stack_id if raw else kwargs.get('uuid') + self._uuid = raw.stack_id if raw else kwargs.get("uuid") if not self._uuid: raise ValueError("missing required kwarg: 'uuid'") - super(CloudFormationStack, self).__init__(system, raw, **kwargs) + super().__init__(system, raw, **kwargs) self._api = self.system.cloudformation_connection @property @@ -308,17 +341,17 @@ def __init__(self, system, raw=None, **kwargs): raw: the boto.ec2.image.Image object if already obtained, or None uuid: unique ID of the image """ - self._uuid = raw.id if raw else kwargs.get('uuid') + self._uuid = raw.id if raw else kwargs.get("uuid") if not self._uuid: raise ValueError("missing required kwarg: 'uuid'") - super(EC2Image, self).__init__(system, raw, **kwargs) + 
super().__init__(system, raw, **kwargs) self._api = self.system.ec2_connection @property def name(self): - tag_value = self.get_tag_value('Name') + tag_value = self.get_tag_value("Name") return tag_value if tag_value else self.raw.name def delete(self): @@ -353,17 +386,17 @@ def __init__(self, system, raw=None, **kwargs): raw: the boto.ec2.network.Network object if already obtained, or None uuid: unique ID of the network """ - self._uuid = raw.id if raw else kwargs.get('uuid') + self._uuid = raw.id if raw else kwargs.get("uuid") if not self._uuid: raise ValueError("missing required kwarg: 'uuid'") - super(EC2Vpc, self).__init__(system, raw, **kwargs) + super().__init__(system, raw, **kwargs) self._api = self.system.ec2_connection @property def name(self): - tag_value = self.get_tag_value('Name') + tag_value = self.get_tag_value("Name") return tag_value if tag_value else self.raw.id def delete(self): @@ -394,17 +427,17 @@ def __init__(self, system, raw=None, **kwargs): raw: the boto.ec2.volume.Volume object if already obtained, or None uuid: unique ID of the volume """ - self._uuid = raw.id if raw else kwargs.get('uuid') + self._uuid = raw.id if raw else kwargs.get("uuid") if not self._uuid: raise ValueError("missing required kwarg: 'uuid'") - super(EBSVolume, self).__init__(system, raw, **kwargs) + super().__init__(system, raw, **kwargs) self._api = self.system.ec2_connection @property def name(self): - tag_value = self.get_tag_value('Name') + tag_value = self.get_tag_value("Name") return tag_value if tag_value else self.raw.id def resize(self, new_size): @@ -415,7 +448,7 @@ def resize(self, new_size): except Exception: return False - def attach(self, instance_id, device='/dev/sdh'): + def attach(self, instance_id, device="/dev/sdh"): try: self.raw.attach_to_instance(Device=device, InstanceId=instance_id) self.refresh() @@ -423,7 +456,7 @@ def attach(self, instance_id, device='/dev/sdh'): except Exception: return False - def detach(self, instance_id, device='/dev/sdh', force=False): + def detach(self, instance_id, device="/dev/sdh", force=False): try: self.raw.detach_from_instance(Device=device, InstanceId=instance_id, Force=force) self.refresh() @@ -469,51 +502,44 @@ class EC2System(System, VmMixin, TemplateMixin, StackMixin, NetworkMixin): """ _stats_available = { - 'num_vm': lambda self: len(self.list_vms()), - 'num_template': lambda self: len(self.list_templates()), + "num_vm": lambda self: len(self.list_vms()), + "num_template": lambda self: len(self.list_templates()), } can_suspend = False can_pause = False def __init__(self, **kwargs): - super(EC2System, self).__init__(**kwargs) - self._username = kwargs.get('username') - self._password = kwargs.get('password') - self._region_name = kwargs.get('region') - - connection_config = Config( - signature_version='s3v4', - retries=dict( - max_attempts=10 - ) - ) + super().__init__(**kwargs) + self._username = kwargs.get("username") + self._password = kwargs.get("password") + self._region_name = kwargs.get("region") + + connection_config = Config(signature_version="s3v4", retries=dict(max_attempts=10)) connection_kwargs = { - 'aws_access_key_id': self._username, - 'aws_secret_access_key': self._password, - 'region_name': self._region_name, - 'config': connection_config + "aws_access_key_id": self._username, + "aws_secret_access_key": self._password, + "region_name": self._region_name, + "config": connection_config, } - self.sqs_connection = boto3client('sqs', **connection_kwargs) - self.elb_connection = boto3client('elb', **connection_kwargs) 
- self.s3_connection = boto3resource('s3', **connection_kwargs) - self.ec2_connection = boto3client('ec2', **connection_kwargs) - self.ec2_resource = boto3resource('ec2', **connection_kwargs) - self.ecr_connection = boto3client('ecr', **connection_kwargs) - self.cloudformation_connection = boto3client('cloudformation', **connection_kwargs) - self.cloudformation_resource = boto3resource('cloudformation', **connection_kwargs) - self.ssm_connection = boto3client('ssm', **connection_kwargs) - self.sns_connection = boto3client('sns', **connection_kwargs) - self.cw_events_connection = boto3client('events', **connection_kwargs) + self.sqs_connection = boto3client("sqs", **connection_kwargs) + self.elb_connection = boto3client("elb", **connection_kwargs) + self.s3_connection = boto3resource("s3", **connection_kwargs) + self.ec2_connection = boto3client("ec2", **connection_kwargs) + self.ec2_resource = boto3resource("ec2", **connection_kwargs) + self.ecr_connection = boto3client("ecr", **connection_kwargs) + self.cloudformation_connection = boto3client("cloudformation", **connection_kwargs) + self.cloudformation_resource = boto3resource("cloudformation", **connection_kwargs) + self.ssm_connection = boto3client("ssm", **connection_kwargs) + self.sns_connection = boto3client("sns", **connection_kwargs) + self.cw_events_connection = boto3client("events", **connection_kwargs) self.kwargs = kwargs @property def _identifying_attrs(self): - return { - 'username': self._username, 'password': self._password, 'region': self._region_name - } + return {"username": self._username, "password": self._password, "region": self._region_name} @property def can_suspend(self): @@ -557,42 +583,48 @@ def _get_resource(self, resource, find_method, name=None, id=None, **kwargs): resources = find_method(name=name, id=id, **kwargs) name_or_id = name if name else id if not resources: - raise NotFoundError("{} with {} {} not found".format(resource_name, - 'name' if name else 'id', - name_or_id)) + raise NotFoundError( + "{} with {} {} not found".format( + resource_name, "name" if name else "id", name_or_id + ) + ) elif len(resources) > 1: - raise MultipleItemsError("Multiple {}s with {} {} found".format( - resource_name, 'name' if name else 'id', name_or_id)) + raise MultipleItemsError( + "Multiple {}s with {} {} found".format( + resource_name, "name" if name else "id", name_or_id + ) + ) return resources[0] def _get_instances(self, **kwargs): """ Gets instance reservations and parses instance objects """ - reservations = self.ec2_connection.describe_instances(**kwargs).get('Reservations') + reservations = self.ec2_connection.describe_instances(**kwargs).get("Reservations") instances = list() for reservation in reservations: for instance in reservation.get("Instances"): instances.append( - EC2Instance(system=self, raw=self.ec2_resource.Instance( - instance.get("InstanceId"))) + EC2Instance( + system=self, raw=self.ec2_resource.Instance(instance.get("InstanceId")) + ) ) return instances @staticmethod def _add_filter_for_terminated(kwargs_dict): new_filter = { - 'Name': 'instance-state-name', - 'Values': - [ - api_state for api_state, vm_state in EC2Instance.state_map.items() + "Name": "instance-state-name", + "Values": [ + api_state + for api_state, vm_state in EC2Instance.state_map.items() if vm_state is not VmState.DELETED - ] + ], } - if 'Filters' not in kwargs_dict: - kwargs_dict['Filters'] = [new_filter] + if "Filters" not in kwargs_dict: + kwargs_dict["Filters"] = [new_filter] else: - 
kwargs_dict['Filters'].append(new_filter) + kwargs_dict["Filters"].append(new_filter) return kwargs_dict def find_vms(self, name=None, id=None, filters=None, hide_deleted=True): @@ -613,23 +645,30 @@ def find_vms(self, name=None, id=None, filters=None, hide_deleted=True): List of EC2Instance objects that match """ # Validate args - filled_args = [arg for arg in (name, id, filters,) if arg] + filled_args = [ + arg + for arg in ( + name, + id, + filters, + ) + if arg + ] if not filled_args or len(filled_args) > 1: - raise ValueError( - "You must select one of these search methods: name, id, or filters") + raise ValueError("You must select one of these search methods: name, id, or filters") if id: - kwargs = {'InstanceIds': [id]} + kwargs = {"InstanceIds": [id]} elif filters: - kwargs = {'Filters': filters} + kwargs = {"Filters": filters} elif name: # Quick validation that the instance name isn't actually an ID - pattern = re.compile(r'^i-\w{8,17}$') + pattern = re.compile(r"^i-\w{8,17}$") if pattern.match(name): # Switch to using the id search method - kwargs = {'InstanceIds': [name]} + kwargs = {"InstanceIds": [name]} else: - kwargs = {'Filters': [{'Name': 'tag:Name', 'Values': [name]}]} + kwargs = {"Filters": [{"Name": "tag:Name", "Values": [name]}]} if hide_deleted: self._add_filter_for_terminated(kwargs) @@ -663,8 +702,9 @@ def list_vms(self, hide_deleted=True): self._add_filter_for_terminated(kwargs) return [inst for inst in self._get_instances(**kwargs)] - def create_vm(self, image_id, min_count=1, max_count=1, instance_type='t1.micro', - vm_name='', **kwargs): + def create_vm( + self, image_id, min_count=1, max_count=1, instance_type="t1.micro", vm_name="", **kwargs + ): """ Creates aws instances. @@ -685,39 +725,48 @@ def create_vm(self, image_id, min_count=1, max_count=1, instance_type='t1.micro' List of EC2Instance objects for all instances created """ self.logger.debug("ec2.create_vm() -- Ignored kwargs: %s", kwargs) - self.logger.info("Creating instances[%d] with name %s,type %s and image ID: %s ", - max_count, vm_name, instance_type, image_id) + self.logger.info( + "Creating instances[%d] with name %s,type %s and image ID: %s ", + max_count, + vm_name, + instance_type, + image_id, + ) try: result = self.ec2_connection.run_instances( - ImageId=image_id, MinCount=min_count, - MaxCount=max_count, InstanceType=instance_type, TagSpecifications=[ + ImageId=image_id, + MinCount=min_count, + MaxCount=max_count, + InstanceType=instance_type, + TagSpecifications=[ { - 'ResourceType': 'instance', - 'Tags': [ + "ResourceType": "instance", + "Tags": [ { - 'Key': 'Name', - 'Value': vm_name, + "Key": "Name", + "Value": vm_name, }, - ] + ], }, - ] + ], ) except Exception: self.logger.exception("Create of instance '%s' failed.", vm_name) raise try: - instances_json = result['Instances'] - instance_ids = [entry['InstanceId'] for entry in instances_json] + instances_json = result["Instances"] + instance_ids = [entry["InstanceId"] for entry in instances_json] except KeyError: self.logger.exception("Unable to parse all InstanceId's from response json") raise - instances = [EC2Instance(system=self, raw=self.ec2_resource.Instance(uuid), uuid=uuid) - for uuid in instance_ids] + instances = [ + EC2Instance(system=self, raw=self.ec2_resource.Instance(uuid), uuid=uuid) + for uuid in instance_ids + ] for instance in instances: - self.logger.info( - "Waiting for instance '%s' to reach steady state", instance.uuid) + self.logger.info("Waiting for instance '%s' to reach steady state", instance.uuid) 
instance.wait_for_steady_state() if len(instances) == 1: return instances[0] @@ -731,10 +780,13 @@ def list_stacks(self, stack_status_filter=StackStates.ACTIVE): stack_status_filter: list of stack statuses to filter for. See ``StackStates`` """ stack_list = [ - CloudFormationStack(system=self, uuid=stack_summary['StackId'], - raw=self.cloudformation_resource.Stack(stack_summary['StackName'])) - for stack_summary in self.cloudformation_connection.list_stacks()['StackSummaries'] - if stack_summary['StackStatus'] in stack_status_filter + CloudFormationStack( + system=self, + uuid=stack_summary["StackId"], + raw=self.cloudformation_resource.Stack(stack_summary["StackName"]), + ) + for stack_summary in self.cloudformation_connection.list_stacks()["StackSummaries"] + if stack_summary["StackStatus"] in stack_status_filter ] return stack_list @@ -760,7 +812,7 @@ def find_stacks(self, name=None, id=None): List of CloudFormationStack objects """ if not name and not id: - raise ValueError('missing one of required kwargs: name, id') + raise ValueError("missing one of required kwargs: name, id") if name: searching_by_name = True @@ -773,21 +825,28 @@ def find_stacks(self, name=None, id=None): try: # Try to find by name/id directly by using describe_stacks stack_list = [ - CloudFormationStack(system=self, uuid=stack['StackId'], - raw=self.cloudformation_resource.Stack(stack['StackName'])) - for stack - in self.cloudformation_connection.describe_stacks(StackName=name_or_id)['Stacks'] + CloudFormationStack( + system=self, + uuid=stack["StackId"], + raw=self.cloudformation_resource.Stack(stack["StackName"]), + ) + for stack in self.cloudformation_connection.describe_stacks(StackName=name_or_id)[ + "Stacks" + ] ] except ClientError as error: # Stack not found, if searching by name, look through deleted stacks... 
- if searching_by_name and 'Stack with id {} does not exist'.format(name) in str(error): + if searching_by_name and f"Stack with id {name} does not exist" in str(error): stack_list = [ - CloudFormationStack(system=self, uuid=stack_summary['StackId'], - raw=self.cloudformation_resource.Stack( - stack_summary['StackName'])) - for stack_summary - in self.cloudformation_connection.list_stacks()['StackSummaries'] - if stack_summary['StackName'] == name + CloudFormationStack( + system=self, + uuid=stack_summary["StackId"], + raw=self.cloudformation_resource.Stack(stack_summary["StackName"]), + ) + for stack_summary in self.cloudformation_connection.list_stacks()[ + "StackSummaries" + ] + if stack_summary["StackName"] == name ] return stack_list @@ -800,8 +859,9 @@ def get_stack(self, name): Returns: CloudFormationStack object """ - return self._get_resource(name=name, resource=CloudFormationStack, - find_method=self.find_stacks) + return self._get_resource( + name=name, resource=CloudFormationStack, find_method=self.find_stacks + ) def list_templates(self, executable_by_me=True, owned_by_me=True, public=False): """ @@ -812,25 +872,33 @@ def list_templates(self, executable_by_me=True, owned_by_me=True, public=False): owned_by_me: search images owned only by me (default True) public: search public images (default False) """ - img_filter = [{'Name': 'image-type', 'Values': ['machine']}] + img_filter = [{"Name": "image-type", "Values": ["machine"]}] if not any([public, executable_by_me, owned_by_me]): raise ValueError( - "One of the following must be 'True': owned_by_me, executable_by_me, public") + "One of the following must be 'True': owned_by_me, executable_by_me, public" + ) images = [] if public: - img_filter.append({'Name': 'is-public', 'Values': ['true']}) + img_filter.append({"Name": "is-public", "Values": ["true"]}) images.extend(self.ec2_connection.describe_images(Filters=img_filter).get("Images")) if executable_by_me: - images.extend(self.ec2_connection.describe_images( - ExecutableUsers=['self'], Filters=img_filter).get("Images")) + images.extend( + self.ec2_connection.describe_images( + ExecutableUsers=["self"], Filters=img_filter + ).get("Images") + ) if owned_by_me: - images.extend(self.ec2_connection.describe_images(Owners=['self'], - Filters=img_filter).get("Images")) + images.extend( + self.ec2_connection.describe_images(Owners=["self"], Filters=img_filter).get( + "Images" + ) + ) - return [EC2Image(system=self, raw=self.ec2_resource.Image(image["ImageId"])) - for image in images] + return [ + EC2Image(system=self, raw=self.ec2_resource.Image(image["ImageId"])) for image in images + ] def list_free_images(self, image_list=None): """ @@ -859,10 +927,17 @@ def delete_images(self, image_list=None): image_list (list): ["imageID_1", "imageID_2"] """ for image in image_list: - img=EC2Image(system=self, raw=self.ec2_resource.Image(image)).delete() - - def find_templates(self, name=None, id=None, executable_by_me=True, owned_by_me=True, - public=False, filters=None): + EC2Image(system=self, raw=self.ec2_resource.Image(image)).delete() + + def find_templates( + self, + name=None, + id=None, + executable_by_me=True, + owned_by_me=True, + public=False, + filters=None, + ): """ Find image on ec2 system @@ -882,52 +957,67 @@ def find_templates(self, name=None, id=None, executable_by_me=True, owned_by_me= List of EC2Image objects that match """ # Validate args - filled_args = [arg for arg in (name, id, filters,) if arg] + filled_args = [ + arg + for arg in ( + name, + id, + filters, + ) + if arg 
+ ] if not filled_args or len(filled_args) > 1: - raise ValueError( - "You must select one of these search methods: name, id, or filters") + raise ValueError("You must select one of these search methods: name, id, or filters") if id: - kwargs = {'ImageIds': [id]} + kwargs = {"ImageIds": [id]} elif filters: - kwargs = {'Filters': filters} + kwargs = {"Filters": filters} elif name: # Quick validation that the image name isn't actually an ID - if name.startswith('ami-'): + if name.startswith("ami-"): # Switch to using the id search method - kwargs = {'ImageIds': [name]} + kwargs = {"ImageIds": [name]} else: - kwargs = {'Filters': [{'Name': 'name', 'Values': [name]}]} + kwargs = {"Filters": [{"Name": "name", "Values": [name]}]} if not any([public, executable_by_me, owned_by_me]): raise ValueError( - "One of the following must be 'True': owned_by_me, executable_by_me, public") + "One of the following must be 'True': owned_by_me, executable_by_me, public" + ) images = [] if public: - public_kwargs = {'Filters': [{'Name': 'is-public', 'Values': ['true']}]} - if 'Filters' in kwargs: - public_kwargs['Filters'] = kwargs['Filters'] + public_kwargs['Filters'] + public_kwargs = {"Filters": [{"Name": "is-public", "Values": ["true"]}]} + if "Filters" in kwargs: + public_kwargs["Filters"] = kwargs["Filters"] + public_kwargs["Filters"] else: public_kwargs.update(kwargs) images.extend(self.ec2_connection.describe_images(**public_kwargs).get("Images")) if executable_by_me: - images.extend(self.ec2_connection.describe_images( - ExecutableUsers=['self'], **kwargs).get("Images")) + images.extend( + self.ec2_connection.describe_images(ExecutableUsers=["self"], **kwargs).get( + "Images" + ) + ) if owned_by_me: - images.extend(self.ec2_connection.describe_images( - Owners=['self'], **kwargs).get("Images")) + images.extend( + self.ec2_connection.describe_images(Owners=["self"], **kwargs).get("Images") + ) - return [EC2Image(system=self, raw=self.ec2_resource.Image(image['ImageId'])) - for image in images] + return [ + EC2Image(system=self, raw=self.ec2_resource.Image(image["ImageId"])) for image in images + ] def get_template(self, name_or_id): try: - template = self._get_resource(name=name_or_id, resource=EC2Image, - find_method=self.find_templates) + template = self._get_resource( + name=name_or_id, resource=EC2Image, find_method=self.find_templates + ) except Exception: - template = self._get_resource(name=name_or_id, resource=EC2Image, - find_method=self.find_templates, public=True) + template = self._get_resource( + name=name_or_id, resource=EC2Image, find_method=self.find_templates, public=True + ) return template def create_template(self, *args, **kwargs): @@ -938,8 +1028,10 @@ def create_template(self, *args, **kwargs): def create_s3_bucket(self, bucket_name): self.logger.info("Creating bucket: '%s'", bucket_name) try: - self.s3_connection.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={ - 'LocationConstraint': self.kwargs.get('region')}) + self.s3_connection.create_bucket( + Bucket=bucket_name, + CreateBucketConfiguration={"LocationConstraint": self.kwargs.get("region")}, + ) self.logger.info("Success: Bucket was successfully created.") return True except Exception: @@ -970,7 +1062,7 @@ def object_exists_in_bucket(self, bucket_name, object_key): return any(objects) def delete_s3_buckets(self, bucket_names): - """ Deletes specified bucket(s) with keys """ + """Deletes specified bucket(s) with keys""" deleted_list = [] if isinstance(bucket_names, (set, list, tuple)): buckets = 
[self.s3_connection.Bucket(obj_name) for obj_name in bucket_names]
@@ -986,8 +1078,9 @@ def delete_s3_buckets(self, bucket_names):
                 deleted_list.append(bucket.name)
                 self.logger.info("Success: bucket '%s' was deleted.", bucket.name)
             except Exception as e:
-                self.logger.exception("Bucket '%s' deletion failed due to %s", bucket.name,
-                                      e.message)
+                self.logger.exception(
+                    "Bucket '%s' deletion failed due to %s", bucket.name, e
+                )
         return deleted_list

     def delete_objects_from_s3_bucket(self, bucket_name, object_keys):
@@ -997,18 +1090,21 @@
         bucket = self.s3_connection.Bucket(name=bucket_name)
         try:
             bucket.delete_objects(
-                Delete={'Objects': [{'Key': object_key} for object_key in object_keys]})
+                Delete={"Objects": [{"Key": object_key} for object_key in object_keys]}
+            )
             return True
         except Exception:
             self.logger.exception(
-                "Deleting object keys %s from Bucket '%s' failed", object_keys, bucket_name)
+                "Deleting object keys %s from Bucket '%s' failed", object_keys, bucket_name
+            )
             return False

     def get_all_disassociated_addresses(self):
         return [
-            addr for addr
-            in self.ec2_connection.describe_addresses().get("Addresses")
-            if not addr.get("InstanceId") and not addr.get("NetworkInterfaceId")]
+            addr
+            for addr in self.ec2_connection.describe_addresses().get("Addresses")
+            if not addr.get("InstanceId") and not addr.get("NetworkInterfaceId")
+        ]

     def release_vpc_address(self, alloc_id):
         self.logger.info(" Releasing EC2 VPC EIP '%s'", str(alloc_id))
@@ -1030,7 +1126,8 @@ def release_address(self, address):

     def get_all_unattached_volumes(self):
         return self.ec2_connection.describe_volumes(
-            Filters=[{"Name": "status", "Values": ["available"]}]).get("Volumes")
+            Filters=[{"Name": "status", "Values": ["available"]}]
+        ).get("Volumes")

     def delete_sqs_queue(self, queue_url):
         self.logger.info(" Deleting SQS queue '%s'", queue_url)
@@ -1042,16 +1139,21 @@
     def get_all_unused_loadbalancers(self):
         return [
-            loadbalancer for loadbalancer
-            in self.elb_connection.describe_load_balancers().get("LoadBalancerDescriptions")
-            if not loadbalancer.get("Instances")]
+            loadbalancer
+            for loadbalancer in self.elb_connection.describe_load_balancers().get(
+                "LoadBalancerDescriptions"
+            )
+            if not loadbalancer.get("Instances")
+        ]

     def delete_loadbalancer(self, loadbalancer):
-        self.logger.info(" Deleting Elastic Load Balancer '%s'",
-                         loadbalancer.get("LoadBalancerName"))
+        self.logger.info(
+            " Deleting Elastic Load Balancer '%s'", loadbalancer.get("LoadBalancerName")
+        )
         try:
             self.elb_connection.delete_load_balancer(
-                LoadBalancerName=loadbalancer.get("LoadBalancerName"))
+                LoadBalancerName=loadbalancer.get("LoadBalancerName")
+            )
             return True

         except ActionTimedOutError:
@@ -1059,24 +1161,27 @@
     def get_all_unused_network_interfaces(self):
         return self.ec2_connection.describe_network_interfaces(
-            Filters=[{"Name": "status", "Values": ["available"]}]).get("NetworkInterfaces")
+            Filters=[{"Name": "status", "Values": ["available"]}]
+        ).get("NetworkInterfaces")

     def import_image(self, s3bucket, s3key, format="vhd", description=None):
         self.logger.info(
             " Importing image %s from %s bucket with description %s in %s started successfully.",
-            s3key, s3bucket, description, format
+            s3key,
+            s3bucket,
+            description,
+            format,
         )
         try:
-            result = self.ec2_connection.import_image(DiskContainers=[
-                {
-                    'Description': description if description is not None else s3key,
-                    'Format':
format, - 'UserBucket': { - 'S3Bucket': s3bucket, - 'S3Key': s3key + result = self.ec2_connection.import_image( + DiskContainers=[ + { + "Description": description if description is not None else s3key, + "Format": format, + "UserBucket": {"S3Bucket": s3bucket, "S3Key": s3key}, } - } - ]) + ] + ) task_id = result.get("ImportTaskId") return task_id @@ -1087,11 +1192,15 @@ def import_image(self, s3bucket, s3key, format="vhd", description=None): def copy_image(self, source_region, source_image, image_id): self.logger.info( " Copying image %s from region %s to region %s with image id %s", - source_image, source_region, self.kwargs.get('region'), image_id + source_image, + source_region, + self.kwargs.get("region"), + image_id, ) try: copy_image = self.ec2_connection.copy_image( - SourceRegion=source_region, SourceImageId=source_image, Name=image_id) + SourceRegion=source_region, SourceImageId=source_image, Name=image_id + ) return copy_image.image_id except Exception: @@ -1106,7 +1215,7 @@ def get_import_image_task(self, task_id): def get_image_id_if_import_completed(self, task_id): result = self.get_import_image_task(task_id) result_status = result.get("Status") - if result_status == 'completed': + if result_status == "completed": return result.get("ImageId") else: return False @@ -1122,9 +1231,9 @@ def get_arn_if_topic_exists(self, topic_name): # like this: arn:aws:sns:sa-east-1:ACCOUNT_NUM:AWSConfig_topic topic_found = [ - t.get('TopicArn') - for t in topics.get('Topics') - if t.get('TopicArn').split(':')[-1] == topic_name + t.get("TopicArn") + for t in topics.get("Topics") + if t.get("TopicArn").split(":")[-1] == topic_name ] if topic_found: return topic_found[0] @@ -1156,15 +1265,9 @@ def volume_exists_and_available(self, volume_name=None, volume_id=None): if volume_id: try: response = self.ec2_connection.describe_volumes( - VolumeIds=[volume_id], - Filters=[ - { - 'Name': 'status', - 'Values': ['available'] - } - ] + VolumeIds=[volume_id], Filters=[{"Name": "status", "Values": ["available"]}] ) - if response.get('Volumes'): + if response.get("Volumes"): return True else: return False @@ -1173,17 +1276,11 @@ def volume_exists_and_available(self, volume_name=None, volume_id=None): elif volume_name: response = self.ec2_connection.describe_volumes( Filters=[ - { - 'Name': 'status', - 'Values': ['available'] - }, - { - 'Name': 'tag:Name', - 'Values': [volume_name] - } + {"Name": "status", "Values": ["available"]}, + {"Name": "tag:Name", "Values": [volume_name]}, ] ) - if response.get('Volumes'): + if response.get("Volumes"): return True else: return False @@ -1206,7 +1303,7 @@ def snapshot_exists(self, snapshot_name=None, snapshot_id=None): if snapshot_id: try: response = self.ec2_connection.describe_snapshots(SnapshotIds=[snapshot_id]) - if response.get('Snapshots'): + if response.get("Snapshots"): return True else: return False @@ -1214,14 +1311,9 @@ def snapshot_exists(self, snapshot_name=None, snapshot_id=None): return False elif snapshot_name: response = self.ec2_connection.describe_snapshots( - Filters=[ - { - 'Name': 'tag:Name', - 'Values': [snapshot_name] - } - ] + Filters=[{"Name": "tag:Name", "Values": [snapshot_name]}] ) - if response.get('Snapshots'): + if response.get("Snapshots"): return True else: return False @@ -1241,11 +1333,12 @@ def copy_snapshot(self, source_snapshot_id, source_region=None): False when snapshot copy didn't start. 
""" if not source_region: - source_region = self.kwargs.get('region') + source_region = self.kwargs.get("region") try: self.ec2_connection.copy_snapshot( - SourceRegion=source_region, SourceSnapshotId=source_snapshot_id, - DestinationRegion=source_region + SourceRegion=source_region, + SourceSnapshotId=source_snapshot_id, + DestinationRegion=source_region, ) return True except Exception: @@ -1254,41 +1347,47 @@ def copy_snapshot(self, source_snapshot_id, source_region=None): def list_load_balancer(self): self.logger.info("Attempting to List EC2 Load Balancers") - return [loadbalancer.get("LoadBalancerName") for loadbalancer in - self.elb_connection.describe_load_balancers().get("LoadBalancerDescriptions")] + return [ + loadbalancer.get("LoadBalancerName") + for loadbalancer in self.elb_connection.describe_load_balancers().get( + "LoadBalancerDescriptions" + ) + ] def list_network(self): self.logger.info("Attempting to List EC2 Virtual Private Networks") - networks = self.ec2_connection.describe_network_acls()['NetworkAcls'] + networks = self.ec2_connection.describe_network_acls()["NetworkAcls"] # EC2 api does not return the tags of the networks.... so returns only the IDs. - return [vpc_id['VpcId'] for vpc_id in networks] + return [vpc_id["VpcId"] for vpc_id in networks] def list_subnet(self): self.logger.info("Attempting to List EC2 Subnets") - subnets = self.ec2_connection.describe_subnets()['Subnets'] + subnets = self.ec2_connection.describe_subnets()["Subnets"] subnets_names = [] # Subnets are not having mandatory tags names. They can have multiple tags, but only the tag # 'Name' will be taken as the subnet name. If not tag is given, CFME displays the SubnetId for subnet in subnets: subnet_name = None - if 'Tags' in subnet and subnet['Tags']: - for tag in subnet['Tags']: - if 'Name' in list(tag.values()): - subnet_name = tag['Value'] + if "Tags" in subnet and subnet["Tags"]: + for tag in subnet["Tags"]: + if "Name" in list(tag.values()): + subnet_name = tag["Value"] break if not subnet_name: - subnet_name = subnet['SubnetId'] + subnet_name = subnet["SubnetId"] subnets_names.append(subnet_name) return subnets_names def list_security_group(self): self.logger.info("Attempting to List EC2 security groups") - return [sec_gp.get("GroupName") for sec_gp in - self.ec2_connection.describe_security_groups().get("SecurityGroups")] + return [ + sec_gp.get("GroupName") + for sec_gp in self.ec2_connection.describe_security_groups().get("SecurityGroups") + ] def list_router(self): - route_tables = self.ec2_connection.describe_route_tables()['RouteTables'] + route_tables = self.ec2_connection.describe_route_tables()["RouteTables"] routers_names = [] # Routers names are tags which are not mandatory, and tag with key called Name will be @@ -1296,13 +1395,13 @@ def list_router(self): # displayed as name in CFME. 
for route in route_tables: router_name = None - if route['Tags']: - for tag in route['Tags']: - if 'Name' in list(tag.values()): - router_name = tag['Value'] + if route["Tags"]: + for tag in route["Tags"]: + if "Name" in list(tag.values()): + router_name = tag["Value"] break if not router_name: - router_name = route['RouteTableId'] + router_name = route["RouteTableId"] routers_names.append(router_name) return routers_names @@ -1327,7 +1426,8 @@ def list_queues_with_creation_timestamps(self): for queue_url in queue_list: try: response = self.sqs_connection.get_queue_attributes( - QueueUrl=queue_url, AttributeNames=['CreatedTimestamp']) + QueueUrl=queue_url, AttributeNames=["CreatedTimestamp"] + ) queue_dict[queue_url] = response.get("Attributes").get("CreatedTimestamp") except Exception: pass @@ -1336,23 +1436,25 @@ def list_queues_with_creation_timestamps(self): def get_registry_data(self): # Returns dict with docker registry url and token data = self.ecr_connection.get_authorization_token() - if data['ResponseMetadata']['HTTPStatusCode'] >= 400: + if data["ResponseMetadata"]["HTTPStatusCode"] >= 400: raise NotFoundError("couldn't get registry details. please check environment setup") try: - first_registry = data['authorizationData'][0] - encoded_data = base64.b64decode(first_registry['authorizationToken'].encode('utf-8')) - username, password = encoded_data.decode('utf-8').split(':') - return {'username': username, - 'password': password, - 'registry': first_registry['proxyEndpoint']} + first_registry = data["authorizationData"][0] + encoded_data = base64.b64decode(first_registry["authorizationToken"].encode("utf-8")) + username, password = encoded_data.decode("utf-8").split(":") + return { + "username": username, + "password": password, + "registry": first_registry["proxyEndpoint"], + } except (IndexError, KeyError): raise NotFoundError("couldn't get registry details. 
please check environment setup") - def create_network(self, cidr_block='10.0.0.0/16'): + def create_network(self, cidr_block="10.0.0.0/16"): try: response = self.ec2_connection.create_vpc(CidrBlock=cidr_block) - network_id = response.get('Vpc').get('VpcId') + network_id = response.get("Vpc").get("VpcId") return EC2Vpc(system=self, uuid=network_id, raw=self.ec2_resource.Vpc(network_id)) except Exception: return False @@ -1365,8 +1467,8 @@ def list_networks(self): Returns a list of Network objects """ network_list = [ - EC2Vpc(system=self, uuid=vpc['VpcId'], raw=self.ec2_resource.Vpc(vpc['VpcId'])) - for vpc in self.ec2_connection.describe_vpcs().get('Vpcs') + EC2Vpc(system=self, uuid=vpc["VpcId"], raw=self.ec2_resource.Vpc(vpc["VpcId"])) + for vpc in self.ec2_connection.describe_vpcs().get("Vpcs") ] return network_list @@ -1384,12 +1486,14 @@ def find_networks(self, name=None, id=None): if id: vpcs = self.ec2_connection.describe_vpcs(VpcIds=[id]) else: - vpcs = self.ec2_connection.describe_vpcs(Filters=[{'Name': 'tag:Name', - 'Values': [name]}]) - return [EC2Vpc(system=self, raw=self.ec2_resource.Vpc(vpc['VpcId'])) - for vpc in vpcs.get('Vpcs')] + vpcs = self.ec2_connection.describe_vpcs( + Filters=[{"Name": "tag:Name", "Values": [name]}] + ) + return [ + EC2Vpc(system=self, raw=self.ec2_resource.Vpc(vpc["VpcId"])) for vpc in vpcs.get("Vpcs") + ] - def create_volume(self, az, iops=None, encrypted=False, size=10, type='gp2', name=None): + def create_volume(self, az, iops=None, encrypted=False, size=10, type="gp2", name=None): """ Creates volume Args: @@ -1403,24 +1507,25 @@ def create_volume(self, az, iops=None, encrypted=False, size=10, type='gp2', nam Created Volume object """ attributes = { - 'AvailabilityZone': az, - 'Size': size, - 'VolumeType': type, - 'Encrypted': encrypted + "AvailabilityZone": az, + "Size": size, + "VolumeType": type, + "Encrypted": encrypted, } - if type not in ('standard', 'io1', 'gp2', 'sc1', 'st1'): + if type not in ("standard", "io1", "gp2", "sc1", "st1"): raise ValueError("One of 'standard'|'io1'|'gp2'|'sc1'|'st1' volume types must be set!") - if type == 'io1': + if type == "io1": if not iops: raise ValueError("iops parameter must be set when creating io1 volume type!") else: attributes["Iops"] = iops if name: - attributes["TagSpecifications"] = [{'Tags': [{'Key': 'Name', 'Value': name}], - 'ResourceType': 'volume'}] + attributes["TagSpecifications"] = [ + {"Tags": [{"Key": "Name", "Value": name}], "ResourceType": "volume"} + ] try: response = self.ec2_connection.create_volume(**attributes) - volume_id = response.get('VolumeId') + volume_id = response.get("VolumeId") return EBSVolume(system=self, uuid=volume_id, raw=self.ec2_resource.Volume(volume_id)) except Exception: return False @@ -1433,9 +1538,12 @@ def list_volumes(self): Returns a list of Volumes objects """ volume_list = [ - EBSVolume(system=self, uuid=volume['VolumeId'], raw=self.ec2_resource.Volume( - volume['VolumeId'])) - for volume in self.ec2_connection.describe_volumes().get('Volumes') + EBSVolume( + system=self, + uuid=volume["VolumeId"], + raw=self.ec2_resource.Volume(volume["VolumeId"]), + ) + for volume in self.ec2_connection.describe_volumes().get("Volumes") ] return volume_list @@ -1453,59 +1561,65 @@ def find_volumes(self, name=None, id=None): if id: volumes = self.ec2_connection.describe_volumes(VolumeIds=[id]) else: - volumes = self.ec2_connection.describe_volumes(Filters=[{'Name': 'tag:Name', - 'Values': [name]}]) - return [EBSVolume(system=self, 
raw=self.ec2_resource.Volume(volume['VolumeId'])) - for volume in volumes.get('Volumes')] + volumes = self.ec2_connection.describe_volumes( + Filters=[{"Name": "tag:Name", "Values": [name]}] + ) + return [ + EBSVolume(system=self, raw=self.ec2_resource.Volume(volume["VolumeId"])) + for volume in volumes.get("Volumes") + ] def list_regions(self, verbose=False): - regions = self.ec2_connection.describe_regions().get('Regions') - region_names = [r.get('RegionName') for r in regions] + regions = self.ec2_connection.describe_regions().get("Regions") + region_names = [r.get("RegionName") for r in regions] if not verbose: return region_names verbose_region_names = [] for region in region_names: - tmp = '/aws/service/global-infrastructure/regions/{}/longName'.format(region) + tmp = f"/aws/service/global-infrastructure/regions/{region}/longName" ssm_response = self.ssm_connection.get_parameter(Name=tmp) - verbose_region_names.append(ssm_response['Parameter']['Value']) + verbose_region_names.append(ssm_response["Parameter"]["Value"]) return verbose_region_names - def create_stack(self, name, template_url=None, template_body=None, parameters=None, - capabilities=None): + def create_stack( + self, name, template_url=None, template_body=None, parameters=None, capabilities=None + ): if (not template_body and not template_url) or (template_body and template_url): raise ValueError("Either template_body or template_url must be set and not both!") stack_kwargs = { - 'StackName': name, + "StackName": name, } if template_body: - stack_kwargs['TemplateBody'] = template_body + stack_kwargs["TemplateBody"] = template_body else: - stack_kwargs['TemplateURL'] = template_url + stack_kwargs["TemplateURL"] = template_url if parameters: - stack_kwargs['Parameters'] = parameters + stack_kwargs["Parameters"] = parameters if capabilities: - stack_kwargs['Capabilities'] = capabilities + stack_kwargs["Capabilities"] = capabilities response = self.cloudformation_connection.create_stack(**stack_kwargs) - stack_id = response.get('StackId') - return CloudFormationStack(system=self, uuid=stack_id, - raw=self.cloudformation_resource.Stack(stack_id)) + stack_id = response.get("StackId") + return CloudFormationStack( + system=self, uuid=stack_id, raw=self.cloudformation_resource.Stack(stack_id) + ) def set_sns_topic_target_for_all_cw_rules(self, topic_arn): # After recreating sns topic cloudwatch rule targets are not set so we need to set them back try: # Get all enabled rules - rules = self.cw_events_connection.list_rules().get('Rules') + rules = self.cw_events_connection.list_rules().get("Rules") enabled_rules = [] for rule in rules: - if rule.get('State') == "ENABLED": - enabled_rules.append(rule.get('Name')) + if rule.get("State") == "ENABLED": + enabled_rules.append(rule.get("Name")) # Set targets to rules again for enabled_rule in enabled_rules: - target = self.cw_events_connection.list_targets_by_rule( - Rule=enabled_rule).get('Targets')[0] - target['Arn'] = topic_arn + target = self.cw_events_connection.list_targets_by_rule(Rule=enabled_rule).get( + "Targets" + )[0] + target["Arn"] = topic_arn self.cw_events_connection.put_targets(Rule=enabled_rule, Targets=[target]) return True except Exception: @@ -1514,17 +1628,17 @@ def set_sns_topic_target_for_all_cw_rules(self, topic_arn): def import_snapshot(self, s3bucket, s3key, format="vhd", description=None): self.logger.info( " Importing snapshot %s from %s bucket with description %s in %s started successfully.", - s3key, s3bucket, description, format + s3key, + s3bucket, + 
description, + format, ) try: result = self.ec2_connection.import_snapshot( DiskContainer={ - 'Description': description if description is not None else s3key, - 'Format': format, - 'UserBucket': { - 'S3Bucket': s3bucket, - 'S3Key': s3key - } + "Description": description if description is not None else s3key, + "Format": format, + "UserBucket": {"S3Bucket": s3bucket, "S3Key": s3key}, } ) task_id = result.get("ImportTaskId") @@ -1540,31 +1654,36 @@ def get_import_snapshot_task(self, task_id): return result_task[0] def get_snapshot_id_if_import_completed(self, task_id): - result = self.get_import_snapshot_task(task_id).get('SnapshotTaskDetail') + result = self.get_import_snapshot_task(task_id).get("SnapshotTaskDetail") result_status = result.get("Status") - if result_status == 'completed': + if result_status == "completed": return result.get("SnapshotId") else: return False - def create_image_from_snapshot(self, name, snapshot_id, architecture='x86_64', ena_support=True, - virtualization_type='hvm', device_name='/dev/sda1'): + def create_image_from_snapshot( + self, + name, + snapshot_id, + architecture="x86_64", + ena_support=True, + virtualization_type="hvm", + device_name="/dev/sda1", + ): try: - ami_id = self.ec2_connection.register_image(Name=name, - Architecture=architecture, - VirtualizationType=virtualization_type, - RootDeviceName=device_name, - EnaSupport=ena_support, - BlockDeviceMappings=[ - { - 'DeviceName': device_name, - 'Ebs': - { - 'SnapshotId': snapshot_id, - 'DeleteOnTermination': True - } - } - ]) + ami_id = self.ec2_connection.register_image( + Name=name, + Architecture=architecture, + VirtualizationType=virtualization_type, + RootDeviceName=device_name, + EnaSupport=ena_support, + BlockDeviceMappings=[ + { + "DeviceName": device_name, + "Ebs": {"SnapshotId": snapshot_id, "DeleteOnTermination": True}, + } + ], + ) return ami_id except Exception: self.logger.exception("Creation of image from snapshot '%s' failed.", snapshot_id) @@ -1594,7 +1713,7 @@ def remove_all_unused_nics(self): """ all_unused_nics = self.get_all_unused_network_interfaces() for nic in all_unused_nics: - self.remove_network_interface_by_id(nic_id=nic['NetworkInterfaceId']) + self.remove_network_interface_by_id(nic_id=nic["NetworkInterfaceId"]) def remove_all_unused_volumes(self): """ @@ -1604,7 +1723,7 @@ def remove_all_unused_volumes(self): """ all_unused_volumes = self.get_all_unattached_volumes() for volume in all_unused_volumes: - self.remove_volume_by_id(volume_id=volume['VolumeId']) + self.remove_volume_by_id(volume_id=volume["VolumeId"]) def remove_all_unused_ips(self): """ @@ -1614,7 +1733,7 @@ def remove_all_unused_ips(self): """ all_unused_ips = self.get_all_disassociated_addresses() for ip in all_unused_ips: - self.release_vpc_address(alloc_id=ip['AllocationId']) + self.release_vpc_address(alloc_id=ip["AllocationId"]) def cleanup_resources(self): """ diff --git a/wrapanapi/systems/google.py b/wrapanapi/systems/google.py index 601eec8f..705f08ec 100644 --- a/wrapanapi/systems/google.py +++ b/wrapanapi/systems/google.py @@ -1,27 +1,29 @@ -# coding: utf-8 """ Defines System and Entity classes related to the Google Cloud platform """ - import os import random import time - -from oauth2client.service_account import ServiceAccountCredentials -from googleapiclient.discovery import build -from googleapiclient.http import MediaFileUpload -from googleapiclient import errors from json import dumps as json_dumps import httplib2 import iso8601 import pytz +from googleapiclient import errors +from 
googleapiclient.discovery import build +from googleapiclient.http import MediaFileUpload +from oauth2client.service_account import ServiceAccountCredentials from wait_for import wait_for -from wrapanapi.entities import (Instance, Template, TemplateMixin, VmMixin, - VmState) -from wrapanapi.exceptions import (ImageNotFoundError, MultipleInstancesError, - NotFoundError, VMInstanceNotFound) +from wrapanapi.entities import Instance +from wrapanapi.entities import Template +from wrapanapi.entities import TemplateMixin +from wrapanapi.entities import VmMixin +from wrapanapi.entities import VmState +from wrapanapi.exceptions import ImageNotFoundError +from wrapanapi.exceptions import MultipleInstancesError +from wrapanapi.exceptions import NotFoundError +from wrapanapi.exceptions import VMInstanceNotFound from wrapanapi.systems.base import System # Retry transport and file IO errors. @@ -31,21 +33,30 @@ # Number of bytes to send/receive in each request. CHUNKSIZE = 2 * 1024 * 1024 # Mimetype to use if one can't be guessed from the file extension. -DEFAULT_MIMETYPE = 'application/octet-stream' +DEFAULT_MIMETYPE = "application/octet-stream" # List of image projects which GCE provides out of the box. Could be extended in the future and # will have an impact on the total number of templates/images -IMAGE_PROJECTS = ['centos-cloud', 'debian-cloud', 'rhel-cloud', 'suse-cloud', 'ubuntu-os-cloud', - 'windows-cloud', 'opensuse-cloud', 'coreos-cloud', 'google-containers'] +IMAGE_PROJECTS = [ + "centos-cloud", + "debian-cloud", + "rhel-cloud", + "suse-cloud", + "ubuntu-os-cloud", + "windows-cloud", + "opensuse-cloud", + "coreos-cloud", + "google-containers", +] class GoogleCloudInstance(Instance): state_map = { - 'PROVISIONING': VmState.STARTING, - 'STAGING': VmState.STARTING, - 'STOPPING': VmState.STOPPING, - 'RUNNING': VmState.RUNNING, - 'TERMINATED': VmState.STOPPED, + "PROVISIONING": VmState.STARTING, + "STAGING": VmState.STARTING, + "STOPPING": VmState.STOPPING, + "RUNNING": VmState.RUNNING, + "TERMINATED": VmState.STOPPED, } def __init__(self, system, raw=None, **kwargs): @@ -58,23 +69,23 @@ def __init__(self, system, raw=None, **kwargs): name: the name of the VM zone: the zone of the VM """ - self._name = raw['name'] if raw else kwargs.get('name') - self._zone = raw['zone'].split('/')[-1] if raw else kwargs.get('zone') + self._name = raw["name"] if raw else kwargs.get("name") + self._zone = raw["zone"].split("/")[-1] if raw else kwargs.get("zone") if not self._name or not self._zone: raise ValueError("missing required kwargs: 'name' and 'zone'") - super(GoogleCloudInstance, self).__init__(system, raw, **kwargs) + super().__init__(system, raw, **kwargs) self._project = self.system._project self._api = self.system._compute.instances() @property def _identifying_attrs(self): - return {'name': self._name, 'zone': self._zone, 'project': self._project} + return {"name": self._name, "zone": self._zone, "project": self._project} @property def uuid(self): - return self.raw['id'] + return self.raw["id"] @property def name(self): @@ -87,7 +98,8 @@ def zone(self): def refresh(self): try: self.raw = self._api.get( - project=self._project, zone=self._zone, instance=self._name).execute() + project=self._project, zone=self._zone, instance=self._name + ).execute() except errors.HttpError as error: if error.resp.status == 404: raise VMInstanceNotFound(self._name) @@ -97,13 +109,13 @@ def _get_state(self): self.refresh() - return self._api_state_to_vmstate(self.raw['status']) + return
self._api_state_to_vmstate(self.raw["status"]) @property def ip_internal(self): self.refresh() try: - return self.raw.get('networkInterfaces')[0].get('networkIP') + return self.raw.get("networkInterfaces")[0].get("networkIP") except IndexError: return None @@ -111,14 +123,14 @@ def ip_internal(self): def ip(self): self.refresh() try: - access_configs = self.raw.get('networkInterfaces', [{}])[0].get('accessConfigs', [])[0] - return access_configs.get('natIP') + access_configs = self.raw.get("networkInterfaces", [{}])[0].get("accessConfigs", [])[0] + return access_configs.get("natIP") except IndexError: return None @property def all_ips(self): - """ Wrapping self.ip and self.ip_internal to meet abstractproperty requirement + """Wrapping self.ip and self.ip_internal to meet abstractproperty requirement Returns: (list) the addresses assigned to the machine """ @@ -126,33 +138,37 @@ def all_ips(self): @property def type(self): - if self.raw.get('machineType', None): - return self.raw['machineType'].split('/')[-1] + if self.raw.get("machineType", None): + return self.raw["machineType"].split("/")[-1] return None @property def creation_time(self): self.refresh() - creation_time = iso8601.parse_date(self.raw['creationTimestamp']) + creation_time = iso8601.parse_date(self.raw["creationTimestamp"]) return creation_time.astimezone(pytz.UTC) def delete(self, timeout=360): self.logger.info("Deleting Google Cloud instance '%s'", self.name) operation = self._api.delete( - project=self._project, zone=self.zone, instance=self.name).execute() + project=self._project, zone=self.zone, instance=self.name + ).execute() wait_for( - lambda: self.system.is_zone_operation_done(operation['name']), delay=0.5, - num_sec=timeout, message="Delete {}".format(self.name) + lambda: self.system.is_zone_operation_done(operation["name"]), + delay=0.5, + num_sec=timeout, + message=f"Delete {self.name}", ) self.logger.info( - "DELETE request successful, waiting for instance '%s' to be removed...", - self.name + "DELETE request successful, waiting for instance '%s' to be removed...", self.name ) wait_for( - lambda: not self.exists, delay=0.5, num_sec=timeout, - message=" instance '{}' to not exist".format(self.name) + lambda: not self.exists, + delay=0.5, + num_sec=timeout, + message=f" instance '{self.name}' to not exist", ) return True @@ -168,10 +184,12 @@ def restart(self): def stop(self): self.logger.info("Stopping Google Cloud instance '%s'", self.name) operation = self._api.stop( - project=self._project, zone=self.zone, instance=self.name).execute() + project=self._project, zone=self.zone, instance=self.name + ).execute() wait_for( - lambda: self.system.is_zone_operation_done(operation['name']), - message="stop operation done {}".format(self.name), timeout=360 + lambda: self.system.is_zone_operation_done(operation["name"]), + message=f"stop operation done {self.name}", + timeout=360, ) self.wait_for_state(VmState.STOPPED) return True @@ -179,10 +197,11 @@ def stop(self): def start(self): self.logger.info("Starting Google Cloud instance '%s'", self.name) operation = self._api.start( - project=self._project, zone=self.zone, instance=self.name).execute() + project=self._project, zone=self.zone, instance=self.name + ).execute() wait_for( - lambda: self.system.is_zone_operation_done(operation['name']), - message="start operation done {}".format(self.name) + lambda: self.system.is_zone_operation_done(operation["name"]), + message=f"start operation done {self.name}", ) self.wait_for_state(VmState.RUNNING) return True @@ -195,33 
+214,39 @@ def attach_disk(self, disk_name, zone=None, project=None): project = self._project # Attach disk - disk_source = "/compute/v1/projects/{}/zones/{}/disks/{}".format(project, zone, disk_name) - attach_data = {'source': disk_source} - req = self._api.attachDisk( - project=project, zone=zone, instance=self.name, body=attach_data) + disk_source = f"/compute/v1/projects/{project}/zones/{zone}/disks/{disk_name}" + attach_data = {"source": disk_source} + req = self._api.attachDisk(project=project, zone=zone, instance=self.name, body=attach_data) operation = req.execute() - wait_for(lambda: self.system.is_zone_operation_done(operation['name']), delay=0.5, - num_sec=120, message=" Attach {}".format(disk_name)) + wait_for( + lambda: self.system.is_zone_operation_done(operation["name"]), + delay=0.5, + num_sec=120, + message=f" Attach {disk_name}", + ) # Get device name of this new disk self.refresh() device_name = None - for disk in self.raw['disks']: - if disk['source'].endswith(disk_source): - device_name = disk['deviceName'] + for disk in self.raw["disks"]: + if disk["source"].endswith(disk_source): + device_name = disk["deviceName"] - self.logger.info('"Instance disks: %s', self.raw['disks']) + self.logger.info("Instance disks: %s", self.raw["disks"]) if not device_name: raise Exception("Unable to find deviceName for attached disk.") # Mark disk for auto-delete req = self._api.setDiskAutoDelete( - project=project, zone=zone, instance=self.name, - deviceName=device_name, autoDelete=True + project=project, zone=zone, instance=self.name, deviceName=device_name, autoDelete=True ) operation = req.execute() - wait_for(lambda: self.system.is_zone_operation_done(operation['name']), delay=0.5, - num_sec=120, message=" Set auto-delete {}".format(disk_name)) + wait_for( + lambda: self.system.is_zone_operation_done(operation["name"]), + delay=0.5, + num_sec=120, + message=f" Set auto-delete {disk_name}", + ) class GoogleCloudImage(Template): @@ -235,23 +260,23 @@ def __init__(self, system, raw=None, **kwargs): name: name of image project: project image is located in """ - self._name = raw['name'] if raw else kwargs.get('name') - self._project = kwargs.get('project') or self.system._project + self._name = raw["name"] if raw else kwargs.get("name") + self._project = kwargs.get("project") or self.system._project if not self._name or not self._project: raise ValueError("missing required kwargs: 'name' and 'project'") - super(GoogleCloudImage, self).__init__(system, raw, **kwargs) + super().__init__(system, raw, **kwargs) self._api = self.system._compute.images() self._instances_api = self.system._compute.instances() @property def _identifying_attrs(self): - return {'name': self._name, 'project': self._project} + return {"name": self._name, "project": self._project} @property def uuid(self): - return self.raw['id'] + return self.raw["id"] @property def name(self): @@ -273,24 +298,36 @@ def refresh(self): def delete(self, timeout=360): if self._project in IMAGE_PROJECTS: - raise ValueError('Public images cannot be deleted') + raise ValueError("Public images cannot be deleted") operation = self._api.delete(project=self._project, image=self.name).execute() wait_for( - lambda: self.system.is_global_operation_done(operation['name']), delay=0.5, - num_sec=timeout, message=" Deleting image {}".format(self.name) + lambda: self.system.is_global_operation_done(operation["name"]), + delay=0.5, + num_sec=timeout, + message=f" Deleting image {self.name}", ) wait_for( - lambda: not self.exists, delay=0.5,
num_sec=timeout, - message=" image '{}' to not exist".format(self.name) + lambda: not self.exists, + delay=0.5, + num_sec=timeout, + message=f" image '{self.name}' to not exist", ) return True def cleanup(self): return self.delete() - def deploy(self, vm_name, zone=None, machine_type=None, ssh_key=None, - startup_script="#!/bin/bash", timeout=180, **kwargs): + def deploy( + self, + vm_name, + zone=None, + machine_type=None, + ssh_key=None, + startup_script="#!/bin/bash", + timeout=180, + **kwargs, + ): """ Deploy an instance from this template @@ -306,88 +343,89 @@ def deploy(self, vm_name, zone=None, machine_type=None, ssh_key=None, if kwargs: self.logger.warn("deploy() ignored kwargs: %s", kwargs) - template_link = self.raw['selfLink'] + template_link = self.raw["selfLink"] instance_name = vm_name if not zone: zone = self.system._zone if not machine_type: - machine_type = 'n1-standard-1' + machine_type = "n1-standard-1" - full_machine_type = 'zones/{}/machineTypes/{}'.format(zone, machine_type) + full_machine_type = f"zones/{zone}/machineTypes/{machine_type}" self.logger.info("Creating instance '%s'", instance_name) config = { - 'name': instance_name, - 'machineType': full_machine_type, - + "name": instance_name, + "machineType": full_machine_type, # Specify the boot disk and the image to use as a source. - 'disks': [ + "disks": [ { - 'boot': True, - 'autoDelete': True, - 'initializeParams': { - 'sourceImage': template_link, - } + "boot": True, + "autoDelete": True, + "initializeParams": { + "sourceImage": template_link, + }, } ], - # Specify a network interface with NAT to access the public # internet. - 'networkInterfaces': [{ - 'network': 'global/networks/default', - 'accessConfigs': [ - {'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'} - ] - }], - + "networkInterfaces": [ + { + "network": "global/networks/default", + "accessConfigs": [{"type": "ONE_TO_ONE_NAT", "name": "External NAT"}], + } + ], # Allow the instance to access cloud storage and logging. - 'serviceAccounts': [{ - 'email': 'default', - 'scopes': [ - 'https://www.googleapis.com/auth/devstorage.read_write', - 'https://www.googleapis.com/auth/logging.write' - ] - }], - + "serviceAccounts": [ + { + "email": "default", + "scopes": [ + "https://www.googleapis.com/auth/devstorage.read_write", + "https://www.googleapis.com/auth/logging.write", + ], + } + ], # Metadata is readable from the instance and allows you to # pass configuration from deployment scripts to instances. - 'metadata': { - 'items': [{ - # Startup script is automatically executed by the - # instance upon startup. - 'key': 'startup-script', - 'value': startup_script - }, { - # Every project has a default Cloud Storage bucket that's - # the same name as the project.
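# Editor's note -- illustrative, assumed format rather than part of the patch:
# the optional ssh_key argument handled below is expected in GCE's "ssh-keys"
# metadata form of "<username>:<public key material>", for example:
#   ssh_key = "cloud-user:ssh-rsa AAAAB3NzaC1yc2E... cloud-user"
# deploy() appends it verbatim to these metadata items and does not validate it.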
+ "key": "bucket", + "value": self._project, + }, + ] }, - 'tags': { - 'items': ['https-server'] - } + "tags": {"items": ["https-server"]}, } if ssh_key: - ssh_keys = { - 'key': 'ssh-keys', - 'value': ssh_key - } - config['metadata']['items'].append(ssh_keys) + ssh_keys = {"key": "ssh-keys", "value": ssh_key} + config["metadata"]["items"].append(ssh_keys) operation = self._instances_api.insert( - project=self._project, zone=zone, body=config).execute() + project=self._project, zone=zone, body=config + ).execute() wait_for( - lambda: self.system.is_zone_operation_done(operation['name']), delay=0.5, - num_sec=timeout, message=" Create {}".format(instance_name) + lambda: self.system.is_zone_operation_done(operation["name"]), + delay=0.5, + num_sec=timeout, + message=f" Create {instance_name}", ) instance = GoogleCloudInstance(system=self.system, name=instance_name, zone=zone) wait_for( - lambda: instance.in_steady_state, timeout=timeout, - delay=0.5, message="Instance {} to reach steady state".format(instance_name) + lambda: instance.in_steady_state, + timeout=timeout, + delay=0.5, + message=f"Instance {instance_name} to reach steady state", ) return instance @@ -397,68 +435,69 @@ class GoogleCloudSystem(System, TemplateMixin, VmMixin): Client to Google Cloud Platform API """ + _stats_available = { - 'num_vm': lambda self: len(self.list_vms()), - 'num_template': lambda self: len(self.list_templates()), + "num_vm": lambda self: len(self.list_vms()), + "num_template": lambda self: len(self.list_templates()), } can_suspend = False can_pause = False - default_scope = ['https://www.googleapis.com/auth/cloud-platform'] + default_scope = ["https://www.googleapis.com/auth/cloud-platform"] def __init__(self, project=None, zone=None, file_type=None, **kwargs): """ - The last three argumets are optional and required only if you want - to use json or p12 files. - By default, we expecting that service_account arg contains service account data. - - Args: - project: name of the project, so called project_id - zone: zone of cloud - service_account: service_account_content - - scope: compute engine, container engine, sqlservice end etc - cache_discovery: turn on cache discovery default off - file_path: path to json or p12 file - file_type: p12 or json - client_email: Require for p12 file - - Returns: A :py:class:`GoogleCloudSystem` object. + The last three argumets are optional and required only if you want + to use json or p12 files. + By default, we expecting that service_account arg contains service account data. + + Args: + project: name of the project, so called project_id + zone: zone of cloud + service_account: service_account_content + + scope: compute engine, container engine, sqlservice end etc + cache_discovery: turn on cache discovery default off + file_path: path to json or p12 file + file_type: p12 or json + client_email: Require for p12 file + + Returns: A :py:class:`GoogleCloudSystem` object. 
""" - super(GoogleCloudSystem, self).__init__(**kwargs) + super().__init__(**kwargs) self._project = project self._zone = zone - self._region = kwargs.get('region') - scope = kwargs.get('scope', self.default_scope) + self._region = kwargs.get("region") + scope = kwargs.get("scope", self.default_scope) cache_discovery = kwargs.get("cache_discovery", False) - if 'service_account' in kwargs: - service_account = kwargs.get('service_account').copy() - service_account['private_key'] = service_account['private_key'].replace('\\n', '\n') - service_account['type'] = service_account.get('type', 'service_account') # default it - credentials = ServiceAccountCredentials.from_json_keyfile_dict(service_account, - scopes=scope) - elif file_type == 'json': - file_path = kwargs.get('file_path', None) - credentials = ServiceAccountCredentials.from_json_keyfile_name(file_path, - scopes=scope) - elif file_type == 'p12': - file_path = kwargs.get('file_path', None) - client_email = kwargs.get('client_email', None) - credentials = ServiceAccountCredentials.from_p12_keyfile(client_email, - file_path, - scopes=scope) + if "service_account" in kwargs: + service_account = kwargs.get("service_account").copy() + service_account["private_key"] = service_account["private_key"].replace("\\n", "\n") + service_account["type"] = service_account.get("type", "service_account") # default it + credentials = ServiceAccountCredentials.from_json_keyfile_dict( + service_account, scopes=scope + ) + elif file_type == "json": + file_path = kwargs.get("file_path", None) + credentials = ServiceAccountCredentials.from_json_keyfile_name(file_path, scopes=scope) + elif file_type == "p12": + file_path = kwargs.get("file_path", None) + client_email = kwargs.get("client_email", None) + credentials = ServiceAccountCredentials.from_p12_keyfile( + client_email, file_path, scopes=scope + ) http_auth = credentials.authorize(httplib2.Http()) - self._compute = build('compute', 'v1', http=http_auth, cache_discovery=cache_discovery) - self._storage = build('storage', 'v1', http=http_auth, cache_discovery=cache_discovery) + self._compute = build("compute", "v1", http=http_auth, cache_discovery=cache_discovery) + self._storage = build("storage", "v1", http=http_auth, cache_discovery=cache_discovery) self._instances = self._compute.instances() self._forwarding_rules = self._compute.forwardingRules() self._buckets = self._storage.buckets() @property def _identifying_attrs(self): - return {'project': self._project, 'zone': self._zone, 'region': self._region} + return {"project": self._project, "zone": self._zone, "region": self._region} @property def can_suspend(self): @@ -473,12 +512,15 @@ def _get_all_buckets(self): def _get_all_forwarding_rules(self): results = [] - results.extend(self._forwarding_rules.list(project=self._project, region=self._zone). 
- execute().get('items', [])) + results.extend( + self._forwarding_rules.list(project=self._project, region=self._zone) + .execute() + .get("items", []) + ) return results def info(self): - return "{}: project={}, zone={}".format(self.__class__.__name__, self._project, self._zone) + return f"{self.__class__.__name__}: project={self._project}, zone={self._zone}" def disconnect(self): """ @@ -504,12 +546,11 @@ def list_vms(self, zones=None): zones = [self._zone] for zone_name in zones: - zone_instances = self._instances.list( - project=self._project, zone=zone_name).execute() - for instance in zone_instances.get('items', []): + zone_instances = self._instances.list(project=self._project, zone=zone_name).execute() + for instance in zone_instances.get("items", []): results.append( GoogleCloudInstance( - system=self, raw=instance, name=instance['name'], zone=zone_name + system=self, raw=instance, name=instance["name"], zone=zone_name ) ) @@ -527,18 +568,22 @@ def find_vms(self, name, zones=None): results = [] if not zones: zones = [ - zone['name'].split('/')[-1] for zone # convert url-based name - in self._compute.zones().list(project=self._project).execute().get('items', []) + zone["name"].split("/")[-1] + for zone in self._compute.zones() # convert url-based name + .list(project=self._project) + .execute() + .get("items", []) ] for zone_name in zones: try: # Just use get in each zone instead of iterating through all instances instance = self._instances.get( - project=self._project, zone=zone_name, instance=name).execute() + project=self._project, zone=zone_name, instance=name + ).execute() results.append( GoogleCloudInstance( - system=self, raw=instance, name=instance['name'], zone=zone_name + system=self, raw=instance, name=instance["name"], zone=zone_name ) ) except errors.HttpError as error: @@ -613,7 +658,7 @@ def _list_templates( projects.extend(IMAGE_PROJECTS) for project in projects: results.extend( - GoogleCloudImage(system=self, raw=image, project=project, name=image['name']) + GoogleCloudImage(system=self, raw=image, project=project, name=image["name"]) for image in images.list( project=project, filter=filter_expr, @@ -645,9 +690,7 @@ def get_template(self, name, project=None): return GoogleCloudImage(system=self, raw=image, project=project, name=name) except errors.HttpError as error: if error.resp.status == 404: - raise ImageNotFoundError( - "'{}' not found in project '{}'".format(name, project) - ) + raise ImageNotFoundError(f"'{name}' not found in project '{project}'") else: raise @@ -697,14 +740,13 @@ def create_template(self, name, bucket_url, timeout=360): timeout: time to wait for operation """ images = self._compute.images() - data = { - "name": name, - "rawDisk": {"source": bucket_url} - } + data = {"name": name, "rawDisk": {"source": bucket_url}} operation = images.insert(project=self._project, body=data).execute() wait_for( - lambda: self.is_global_operation_done(operation['name']), delay=0.5, - num_sec=timeout, message=" Creating image {}".format(name) + lambda: self.is_global_operation_done(operation["name"]), + delay=0.5, + num_sec=timeout, + message=f" Creating image {name}", ) return self.get_template(name, self._project) @@ -724,71 +766,77 @@ def create_disk(self, disk_name, size_gb, zone=None, project=None, disk_type="pd if not project: project = self._project disk_data = { - 'sizeGb': size_gb, - 'type': "zones/{}/diskTypes/{}".format(zone, disk_type), - 'name': disk_name + "sizeGb": size_gb, + "type": f"zones/{zone}/diskTypes/{disk_type}", + "name": disk_name, } 
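# Editor's note -- hypothetical usage sketch (project, zone, and file names are
# assumed) showing how create_disk() ties into the zone-operation polling used
# throughout this module; the wait_for call below blocks until the disk insert
# operation reported by the Compute API reaches DONE:
#   gcs = GoogleCloudSystem(project="my-project", zone="us-central1-a",
#                           file_type="json", file_path="service-account.json")
#   gcs.create_disk("scratch-disk", size_gb=10, disk_type="pd-ssd")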
req = self._compute.disks().insert(project=project, zone=zone, body=disk_data) operation = req.execute() - wait_for(lambda: self.is_zone_operation_done(operation['name']), delay=0.5, - num_sec=120, message=" Create {}".format(disk_name)) + wait_for( + lambda: self.is_zone_operation_done(operation["name"]), + delay=0.5, + num_sec=120, + message=f" Create {disk_name}", + ) def list_bucket(self): buckets = self._get_all_buckets() - return [bucket.get('name') for bucket in buckets.get('items', [])] + return [bucket.get("name") for bucket in buckets.get("items", [])] def list_forwarding_rules(self): rules = self._get_all_forwarding_rules() - return [forwarding_rule.get('name') for forwarding_rule in rules] + return [forwarding_rule.get("name") for forwarding_rule in rules] def _find_forwarding_rule_by_name(self, forwarding_rule_name): try: forwarding_rule = self._forwarding_rules.get( - project=self._project, zone=self._zone, - forwardingRule=forwarding_rule_name).execute() + project=self._project, zone=self._zone, forwardingRule=forwarding_rule_name + ).execute() return forwarding_rule except Exception: raise NotFoundError def _check_operation_result(self, result): - if result['status'] == 'DONE': - self.logger.info("The operation '%s' -> DONE", result['name']) - if 'error' in result: - self.logger.error("Error during operation '%s'", result['name']) - self.logger.error("Error details: %s", result['error']) - raise Exception(result['error']) + if result["status"] == "DONE": + self.logger.info("The operation '%s' -> DONE", result["name"]) + if "error" in result: + self.logger.error("Error during operation '%s'", result["name"]) + self.logger.error("Error details: %s", result["error"]) + raise Exception(result["error"]) return True return False def is_global_operation_done(self, operation_name): - result = self._compute.globalOperations().get( - project=self._project, - operation=operation_name).execute() + result = ( + self._compute.globalOperations() + .get(project=self._project, operation=operation_name) + .execute() + ) self._check_operation_result(result) def is_zone_operation_done(self, operation_name, zone=None): if not zone: zone = self._zone - result = self._compute.zoneOperations().get( - project=self._project, - zone=zone, - operation=operation_name).execute() + result = ( + self._compute.zoneOperations() + .get(project=self._project, zone=zone, operation=operation_name) + .execute() + ) self._check_operation_result(result) def create_bucket(self, bucket_name): - """ Create bucket + """Create bucket Args: bucket_name: Unique name of bucket """ if not self.bucket_exists(bucket_name): - self._buckets.insert( - project=self._project, body={"name": "{}".format(bucket_name)}).execute() + self._buckets.insert(project=self._project, body={"name": f"{bucket_name}"}).execute() self.logger.info("Bucket '%s' was created", bucket_name) else: self.logger.info("Bucket '%s' was not created, exists already", bucket_name) def delete_bucket(self, bucket_name): - """ Delete bucket + """Delete bucket Args: bucket_name: Name of bucket """ @@ -819,7 +867,8 @@ def get_file_from_bucket(self, bucket_name, file_name): except errors.HttpError as error: if "Not Found" in error.content: self.logger.info( - "File '%s' was not found in bucket '%s'", file_name, bucket_name) + "File '%s' was not found in bucket '%s'", file_name, bucket_name + ) else: raise error return {} @@ -827,13 +876,15 @@ def get_file_from_bucket(self, bucket_name, file_name): def delete_file_from_bucket(self, bucket_name, file_name): if 
self.bucket_exists(bucket_name): try: - data = self._storage.objects().delete(bucket=bucket_name, - object=file_name).execute() + data = ( + self._storage.objects().delete(bucket=bucket_name, object=file_name).execute() + ) return data except errors.HttpError as error: if "No such object" in error.content: self.logger.info( - "File '%s' was not found in bucket '%s'", bucket_name, file_name) + "File '%s' was not found in bucket '%s'", bucket_name, file_name + ) else: raise error return {} @@ -841,18 +892,20 @@ def delete_file_from_bucket(self, bucket_name, file_name): def upload_file_to_bucket(self, bucket_name, file_path): def handle_progressless_iter(error, progressless_iters): if progressless_iters > NUM_RETRIES: - self.logger.info('Failed to make progress for too many consecutive iterations.') + self.logger.info("Failed to make progress for too many consecutive iterations.") raise error - sleeptime = random.random() * (2 ** progressless_iters) + sleeptime = random.random() * (2**progressless_iters) self.logger.info( - 'Caught exception (%s). Sleeping for %d seconds before retry #%d.', - str(error), sleeptime, progressless_iters + "Caught exception (%s). Sleeping for %d seconds before retry #%d.", + str(error), + sleeptime, + progressless_iters, ) time.sleep(sleeptime) - self.logger.info('Building upload request...') + self.logger.info("Building upload request...") media = MediaFileUpload(file_path, chunksize=CHUNKSIZE, resumable=True) if not media.mimetype(): media = MediaFileUpload(file_path, DEFAULT_MIMETYPE, resumable=True) @@ -860,23 +913,22 @@ def handle_progressless_iter(error, progressless_iters): blob_name = os.path.basename(file_path) if not self.bucket_exists(bucket_name): self.logger.error("Bucket '%s' doesn't exist", bucket_name) - raise NotFoundError("bucket {}".format(bucket_name)) + raise NotFoundError(f"bucket {bucket_name}") request = self._storage.objects().insert( - bucket=bucket_name, name=blob_name, media_body=media) + bucket=bucket_name, name=blob_name, media_body=media + ) self.logger.info( - 'Uploading file: %s, to bucket: %s, blob: %s', - file_path, bucket_name, blob_name + "Uploading file: %s, to bucket: %s, blob: %s", file_path, bucket_name, blob_name ) progressless_iters = 0 response = None while response is None: - error = None try: progress, response = request.next_chunk() if progress: - self.logger.info('Upload progress: %d%%', 100 * progress.progress()) + self.logger.info("Upload progress: %d%%", 100 * progress.progress()) except errors.HttpError as error: if error.resp.status < 500: raise @@ -887,8 +939,8 @@ def handle_progressless_iter(error, progressless_iters): else: progressless_iters = 0 - self.logger.info('Upload complete!') - self.logger.info('Uploaded Object:') + self.logger.info("Upload complete!") + self.logger.info("Uploaded Object:") self.logger.info(json_dumps(response, indent=2)) return (True, blob_name) @@ -904,14 +956,14 @@ def does_forwarding_rule_exist(self, forwarding_rule_name): def list_network(self): self.logger.info("Attempting to List GCE Virtual Private Networks") - networks = self._compute.networks().list(project=self._project).execute()['items'] + networks = self._compute.networks().list(project=self._project).execute()["items"] - return [net['name'] for net in networks] + return [net["name"] for net in networks] def list_subnet(self): self.logger.info("Attempting to List GCE Subnets") - networks = self._compute.networks().list(project=self._project).execute()['items'] - subnetworks = [net['subnetworks'] for net in networks] + 
networks = self._compute.networks().list(project=self._project).execute()["items"] + subnetworks = [net["subnetworks"] for net in networks] subnets_names = [] # Subnetworks is a bi dimensional array, containing urls of subnets. @@ -920,7 +972,7 @@ def list_subnet(self): # and CFME displays networks with subnets from all regions. for urls in subnetworks: for url in urls: - subnets_names.append(url.split('/')[-1]) + subnets_names.append(url.split("/")[-1]) return subnets_names @@ -930,17 +982,23 @@ def list_load_balancer(self): # forwarding rules are displayed instead of loadbalancers, and the regions are neglected. # see: https://bugzilla.redhat.com/show_bug.cgi?id=1547465 # https://bugzilla.redhat.com/show_bug.cgi?id=1433062 - load_balancers = self._compute.targetPools().list(project=self._project, - region=self._region).execute()['items'] - return [lb['name'] for lb in load_balancers] + load_balancers = ( + self._compute.targetPools() + .list(project=self._project, region=self._region) + .execute()["items"] + ) + return [lb["name"] for lb in load_balancers] def list_router(self): self.logger.info("Attempting to List GCE routers") # routers are not shown on CFME # https://bugzilla.redhat.com/show_bug.cgi?id=1543938 - routers = self._compute.routers().list(project=self._project, - region=self._region).execute()['items'] - return [router['name'] for router in routers] + routers = ( + self._compute.routers() + .list(project=self._project, region=self._region) + .execute()["items"] + ) + return [router["name"] for router in routers] def list_security_group(self): - raise NotImplementedError('list_security_group not implemented.') + raise NotImplementedError("list_security_group not implemented.") diff --git a/wrapanapi/systems/hawkular.py b/wrapanapi/systems/hawkular.py index f562dc0d..86bdc36a 100644 --- a/wrapanapi/systems/hawkular.py +++ b/wrapanapi/systems/hawkular.py @@ -1,5 +1,3 @@ - - import base64 import gzip import json @@ -12,7 +10,8 @@ from packaging import version -from wrapanapi.clients import ContainerClient, HawkularWebsocketClient +from wrapanapi.clients import ContainerClient +from wrapanapi.clients import HawkularWebsocketClient from wrapanapi.systems.base import System """ @@ -35,47 +34,94 @@ password: secret """ -Feed = namedtuple('Feed', ['id', 'path']) -ResourceType = namedtuple('ResourceType', ['id', 'name', 'path']) -Resource = namedtuple('Resource', ['id', 'name', 'path']) -ResourceData = namedtuple('ResourceData', ['name', 'path', 'value']) -ResourceWithData = namedtuple('Resource', ['id', 'name', 'path', 'data']) -Server = namedtuple('Server', ['id', 'name', 'path', 'data']) -ServerGroup = namedtuple('ServerGroup', ['id', 'name', 'path', 'data']) -Domain = namedtuple('Domain', ['id', 'name', 'path', 'data']) -Messaging = namedtuple('Messaging', ['id', 'name', 'path']) -Deployment = namedtuple('Deployment', ['id', 'name', 'path']) -Datasource = namedtuple('Datasource', ['id', 'name', 'path']) -OperationType = namedtuple('OperationType', ['id', 'name', 'path']) -ServerStatus = namedtuple('ServerStatus', ['address', 'version', 'state', 'product', 'host']) -Event = namedtuple('event', ['id', 'eventType', 'ctime', 'dataSource', 'dataId', - 'category', 'text', 'tags', 'tenantId', 'context']) -Trigger = namedtuple('Trigger', ['id', 'name', 'enabled', 'severity', 'autoResolve', - 'autoResolveAlerts', 'eventType', 'eventCategory', 'description', - 'autoEnable', 'autoDisable', 'context', 'type', 'tags', 'memberOf', - 'dataIdMap', 'firingMatch', 'autoResolveMatch', 
'conditions', - 'dampenings']) -Condition = namedtuple('Condition', ['conditionId', 'type', 'operator', 'threshold', 'triggerMode', - 'dataId', 'data2Id', 'data2Multiplier', 'triggerId']) -Dampening = namedtuple('Dampening', ['dampeningId', 'triggerId', 'type', 'evalTrueSetting', - 'evalTotalSetting', 'evalTimeSetting']) +Feed = namedtuple("Feed", ["id", "path"]) +ResourceType = namedtuple("ResourceType", ["id", "name", "path"]) +Resource = namedtuple("Resource", ["id", "name", "path"]) +ResourceData = namedtuple("ResourceData", ["name", "path", "value"]) +ResourceWithData = namedtuple("Resource", ["id", "name", "path", "data"]) +Server = namedtuple("Server", ["id", "name", "path", "data"]) +ServerGroup = namedtuple("ServerGroup", ["id", "name", "path", "data"]) +Domain = namedtuple("Domain", ["id", "name", "path", "data"]) +Messaging = namedtuple("Messaging", ["id", "name", "path"]) +Deployment = namedtuple("Deployment", ["id", "name", "path"]) +Datasource = namedtuple("Datasource", ["id", "name", "path"]) +OperationType = namedtuple("OperationType", ["id", "name", "path"]) +ServerStatus = namedtuple("ServerStatus", ["address", "version", "state", "product", "host"]) +Event = namedtuple( + "event", + [ + "id", + "eventType", + "ctime", + "dataSource", + "dataId", + "category", + "text", + "tags", + "tenantId", + "context", + ], +) +Trigger = namedtuple( + "Trigger", + [ + "id", + "name", + "enabled", + "severity", + "autoResolve", + "autoResolveAlerts", + "eventType", + "eventCategory", + "description", + "autoEnable", + "autoDisable", + "context", + "type", + "tags", + "memberOf", + "dataIdMap", + "firingMatch", + "autoResolveMatch", + "conditions", + "dampenings", + ], +) +Condition = namedtuple( + "Condition", + [ + "conditionId", + "type", + "operator", + "threshold", + "triggerMode", + "dataId", + "data2Id", + "data2Multiplier", + "triggerId", + ], +) +Dampening = namedtuple( + "Dampening", + ["dampeningId", "triggerId", "type", "evalTrueSetting", "evalTotalSetting", "evalTimeSetting"], +) CANONICAL_PATH_NAME_MAPPING = { - '/d;': 'data_id', - '/e;': 'environment_id', - '/f;': 'feed_id', - '/m;': 'metric_id', - '/mp;': 'metadata_pack_id', - '/mt;': 'metric_type_id', - '/ot;': 'operation_type_id', - '/r;': 'resource_id', - '/rl;': 'relationship_id', - '/rt;': 'resource_type_id', - '/t;': 'tenant_id', + "/d;": "data_id", + "/e;": "environment_id", + "/f;": "feed_id", + "/m;": "metric_id", + "/mp;": "metadata_pack_id", + "/mt;": "metric_type_id", + "/ot;": "operation_type_id", + "/r;": "resource_id", + "/rl;": "relationship_id", + "/rt;": "resource_type_id", + "/t;": "tenant_id", } -class CanonicalPath(object): +class CanonicalPath: """CanonicalPath class Path is a class to split a canonical path into friendly values.\ @@ -100,7 +146,7 @@ def __init__(self, path): if not path: raise KeyError("CanonicalPath should not be None or empty!") self._path_ids = [] - r_paths = re.split(r'(/\w+;)', path) + r_paths = re.split(r"(/\w+;)", path) if len(r_paths) % 2 == 1: del r_paths[0] for p_index in range(0, len(r_paths), 2): @@ -112,10 +158,7 @@ ex_list.append(path_value) setattr(self, path_id, ex_list) else: - v_list = [ - getattr(self, path_id), - path_value - ] + v_list = [getattr(self, path_id), path_value] setattr(self, path_id, v_list) else: self._path_ids.append(path_id) @@ -127,35 +170,35 @@ def __iter__(self): yield (path_id, getattr(self, path_id)) def __repr__(self): - return "<CanonicalPath {}>".format(self.to_string) + return f"<CanonicalPath {self.to_string}>" @property def to_string(self): - c_path = '' - if
'tenant_id' in self._path_ids: - c_path = "/t;{}".format(self.tenant_id) - if 'feed_id' in self._path_ids: - c_path += "/f;{}".format(self.feed_id) - if 'environment_id' in self._path_ids: - c_path += "/e;{}".format(self.environment_id) - if 'metric_id' in self._path_ids: - c_path += "/m;{}".format(self.metric_id) - if 'resource_id' in self._path_ids: + c_path = "" + if "tenant_id" in self._path_ids: + c_path = f"/t;{self.tenant_id}" + if "feed_id" in self._path_ids: + c_path += f"/f;{self.feed_id}" + if "environment_id" in self._path_ids: + c_path += f"/e;{self.environment_id}" + if "metric_id" in self._path_ids: + c_path += f"/m;{self.metric_id}" + if "resource_id" in self._path_ids: if isinstance(self.resource_id, list): for _resource_id in self.resource_id: - c_path += "/r;{}".format(_resource_id) + c_path += f"/r;{_resource_id}" else: - c_path += "/r;{}".format(self.resource_id) - if 'metric_type_id' in self._path_ids: - c_path += "/mt;{}".format(self.metric_type_id) - if 'resource_type_id' in self._path_ids: - c_path += "/rt;{}".format(self.resource_type_id) - if 'metadata_pack_id' in self._path_ids: - c_path += "/mp;{}".format(self.metadata_pack_id) - if 'operation_type_id' in self._path_ids: - c_path += "/ot;{}".format(self.operation_type_id) - if 'relationship_id' in self._path_ids: - c_path += "/rl;{}".format(self.relationship_id) + c_path += f"/r;{self.resource_id}" + if "metric_type_id" in self._path_ids: + c_path += f"/mt;{self.metric_type_id}" + if "resource_type_id" in self._path_ids: + c_path += f"/rt;{self.resource_type_id}" + if "metadata_pack_id" in self._path_ids: + c_path += f"/mp;{self.metadata_pack_id}" + if "operation_type_id" in self._path_ids: + c_path += f"/ot;{self.operation_type_id}" + if "relationship_id" in self._path_ids: + c_path += f"/rl;{self.relationship_id}" return c_path @@ -175,39 +218,57 @@ class HawkularSystem(System): """ - def __init__(self, - hostname, protocol="http", port=8080, **kwargs): - super(HawkularSystem, self).__init__(**kwargs) + def __init__(self, hostname, protocol="http", port=8080, **kwargs): + super().__init__(**kwargs) self.hostname = hostname self.port = port - self.username = kwargs.get('username', 'jdoe') - self.password = kwargs.get('password', 'password') - self.tenant_id = kwargs.get('tenant_id', 'hawkular') + self.username = kwargs.get("username", "jdoe") + self.password = kwargs.get("password", "password") + self.tenant_id = kwargs.get("tenant_id", "hawkular") self.auth = self.username, self.password - self._hawkular = HawkularService(hostname=hostname, port=port, auth=self.auth, - protocol=protocol, tenant_id=self.tenant_id, - entry="hawkular") - self._alert = HawkularAlert(hostname=hostname, port=port, auth=self.auth, - protocol=protocol, tenant_id=self.tenant_id) - self._metric = HawkularMetric(hostname=hostname, port=port, auth=self.auth, - protocol=protocol, tenant_id=self.tenant_id) + self._hawkular = HawkularService( + hostname=hostname, + port=port, + auth=self.auth, + protocol=protocol, + tenant_id=self.tenant_id, + entry="hawkular", + ) + self._alert = HawkularAlert( + hostname=hostname, + port=port, + auth=self.auth, + protocol=protocol, + tenant_id=self.tenant_id, + ) + self._metric = HawkularMetric( + hostname=hostname, + port=port, + auth=self.auth, + protocol=protocol, + tenant_id=self.tenant_id, + ) self._inventory = self._get_inventory(hostname, port, protocol) - self._operation = HawkularOperation(hostname=self.hostname, port=self.port, - username=self.username, password=self.password, - 
tenant_id=self.tenant_id, - connect=kwargs.get('ws_connect', True)) + self._operation = HawkularOperation( + hostname=self.hostname, + port=self.port, + username=self.username, + password=self.password, + tenant_id=self.tenant_id, + connect=kwargs.get("ws_connect", True), + ) _stats_available = { - 'num_server': lambda self: len(self.inventory.list_server()), - 'num_domain': lambda self: len(self.inventory.list_domain()), - 'num_deployment': lambda self: len(self.inventory.list_server_deployment()), - 'num_datasource': lambda self: len(self.inventory.list_server_datasource()), - 'num_messaging': lambda self: len(self.inventory.list_messaging()), + "num_server": lambda self: len(self.inventory.list_server()), + "num_domain": lambda self: len(self.inventory.list_domain()), + "num_deployment": lambda self: len(self.inventory.list_server_deployment()), + "num_datasource": lambda self: len(self.inventory.list_server_datasource()), + "num_messaging": lambda self: len(self.inventory.list_messaging()), } @property def _identifying_attrs(self): - return {'hostname': self.hostname, 'tenant_id': self.tenant_id} + return {"hostname": self.hostname, "tenant_id": self.tenant_id} @property def alert(self): @@ -226,21 +287,25 @@ def operation(self): return self._operation def _get_inventory(self, hostname, port, protocol): - cls = HawkularInventoryInMetrics\ - if self._metrics_older("0.26.1.Final") else HawkularInventory + cls = ( + HawkularInventoryInMetrics if self._metrics_older("0.26.1.Final") else HawkularInventory + ) kwargs = dict( - hostname=hostname, port=port, auth=self.auth, - protocol=protocol, tenant_id=self.tenant_id + hostname=hostname, + port=port, + auth=self.auth, + protocol=protocol, + tenant_id=self.tenant_id, ) return cls(**kwargs) def _metrics_older(self, metrics_version): - metrics_version = metrics_version.rstrip('.Final') - status_version = self.metric._get("status")['Implementation-Version'].rstrip('.Final') + metrics_version = metrics_version.rstrip(".Final") + status_version = self.metric._get("status")["Implementation-Version"].rstrip(".Final") return version.parse(status_version) > version.parse(metrics_version) def info(self): - raise NotImplementedError('info not implemented.') + raise NotImplementedError("info not implemented.") def disconnect(self): pass @@ -248,14 +313,14 @@ def disconnect(self): def status(self): """Returns status of hawkular services""" return { - 'hawkular_services': self._hawkular.status(), - 'alerts': self.alert.status(), - 'inventory': self.inventory.status(), - 'metrics': self.metric.status() + "hawkular_services": self._hawkular.status(), + "alerts": self.alert.status(), + "inventory": self.inventory.status(), + "metrics": self.metric.status(), } -class HawkularService(object): +class HawkularService: def __init__(self, hostname, port, protocol, auth, tenant_id, entry): """This class is parent class for all hawkular services Args: @@ -271,12 +336,13 @@ def __init__(self, hostname, port, protocol, auth, tenant_id, entry): self.port = port self.protocol = protocol self.tenant_id = tenant_id - self._api = ContainerClient(hostname=hostname, auth=self.auth, protocol=protocol, - port=port, entry=entry) + self._api = ContainerClient( + hostname=hostname, auth=self.auth, protocol=protocol, port=port, entry=entry + ) def status(self): """Returns status of a service""" - return self._get(path='status') + return self._get(path="status") def _get(self, path, params=None): """runs GET request and returns response as JSON""" @@ -288,52 +354,91 @@ def 
_delete(self, path): def _put(self, path, data): """runs PUT request and returns status""" - return self._api.put_status(path, data, headers={"Hawkular-Tenant": self.tenant_id, - "Content-Type": "application/json"}) + return self._api.put_status( + path, + data, + headers={"Hawkular-Tenant": self.tenant_id, "Content-Type": "application/json"}, + ) def _post(self, path, data): """runs POST request and returns status""" - return self._api.post_status(path, data, - headers={"Hawkular-Tenant": self.tenant_id, - "Content-Type": "application/json"}) + return self._api.post_status( + path, + data, + headers={"Hawkular-Tenant": self.tenant_id, "Content-Type": "application/json"}, + ) def _post_raw(self, path, data): """runs POST request and returns result as JSON""" return self._api.raw_post( - path, data, - headers={"Hawkular-Tenant": self.tenant_id, "Content-Type": "application/json"} + path, + data, + headers={"Hawkular-Tenant": self.tenant_id, "Content-Type": "application/json"}, ) class HawkularAlert(HawkularService): def __init__(self, hostname, port, protocol, auth, tenant_id): """Creates hawkular alert service instance. For args refer 'HawkularService'""" - HawkularService.__init__(self, hostname=hostname, port=port, protocol=protocol, - auth=auth, tenant_id=tenant_id, entry="hawkular/alerts") + HawkularService.__init__( + self, + hostname=hostname, + port=port, + protocol=protocol, + auth=auth, + tenant_id=tenant_id, + entry="hawkular/alerts", + ) @classmethod def _convert_trigger(cls, entity): - return Trigger(entity.get('id'), entity.get('name'), entity.get('enabled'), - entity.get('severity'), entity.get('autoResolve'), - entity.get('autoResolveAlerts'), entity.get('eventType'), - entity.get('eventCategory'), entity.get('description', None), - entity.get('autoEnable'), entity.get('autoDisable'), - entity.get('context'), entity.get('type'), entity.get('tags'), - entity.get('memberOf'), entity.get('dataIdMap'), entity.get('firingMatch'), - entity.get('autoResolveMatch'), [], []) + return Trigger( + entity.get("id"), + entity.get("name"), + entity.get("enabled"), + entity.get("severity"), + entity.get("autoResolve"), + entity.get("autoResolveAlerts"), + entity.get("eventType"), + entity.get("eventCategory"), + entity.get("description", None), + entity.get("autoEnable"), + entity.get("autoDisable"), + entity.get("context"), + entity.get("type"), + entity.get("tags"), + entity.get("memberOf"), + entity.get("dataIdMap"), + entity.get("firingMatch"), + entity.get("autoResolveMatch"), + [], + [], + ) @classmethod def _convert_condition(cls, entity): - return Condition(entity.get('conditionId'), entity.get('type'), entity.get('operator'), - entity.get('threshold'), entity.get('triggerMode'), entity.get('dataId'), - entity.get('data2Id'), entity.get('data2Multiplier'), - entity.get('triggerId')) + return Condition( + entity.get("conditionId"), + entity.get("type"), + entity.get("operator"), + entity.get("threshold"), + entity.get("triggerMode"), + entity.get("dataId"), + entity.get("data2Id"), + entity.get("data2Multiplier"), + entity.get("triggerId"), + ) @classmethod def _convert_dampening(cls, entity): - return Dampening(entity.get('dampeningId'), entity.get('triggerId'), entity.get('type'), - entity.get('evalTrueSetting'), entity.get('evalTotalSetting'), - entity.get('evalTimeSetting')) + return Dampening( + entity.get("dampeningId"), + entity.get("triggerId"), + entity.get("type"), + entity.get("evalTrueSetting"), + entity.get("evalTotalSetting"), + entity.get("evalTimeSetting"), + ) def 
list_event(self, start_time=0, end_time=sys.maxsize): """Returns the list of events. @@ -343,20 +448,37 @@ def list_event(self, start_time=0, end_time=sys.maxsize): Args: start_time: Start time as timestamp end_time: End time as timestamp - """ + """ entities = [] - entities_j = self._get('events?startTime={}&endTime={}'.format(start_time, end_time)) + entities_j = self._get(f"events?startTime={start_time}&endTime={end_time}") if entities_j: for entity_j in entities_j: - entity = Event(entity_j['id'], entity_j['eventType'], entity_j['ctime'], - entity_j['dataSource'], entity_j.get('dataId', None), - entity_j['category'], entity_j['text'], entity_j.get('tags', None), - entity_j.get('tenantId', None), entity_j.get('context', None)) + entity = Event( + entity_j["id"], + entity_j["eventType"], + entity_j["ctime"], + entity_j["dataSource"], + entity_j.get("dataId", None), + entity_j["category"], + entity_j["text"], + entity_j.get("tags", None), + entity_j.get("tenantId", None), + entity_j.get("context", None), + ) entities.append(entity) return entities - def list_alert(self, start_time=None, end_time=None, alert_ids=None, trigger_ids=None, - statuses=None, severities=None, tags=None, thin=None): + def list_alert( + self, + start_time=None, + end_time=None, + alert_ids=None, + trigger_ids=None, + statuses=None, + severities=None, + tags=None, + thin=None, + ): """Obtain the alerts with optional filters Args: start_time: Filter out alerts created before this time, millisecond since epoch. @@ -373,10 +495,17 @@ def list_alert(self, start_time=None, end_time=None, alert_ids=None, trigger_ids each tag of format 'name thin: Return only thin alerts, do not include: evalSets, resolvedEvalSets. """ - parms = {'startTime': start_time, 'endTime': end_time, 'alertIds': alert_ids, - 'triggerIds': trigger_ids, 'statuses': statuses, 'severities': severities, - 'tags': tags, 'thin': thin} - entities = self._get(path='', params=parms) + parms = { + "startTime": start_time, + "endTime": end_time, + "alertIds": alert_ids, + "triggerIds": trigger_ids, + "statuses": statuses, + "severities": severities, + "tags": tags, + "thin": thin, + } + entities = self._get(path="", params=parms) if entities: return entities return [] @@ -392,8 +521,8 @@ def list_trigger(self, ids=None, tags=None): ids = [] if not tags: tags = [] - params = {'triggerIds': ids, 'tags': tags} - entities = self._get(path='triggers', params=params) + params = {"triggerIds": ids, "tags": tags} + entities = self._get(path="triggers", params=params) triggers = [] if not entities: return triggers @@ -408,18 +537,18 @@ def get_single_trigger(self, trigger_id, full=False): full: If True then conditions and dampenings for the trigger are also fetched """ if full: - path = "triggers/trigger/{}".format(trigger_id) + path = f"triggers/trigger/{trigger_id}" else: - path = "triggers/{}".format(trigger_id) + path = f"triggers/{trigger_id}" entity = self._get(path=path) if not entity: return None trigger = self._convert_trigger(entity) - c_entities = entity.get('conditions', None) + c_entities = entity.get("conditions", None) if c_entities: for c_entity in c_entities: trigger.conditions.append(self._convert_condition(c_entity)) - d_entities = entity.get('dampenings', None) + d_entities = entity.get("dampenings", None) if d_entities: for d_entity in d_entities: trigger.dampenings.append(self._convert_dampening(d_entity)) @@ -431,26 +560,33 @@ def create_trigger(self, trigger, conditions=None, dampenings=None): conditions = [] if not dampenings: dampenings = [] - 
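# The filters accepted by list_alert() above are handed straight to _get() as
# query parameters; parameters left at None are assumed to be dropped by the
# underlying ContainerClient before the request goes out. A minimal sketch of
# that assumed behaviour, using only the standard library:
from urllib.parse import urlencode

def build_alert_query(**params):
    # keep only the filters the caller actually set
    present = {k: v for k, v in params.items() if v is not None}
    return urlencode(present)

# only statuses/severities were given, so only they appear in the URL
print(build_alert_query(startTime=None, statuses="OPEN", severities="HIGH"))
# -> 'statuses=OPEN&severities=HIGH'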
full_trigger = {'trigger': trigger, 'conditions': conditions, 'dampenings': dampenings} - self._post(path='triggers/trigger', data=full_trigger) + full_trigger = {"trigger": trigger, "conditions": conditions, "dampenings": dampenings} + self._post(path="triggers/trigger", data=full_trigger) def delete_trigger(self, trigger_id): """Deletes the trigger definition.""" - self._delete(path="triggers/{}".format(trigger_id)) + self._delete(path=f"triggers/{trigger_id}") class HawkularInventory(HawkularService): def __init__(self, hostname, port, protocol, auth, tenant_id): """Creates hawkular inventory service instance. For args refer 'HawkularService'""" - HawkularService.__init__(self, hostname=hostname, port=port, protocol=protocol, - auth=auth, tenant_id=tenant_id, entry="hawkular/inventory") + HawkularService.__init__( + self, + hostname=hostname, + port=port, + protocol=protocol, + auth=auth, + tenant_id=tenant_id, + entry="hawkular/inventory", + ) _stats_available = { - 'num_server': lambda self: len(self.list_server()), - 'num_domain': lambda self: len(self.list_domain()), - 'num_deployment': lambda self: len(self.list_server_deployment()), - 'num_datasource': lambda self: len(self.list_server_datasource()), - 'num_messaging': lambda self: len(self.list_messaging()), + "num_server": lambda self: len(self.list_server()), + "num_domain": lambda self: len(self.list_domain()), + "num_deployment": lambda self: len(self.list_server_deployment()), + "num_datasource": lambda self: len(self.list_server_datasource()), + "num_messaging": lambda self: len(self.list_messaging()), } def list_server_deployment(self, feed_id=None): @@ -459,10 +595,8 @@ def list_server_deployment(self, feed_id=None): Args: feed_id: Feed id of the resource (optional) """ - resources = self.list_resource(feed_id=feed_id, resource_type_id='Deployment') - resources.extend(self.list_resource( - feed_id=feed_id, - resource_type_id='SubDeployment')) + resources = self.list_resource(feed_id=feed_id, resource_type_id="Deployment") + resources.extend(self.list_resource(feed_id=feed_id, resource_type_id="SubDeployment")) deployments = [] if resources: for resource in resources: @@ -472,13 +606,11 @@ def list_server_deployment(self, feed_id=None): def list_messaging(self, feed_id=None): """Returns list of massagings (JMS Queue and JMS Topic). - Args: - feed_id: Feed id of the resource (optional) + Args: + feed_id: Feed id of the resource (optional) """ - resources = self.list_resource(feed_id=feed_id, resource_type_id='JMS Queue') - resources.extend(self.list_resource( - feed_id=feed_id, - resource_type_id='JMS Topic')) + resources = self.list_resource(feed_id=feed_id, resource_type_id="JMS Queue") + resources.extend(self.list_resource(feed_id=feed_id, resource_type_id="JMS Topic")) messagings = [] if resources: for resource in resources: @@ -488,19 +620,20 @@ def list_messaging(self, feed_id=None): def list_server(self, feed_id=None): """Returns list of middleware servers. 
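# create_trigger() above posts a "full trigger" document: the trigger
# definition plus its conditions and dampenings in a single body. The field
# names below sketch the hawkular-alerts JSON shape for illustration only and
# are not verified against a live server:
full_trigger = {
    "trigger": {"id": "cpu-high", "name": "CPU high", "severity": "HIGH"},
    "conditions": [
        {"type": "THRESHOLD", "dataId": "cpu", "operator": "GT", "threshold": 0.9}
    ],
    "dampenings": [],
}
# This dict is what gets POSTed to triggers/trigger, with the Hawkular-Tenant
# header supplied by HawkularService._post().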
- Args: - feed_id: Feed id of the resource (optional) + Args: + feed_id: Feed id of the resource (optional) """ - resources = self.list_resource(feed_id=feed_id, resource_type_id='WildFly Server') - resources.extend(self.list_resource( - feed_id=feed_id, - resource_type_id='Domain WildFly Server')) + resources = self.list_resource(feed_id=feed_id, resource_type_id="WildFly Server") + resources.extend( + self.list_resource(feed_id=feed_id, resource_type_id="Domain WildFly Server") + ) servers = [] if resources: for resource in resources: resource_data = self.get_config_data( feed_id=resource.path.feed_id, - resource_id=self._get_resource_id(resource.path.resource_id)) + resource_id=self._get_resource_id(resource.path.resource_id), + ) server_data = resource_data.value servers.append(Server(resource.id, resource.name, resource.path, server_data)) return servers @@ -508,15 +641,16 @@ def list_server(self, feed_id=None): def list_domain(self, feed_id=None): """Returns list of middleware domains. - Args: - feed_id: Feed id of the resource (optional) + Args: + feed_id: Feed id of the resource (optional) """ - resources = self.list_resource(feed_id=feed_id, resource_type_id='Host Controller') + resources = self.list_resource(feed_id=feed_id, resource_type_id="Host Controller") domains = [] if resources: for resource in resources: resource_data = self.get_config_data( - feed_id=resource.path.feed_id, resource_id=resource.id) + feed_id=resource.path.feed_id, resource_id=resource.id + ) domain_data = resource_data.value domains.append(Domain(resource.id, resource.name, resource.path, domain_data)) return domains @@ -524,33 +658,38 @@ def list_domain(self, feed_id=None): def list_server_group(self, feed_id): """Returns list of middleware domain's server groups. - Args: - feed_id: Feed id of the resource (optional) + Args: + feed_id: Feed id of the resource (optional) """ - resources = self.list_resource(feed_id=feed_id, resource_type_id='Domain Server Group') + resources = self.list_resource(feed_id=feed_id, resource_type_id="Domain Server Group") server_groups = [] if resources: for resource in resources: resource_data = self.get_config_data( feed_id=resource.path.feed_id, - resource_id=self._get_resource_id(resource.path.resource_id)) + resource_id=self._get_resource_id(resource.path.resource_id), + ) server_group_data = resource_data.value - server_groups.append(ServerGroup( - resource.id, resource.name, resource.path, server_group_data)) + server_groups.append( + ServerGroup(resource.id, resource.name, resource.path, server_group_data) + ) return server_groups def list_resource(self, resource_type_id, feed_id=None): """Returns list of resources. - Args: - feed_id: Feed id of the resource (optional) - resource_type_id: Resource type id + Args: + feed_id: Feed id of the resource (optional) + resource_type_id: Resource type id """ if not feed_id: resources = [] for feed in self.list_feed(): - resources.extend(self._list_resource(feed_id=feed.path.feed_id, - resource_type_id=resource_type_id)) + resources.extend( + self._list_resource( + feed_id=feed.path.feed_id, resource_type_id=resource_type_id + ) + ) return resources else: return self._list_resource(feed_id=feed_id, resource_type_id=resource_type_id) @@ -558,45 +697,46 @@ def list_resource(self, resource_type_id, feed_id=None): def list_child_resource(self, feed_id, resource_id, recursive=False): """Returns list of resources. 
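# When no feed_id is given, list_resource() above fans out over every known
# feed and concatenates the per-feed results. The shape of that loop, reduced
# to plain Python (list_feed/list_one stand in for the real methods):
def fan_out_resources(list_feed, list_one, resource_type_id, feed_id=None):
    if feed_id:
        return list_one(feed_id, resource_type_id)
    resources = []
    for feed in list_feed():
        resources.extend(list_one(feed, resource_type_id))
    return resources

feeds = lambda: ["feed-a", "feed-b"]
one = lambda feed, rtype: [f"{feed}/{rtype}-1"]
print(fan_out_resources(feeds, one, "Deployment"))
# -> ['feed-a/Deployment-1', 'feed-b/Deployment-1']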
- Args: - feed_id: Feed id of the resource - resource_id: Resource id - recursive: should be True when you want to get recursively, Default False + Args: + feed_id: Feed id of the resource + resource_id: Resource id + recursive: should be True when you want to get recursively, Default False """ if not feed_id or not resource_id: raise KeyError("'feed_id' and 'resource_id' are a mandatory field!") resources = [] if recursive: - entities_j = self._get('traversal/f;{}/r;{}/recursive;over=isParentOf;type=r' - .format(feed_id, resource_id)) + entities_j = self._get( + f"traversal/f;{feed_id}/r;{resource_id}/recursive;over=isParentOf;type=r" + ) else: - entities_j = self._get('traversal/f;{}/r;{}/type=r' - .format(feed_id, resource_id)) + entities_j = self._get(f"traversal/f;{feed_id}/r;{resource_id}/type=r") if entities_j: for entity_j in entities_j: - resources.append(Resource(entity_j['id'], entity_j['name'], - CanonicalPath(entity_j['path']))) + resources.append( + Resource(entity_j["id"], entity_j["name"], CanonicalPath(entity_j["path"])) + ) return resources def _list_resource(self, feed_id, resource_type_id=None): """Returns list of resources. - Args: - feed_id: Feed id of the resource - resource_type_id: Resource type id (optional) + Args: + feed_id: Feed id of the resource + resource_type_id: Resource type id (optional) """ if not feed_id: raise KeyError("'feed_id' is a mandatory field!") entities = [] if resource_type_id: - entities_j = self._get('traversal/f;{}/rt;{}/rl;defines/type=r' - .format(feed_id, resource_type_id)) + entities_j = self._get(f"traversal/f;{feed_id}/rt;{resource_type_id}/rl;defines/type=r") else: - entities_j = self._get('traversal/f;{}/type=r'.format(feed_id)) + entities_j = self._get(f"traversal/f;{feed_id}/type=r") if entities_j: for entity_j in entities_j: - entities.append(Resource(entity_j['id'], entity_j['name'], - CanonicalPath(entity_j['path']))) + entities.append( + Resource(entity_j["id"], entity_j["name"], CanonicalPath(entity_j["path"])) + ) return entities def get_config_data(self, feed_id, resource_id): @@ -605,44 +745,46 @@ def get_config_data(self, feed_id, resource_id): Args: feed_id: Feed id of the resource resource_id: Resource id - """ + """ if not feed_id or not resource_id: raise KeyError("'feed_id' and 'resource_id' are mandatory field!") - entity_j = self._get('entity/f;{}/r;{}/d;configuration' - .format(feed_id, self._get_resource_id(resource_id))) + entity_j = self._get( + f"entity/f;{feed_id}/r;{self._get_resource_id(resource_id)}/d;configuration" + ) if entity_j: - return ResourceData(entity_j['name'], CanonicalPath(entity_j['path']), - entity_j['value']) + return ResourceData( + entity_j["name"], CanonicalPath(entity_j["path"]), entity_j["value"] + ) return None def _get_resource_id(self, resource_id): if isinstance(resource_id, list): - return "{}".format('/r;'.join(resource_id)) + return "{}".format("/r;".join(resource_id)) else: return resource_id def list_feed(self): """Returns list of feeds""" entities = [] - entities_j = self._get('traversal/type=f') + entities_j = self._get("traversal/type=f") if entities_j: for entity_j in entities_j: - entities.append(Feed(entity_j['id'], CanonicalPath(entity_j['path']))) + entities.append(Feed(entity_j["id"], CanonicalPath(entity_j["path"]))) return entities def list_resource_type(self, feed_id): """Returns list of resource types. 
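# The inventory REST paths built above follow Hawkular's canonical-path
# grammar: segments such as f;<feed> and r;<resource> joined by '/', with
# nested resources chained as r;parent/r;child. A sketch of the two helpers
# that pattern implies (hypothetical free functions, not wrapanapi API):
def resource_segment(resource_id):
    # mirrors _get_resource_id(): a list means a nested resource chain
    if isinstance(resource_id, list):
        return "/r;".join(resource_id)
    return resource_id

def config_data_path(feed_id, resource_id):
    return f"entity/f;{feed_id}/r;{resource_segment(resource_id)}/d;configuration"

print(config_data_path("feed-1", ["server~~", "child"]))
# -> 'entity/f;feed-1/r;server~~/r;child/d;configuration'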
- Args: - feed_id: Feed id of the resource type + Args: + feed_id: Feed id of the resource type """ if not feed_id: raise KeyError("'feed_id' is a mandatory field!") entities = [] - entities_j = self._get('traversal/f;{}/type=rt'.format(feed_id)) + entities_j = self._get(f"traversal/f;{feed_id}/type=rt") if entities_j: for entity_j in entities_j: - entities.append(ResourceType(entity_j['id'], entity_j['name'], entity_j['path'])) + entities.append(ResourceType(entity_j["id"], entity_j["name"], entity_j["path"])) return entities def list_operation_definition(self, feed_id, resource_type_id): @@ -654,23 +796,21 @@ def list_operation_definition(self, feed_id, resource_type_id): """ if feed_id is None or resource_type_id is None: raise KeyError("'feed_id' and 'resource_type_id' are mandatory fields!") - res_j = self._get('traversal/f;{}/rt;{}/type=ot'.format(feed_id, resource_type_id)) + res_j = self._get(f"traversal/f;{feed_id}/rt;{resource_type_id}/type=ot") operations = [] if res_j: for res in res_j: - operations.append(OperationType(res['id'], res['name'], CanonicalPath(res['path']))) + operations.append(OperationType(res["id"], res["name"], CanonicalPath(res["path"]))) return operations def list_server_datasource(self, feed_id=None): """Returns list of datasources (both XA and non XA). - Args: - feed_id: Feed id of the datasource (optional) + Args: + feed_id: Feed id of the datasource (optional) """ - resources = self.list_resource(feed_id=feed_id, resource_type_id='Datasource') - resources.extend(self.list_resource( - feed_id=feed_id, - resource_type_id='XA Datasource')) + resources = self.list_resource(feed_id=feed_id, resource_type_id="Datasource") + resources.extend(self.list_resource(feed_id=feed_id, resource_type_id="XA Datasource")) datasources = [] if resources: for resource in resources: @@ -684,13 +824,13 @@ def edit_config_data(self, resource_data, **kwargs): resource_data: Resource data """ if not isinstance(resource_data, ResourceData) or not resource_data.value: - raise KeyError( - "'resource_data' should be ResourceData with 'value' attribute") - if not kwargs or 'feed_id' not in kwargs or 'resource_id' not in kwargs: + raise KeyError("'resource_data' should be ResourceData with 'value' attribute") + if not kwargs or "feed_id" not in kwargs or "resource_id" not in kwargs: raise KeyError("'feed_id' and 'resource_id' are mandatory field!") - r = self._put('entity/f;{}/r;{}/d;configuration' - .format(kwargs['feed_id'], kwargs['resource_id']), - {"value": resource_data.value}) + r = self._put( + "entity/f;{}/r;{}/d;configuration".format(kwargs["feed_id"], kwargs["resource_id"]), + {"value": resource_data.value}, + ) return r def create_resource(self, resource, resource_data, resource_type, **kwargs): @@ -704,30 +844,29 @@ def create_resource(self, resource, resource_data, resource_type, **kwargs): if not isinstance(resource, Resource): raise KeyError("'resource' should be an instance of Resource") if not isinstance(resource_data, ResourceData) or not resource_data.value: - raise KeyError( - "'resource_data' should be ResourceData with 'value' attribute") + raise KeyError("'resource_data' should be ResourceData with 'value' attribute") if not isinstance(resource_type, ResourceType): raise KeyError("'resource_type' should be an instance of ResourceType") - if not kwargs or 'feed_id' not in kwargs: + if not kwargs or "feed_id" not in kwargs: raise KeyError('Variable "feed_id" id mandatory field!') - resource_id = urlquote(resource.id, safe='') + resource_id = urlquote(resource.id, 
safe="") r = self._post( - 'entity/f;{}/resource'.format(kwargs['feed_id']), + "entity/f;{}/resource".format(kwargs["feed_id"]), data={ "name": resource.name, "id": resource.id, - "resourceTypePath": "rt;{}".format(resource_type.path.resource_type_id) - } + "resourceTypePath": f"rt;{resource_type.path.resource_type_id}", + }, ) if r: r = self._post( - 'entity/f;{}/r;{}/data'.format(kwargs['feed_id'], resource_id), - data={'role': 'configuration', "value": resource_data.value} + "entity/f;{}/r;{}/data".format(kwargs["feed_id"], resource_id), + data={"role": "configuration", "value": resource_data.value}, ) else: # if resource or it's data was not created correctly, delete resource - self._delete('entity/f;{}/r;{}'.format(kwargs['feed_id'], resource_id)) + self._delete("entity/f;{}/r;{}".format(kwargs["feed_id"], resource_id)) return r def delete_resource(self, feed_id, resource_id): @@ -738,47 +877,56 @@ def delete_resource(self, feed_id, resource_id): """ if not feed_id or not resource_id: raise KeyError("'feed_id' and 'resource_id' are mandatory fields!") - r = self._delete('entity/f;{}/r;{}'.format(feed_id, resource_id)) + r = self._delete(f"entity/f;{feed_id}/r;{resource_id}") return r class HawkularInventoryInMetrics(HawkularService): def __init__(self, hostname, port, protocol, auth, tenant_id): """Creates hawkular inventory service instance. For args refer 'HawkularService'""" - HawkularService.__init__(self, hostname=hostname, port=port, protocol=protocol, - auth=auth, tenant_id=tenant_id, entry="hawkular/metrics") + HawkularService.__init__( + self, + hostname=hostname, + port=port, + protocol=protocol, + auth=auth, + tenant_id=tenant_id, + entry="hawkular/metrics", + ) _stats_available = { - 'num_server': lambda self: len(self.list_server()), - 'num_domain': lambda self: len(self.list_domain()), - 'num_deployment': lambda self: len(self.list_server_deployment()), - 'num_datasource': lambda self: len(self.list_server_datasource()), - 'num_messaging': lambda self: len(self.list_messaging()), + "num_server": lambda self: len(self.list_server()), + "num_domain": lambda self: len(self.list_domain()), + "num_deployment": lambda self: len(self.list_server_deployment()), + "num_datasource": lambda self: len(self.list_server_datasource()), + "num_messaging": lambda self: len(self.list_messaging()), } def list_feed(self): """Returns list of feeds""" entities = [] - entities_j = self._get('strings/tags/module:inventory,feed:*') - if entities_j and entities_j['feed']: - for entity_j in entities_j['feed']: - entities.append(Feed(entity_j, CanonicalPath('/f;{}'.format(entity_j)))) + entities_j = self._get("strings/tags/module:inventory,feed:*") + if entities_j and entities_j["feed"]: + for entity_j in entities_j["feed"]: + entities.append(Feed(entity_j, CanonicalPath(f"/f;{entity_j}"))) return entities def list_server(self, feed_id=None): """Returns list of middleware servers. 
- Args: - feed_id: Feed id of the resource (optional) + Args: + feed_id: Feed id of the resource (optional) """ servers = self.list_resource( - feed_id=feed_id, resource_type_id='WildFly Server', - cls=Server, include_data=True + feed_id=feed_id, resource_type_id="WildFly Server", cls=Server, include_data=True ) servers.extend( self.list_resource( - feed_id=feed_id, resource_type_id='Domain WildFly Server', - cls=Server, list_children=True, include_data=True + feed_id=feed_id, + resource_type_id="Domain WildFly Server", + cls=Server, + list_children=True, + include_data=True, ) ) return servers @@ -786,24 +934,30 @@ def list_server(self, feed_id=None): def list_domain(self, feed_id=None): """Returns list of middleware domains. - Args: - feed_id: Feed id of the resource (optional) + Args: + feed_id: Feed id of the resource (optional) """ domains = self.list_resource( - feed_id=feed_id, resource_type_id='Domain Host', - cls=Domain, list_children=True, include_data=True + feed_id=feed_id, + resource_type_id="Domain Host", + cls=Domain, + list_children=True, + include_data=True, ) return domains def list_server_group(self, feed_id): """Returns list of middleware domain's server groups. - Args: - feed_id: Feed id of the resource (optional) + Args: + feed_id: Feed id of the resource (optional) """ server_groups = self.list_resource( - feed_id=feed_id, resource_type_id='Domain Server Group', - cls=ServerGroup, list_children=True, include_data=True + feed_id=feed_id, + resource_type_id="Domain Server Group", + cls=ServerGroup, + list_children=True, + include_data=True, ) return server_groups @@ -814,13 +968,14 @@ def list_server_deployment(self, feed_id=None): feed_id: Feed id of the resource (optional) """ deployments = self.list_resource( - feed_id=feed_id, resource_type_id='Deployment', - cls=Deployment, list_children=True + feed_id=feed_id, resource_type_id="Deployment", cls=Deployment, list_children=True ) deployments.extend( self.list_resource( - feed_id=feed_id, resource_type_id='SubDeployment', - cls=Deployment, list_children=True + feed_id=feed_id, + resource_type_id="SubDeployment", + cls=Deployment, + list_children=True, ) ) return deployments @@ -828,17 +983,15 @@ def list_server_deployment(self, feed_id=None): def list_messaging(self, feed_id=None): """Returns list of massagings (JMS Queue and JMS Topic). - Args: - feed_id: Feed id of the resource (optional) + Args: + feed_id: Feed id of the resource (optional) """ messagings = self.list_resource( - feed_id=feed_id, resource_type_id='JMS Queue', - cls=Messaging, list_children=True + feed_id=feed_id, resource_type_id="JMS Queue", cls=Messaging, list_children=True ) messagings.extend( self.list_resource( - feed_id=feed_id, resource_type_id='JMS Topic', - cls=Messaging, list_children=True + feed_id=feed_id, resource_type_id="JMS Topic", cls=Messaging, list_children=True ) ) return messagings @@ -846,31 +999,33 @@ def list_messaging(self, feed_id=None): def list_server_datasource(self, feed_id=None): """Returns list of datasources (both XA and non XA). 
- Args: - feed_id: Feed id of the datasource (optional) + Args: + feed_id: Feed id of the datasource (optional) """ datasources = self.list_resource( - feed_id=feed_id, resource_type_id='Datasource', - cls=Datasource, list_children=True + feed_id=feed_id, resource_type_id="Datasource", cls=Datasource, list_children=True ) datasources.extend( self.list_resource( - feed_id=feed_id, resource_type_id='XA Datasource', - cls=Datasource, list_children=True + feed_id=feed_id, + resource_type_id="XA Datasource", + cls=Datasource, + list_children=True, ) ) return datasources - def list_resource(self, resource_type_id, cls, feed_id=None, - list_children=False, include_data=False): + def list_resource( + self, resource_type_id, cls, feed_id=None, list_children=False, include_data=False + ): """Returns list of resources. - Args: - feed_id: Feed id of the resource (optional) - resource_type_id: Resource type id - cls: the class of resource - list_children: whether recursively list child resources (optional) - include_data: whether to include data value of resource (optional) + Args: + feed_id: Feed id of the resource (optional) + resource_type_id: Resource type id + cls: the class of resource + list_children: whether recursively list child resources (optional) + include_data: whether to include data value of resource (optional) """ results = [] resources = [] @@ -878,33 +1033,36 @@ def list_resource(self, resource_type_id, cls, feed_id=None, for feed in self.list_feed(): resources.extend( self._list_resource( - feed_id=feed.path.feed_id, resource_type_id=resource_type_id, - list_children=list_children, include_data=include_data + feed_id=feed.path.feed_id, + resource_type_id=resource_type_id, + list_children=list_children, + include_data=include_data, ) ) else: resources = self._list_resource( - feed_id=feed_id, resource_type_id=resource_type_id, - list_children=list_children, include_data=include_data + feed_id=feed_id, + resource_type_id=resource_type_id, + list_children=list_children, + include_data=include_data, ) for resource in resources: - kwargs = dict( - id=resource.id, name=resource.name, path=resource.path - ) + kwargs = dict(id=resource.id, name=resource.name, path=resource.path) if include_data: kwargs.update(data=resource.data) results.append(cls(**kwargs)) return results - def _list_resource(self, feed_id, resource_type_id=None, - list_children=False, include_data=False): + def _list_resource( + self, feed_id, resource_type_id=None, list_children=False, include_data=False + ): """Returns list of resources. 
- Args: - feed_id: Feed id of the resource - resource_type_id: Resource type id (optional) - list_children: whether recursively list child resources (optional) - include_data: whether to include data value of resource (optional) + Args: + feed_id: Feed id of the resource + resource_type_id: Resource type id (optional) + list_children: whether recursively list child resources (optional) + include_data: whether to include data value of resource (optional) """ if not feed_id: raise KeyError("'feed_id' is a mandatory field!") @@ -912,84 +1070,89 @@ def _list_resource(self, feed_id, resource_type_id=None, data = {"fromEarliest": "true", "order": "DESC"} if resource_type_id: - data["tags"] = "feed:{},type:r,restypes:.*\\|{}\\|.*"\ - .format(feed_id, resource_type_id) + data["tags"] = f"feed:{feed_id},type:r,restypes:.*\\|{resource_type_id}\\|.*" else: - data["tags"] = "feed:{},type:r".format(feed_id) - result = self._post_raw('strings/raw/query', data=data) + data["tags"] = f"feed:{feed_id},type:r" + result = self._post_raw("strings/raw/query", data=data) if result.status_code != 200: return entities for entity_j in json.loads(result.content): - entity_value = self._get_data_value(entity_j['data']) + entity_value = self._get_data_value(entity_j["data"]) if entity_value: - types_index = self._filter_types_index(entity_value['typesIndex'], - resource_type_id) - entity_data = entity_value['inventoryStructure']['data'] - parent_resource_id = entity_data['id'] + types_index = self._filter_types_index(entity_value["typesIndex"], resource_type_id) + entity_data = entity_value["inventoryStructure"]["data"] + parent_resource_id = entity_data["id"] if not list_children: # return only parent resource entities.append( ResourceWithData( - entity_data['id'], - entity_data['name'], + entity_data["id"], + entity_data["name"], self._get_canonical_path( - types_index, entity_data['id'], - entity_data['resourceTypePath'], - parent_resource_id), + types_index, + entity_data["id"], + entity_data["resourceTypePath"], + parent_resource_id, + ), self._get_child_data_value( - include_data, - entity_value['inventoryStructure']) + include_data, entity_value["inventoryStructure"] + ), ) ) else: # recursivelly search in child resources entities.extend( self._list_child_resource( - entity_value['inventoryStructure']['children']['resource'], - include_data, resource_type_id, types_index, - parent_resource_id + entity_value["inventoryStructure"]["children"]["resource"], + include_data, + resource_type_id, + types_index, + parent_resource_id, ) ) return entities - def _list_child_resource(self, children_j, include_data, - resource_type_id, - types_index, - parent_resource_id): + def _list_child_resource( + self, children_j, include_data, resource_type_id, types_index, parent_resource_id + ): """Returns list of child resources by recursively searching. 
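# In the metrics-backed inventory, _list_resource() above selects resources
# purely by metric tags: each inventory string metric is tagged with its feed
# and a '|'-delimited list of contained resource types, so the type filter
# becomes a regex over that list. The tag expressions it builds:
def resource_tags(feed_id, resource_type_id=None):
    if resource_type_id:
        return f"feed:{feed_id},type:r,restypes:.*\\|{resource_type_id}\\|.*"
    return f"feed:{feed_id},type:r"

print(resource_tags("feed-1", "Deployment"))
# -> 'feed:feed-1,type:r,restypes:.*\|Deployment\|.*'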
- Args: - children_j: list of children json objects - include_data: whether to include data value of resource - resource_type_id: Resource type id - types_index: list of typesIndex from inventory - parent_resource_id: Id of parent resource + Args: + children_j: list of children json objects + include_data: whether to include data value of resource + resource_type_id: Resource type id + types_index: list of typesIndex from inventory + parent_resource_id: Id of parent resource """ entities = [] for child_j in children_j: - child_data = child_j['data'] - if child_data['name'].startswith('{} ['.format(resource_type_id)): + child_data = child_j["data"] + if child_data["name"].startswith(f"{resource_type_id} ["): # chose those children which name starts with provided resource type id entities.append( ResourceWithData( - child_data['id'], - child_data['name'], + child_data["id"], + child_data["name"], self._get_canonical_path( types_index, - child_data['id'], - child_data['resourceTypePath'], - parent_resource_id), - self._get_child_data_value(include_data, child_j) + child_data["id"], + child_data["resourceTypePath"], + parent_resource_id, + ), + self._get_child_data_value(include_data, child_j), ) ) - elif 'resource' in child_j['children']: + elif "resource" in child_j["children"]: # otherwise recursively search in children resources entities.extend( self._list_child_resource( - child_j['children']['resource'], include_data, - resource_type_id, types_index, parent_resource_id + child_j["children"]["resource"], + include_data, + resource_type_id, + types_index, + parent_resource_id, ) ) return entities @@ -1000,43 +1163,48 @@ def get_config_data(self, feed_id, resource_id): Args: feed_id: Feed id of the resource resource_id: Resource id - """ + """ if not feed_id or not resource_id: raise KeyError("'feed_id' and 'resource_id' are mandatory field!") result = self._post_raw( - 'strings/raw/query', + "strings/raw/query", data={ "fromEarliest": "true", "order": "DESC", "tags": "feed:{},type:r,id:{}".format( - feed_id, self._get_parent_resource_id(resource_id)) - } + feed_id, self._get_parent_resource_id(resource_id) + ), + }, ) if result.status_code == 200: entity_j = json.loads(result.content) if entity_j: try: - inventory_j = self._get_data_value( - entity_j[0]['data'])['inventoryStructure'] - if inventory_j['data']['id'] in self._decode_resource_id( - self._get_child_resource_id(resource_id)): + inventory_j = self._get_data_value(entity_j[0]["data"])["inventoryStructure"] + if inventory_j["data"]["id"] in self._decode_resource_id( + self._get_child_resource_id(resource_id) + ): data_value = self._get_child_data_value(True, inventory_j) - return ResourceData(inventory_j['data']['name'], - CanonicalPath( - inventory_j['data']['resourceTypePath']), - data_value) - for resource_j in inventory_j['children']['resource']: - if resource_j['data']['id'] in self._decode_resource_id( - self._get_resource_id(resource_id)): + return ResourceData( + inventory_j["data"]["name"], + CanonicalPath(inventory_j["data"]["resourceTypePath"]), + data_value, + ) + for resource_j in inventory_j["children"]["resource"]: + if resource_j["data"]["id"] in self._decode_resource_id( + self._get_resource_id(resource_id) + ): data_value = self._get_child_data_value(True, resource_j) - return ResourceData(resource_j['data']['name'], - CanonicalPath( - resource_j['data']['resourceTypePath']), - data_value) + return ResourceData( + resource_j["data"]["name"], + CanonicalPath(resource_j["data"]["resourceTypePath"]), + data_value, + 
) except Exception: raise KeyError( - 'Resource data not found for resource {} in feed {}' - .format(resource_id, feed_id) + "Resource data not found for resource {} in feed {}".format( + resource_id, feed_id + ) ) return None @@ -1061,28 +1229,26 @@ def _get_canonical_path(self, types_index, data_id, resource_type_path, parent_r resource_type_path: the path of resource type parent_resource_id: id of parent Resource """ - resource_id = '' + resource_id = "" data_id = self._encode_resource_id(data_id) for index in types_index: if index.endswith(data_id): - """ take full resource_id if it is listed in index types, - otherwise resource itself is the parent resource + """take full resource_id if it is listed in index types, + otherwise resource itself is the parent resource """ resource_id = index break # format resource id to be used in canonical path if resource_id: - if resource_id.startswith('r;'): - resource_id = '/{}'.format(resource_id) + if resource_id.startswith("r;"): + resource_id = f"/{resource_id}" else: - resource_id = '/r;{}'.format(resource_id) + resource_id = f"/r;{resource_id}" """ build canonical path based on resource type path, parent resource id and appropriate resource id of provided data_id """ - return CanonicalPath('{}/r;{}{}'.format(resource_type_path, - parent_resource_id, - resource_id)) + return CanonicalPath(f"{resource_type_path}/r;{parent_resource_id}{resource_id}") def _get_child_data_value(self, include_data, child_j): """ @@ -1091,16 +1257,16 @@ def _get_child_data_value(self, include_data, child_j): data_value = None if include_data: try: - data_value = child_j['children']['dataEntity'][0]['data']['value'] + data_value = child_j["children"]["dataEntity"][0]["data"]["value"] except Exception: - raise KeyError('Data value not found for {}'.format(child_j)) + raise KeyError(f"Data value not found for {child_j}") return data_value def _encode_resource_id(self, resource_id): """ URL quotes the provided resource id """ - return urlquote(resource_id, safe='~') + return urlquote(resource_id, safe="~") def _decode_resource_id(self, resource_id): """ @@ -1116,7 +1282,7 @@ def _get_parent_resource_id(self, resource_id): def _get_resource_id(self, resource_id): if isinstance(resource_id, list): - return "{}".format('/r;'.join(resource_id)) + return "{}".format("/r;".join(resource_id)) else: return resource_id @@ -1133,22 +1299,22 @@ def _build_from_chunks(self, data_node): """ Builds the whole data from several chunks. """ - result = '' + result = "" if not data_node: - return '' + return "" master_data = data_node[0] - result = "{}{}".format(result, self._decode(master_data['value'])) + result = "{}{}".format(result, self._decode(master_data["value"])) # if data is not in chunks, then return the first node's value - if 'tags' not in master_data or 'chunks' not in master_data['tags']: + if "tags" not in master_data or "chunks" not in master_data["tags"]: return result # join the values in chunks - last_chunk = int(master_data['tags']['chunks']) + last_chunk = int(master_data["tags"]["chunks"]) for chunk_id in range(1, last_chunk): slave_data = data_node[chunk_id] - result = "{}{}".format(result, self._decode(slave_data['value'])) + result = "{}{}".format(result, self._decode(slave_data["value"])) return result def _decode(self, raw): @@ -1164,64 +1330,77 @@ def _decompress(self, raw): class HawkularMetric(HawkularService): def __init__(self, hostname, port, protocol, auth, tenant_id): """Creates hawkular metric service instance. 
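# _build_from_chunks() above reassembles one logical value that was stored
# across several string-metric data points: data_node[0] carries a
# tags['chunks'] count, and the following points hold the ordered tail. The
# real _decode()/_decompress() are untouched by this diff; the sketch below
# substitutes plain base64 decoding so it stays self-contained.
import base64

def build_from_chunks(data_node, decode=lambda v: base64.b64decode(v).decode()):
    if not data_node:
        return ""
    master = data_node[0]
    result = decode(master["value"])
    if "tags" not in master or "chunks" not in master["tags"]:
        return result  # value fit in a single data point
    for chunk_id in range(1, int(master["tags"]["chunks"])):
        result += decode(data_node[chunk_id]["value"])
    return result

node = [
    {"value": base64.b64encode(b"hello ").decode(), "tags": {"chunks": "2"}},
    {"value": base64.b64encode(b"world").decode()},
]
print(build_from_chunks(node))  # -> 'hello world'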
For args refer 'HawkularService'""" - HawkularService.__init__(self, hostname=hostname, port=port, protocol=protocol, - auth=auth, tenant_id=tenant_id, entry="hawkular/metrics") + HawkularService.__init__( + self, + hostname=hostname, + port=port, + protocol=protocol, + auth=auth, + tenant_id=tenant_id, + entry="hawkular/metrics", + ) @staticmethod def _metric_id_availability_feed(feed_id): - return "hawkular-feed-availability-{}".format(feed_id) + return f"hawkular-feed-availability-{feed_id}" @staticmethod def _metric_id_availability_server(feed_id, server_id): - return "AI~R~[{}/{}~~]~AT~Server Availability~Server Availability"\ - .format(feed_id, server_id) + return "AI~R~[{}/{}~~]~AT~Server Availability~Server Availability".format( + feed_id, server_id + ) @staticmethod def _metric_id_availability_deployment(feed_id, server_id, resource_id): - return "AI~R~[{}/{}~/deployment={}]~AT~Deployment Status~Deployment Status"\ - .format(feed_id, server_id, resource_id) + return "AI~R~[{}/{}~/deployment={}]~AT~Deployment Status~Deployment Status".format( + feed_id, server_id, resource_id + ) @staticmethod def _metric_id_guage_server(feed_id, server_id, metric_enum): if not isinstance(metric_enum, MetricEnumGauge): raise KeyError("'metric_enum' should be a type of 'MetricEnumGauge' Enum class") - return "MI~R~[{}/{}~~]~MT~{}~{}".format(feed_id, server_id, metric_enum.metric_type, - metric_enum.sub_type) + return "MI~R~[{}/{}~~]~MT~{}~{}".format( + feed_id, server_id, metric_enum.metric_type, metric_enum.sub_type + ) @staticmethod def _metric_id_guage_datasource(feed_id, server_id, resource_id, metric_enum): if not isinstance(metric_enum, MetricEnumGauge): raise KeyError("'metric_enum' should be a type of 'MetricEnumGauge' Enum class") - return "MI~R~[{}/{}~/subsystem=datasources/data-source={}]~MT~{}~{}" \ - .format(feed_id, server_id, resource_id, metric_enum.metric_type, metric_enum.sub_type) + return "MI~R~[{}/{}~/subsystem=datasources/data-source={}]~MT~{}~{}".format( + feed_id, server_id, resource_id, metric_enum.metric_type, metric_enum.sub_type + ) @staticmethod def _metric_id_counter_server(feed_id, server_id, metric_enum): if not isinstance(metric_enum, MetricEnumCounter): raise KeyError("'metric_enum' should be a type of 'MetricEnumCounter' Enum class") if MetricEnumCounter.SVR_TXN_NUMBER_OF_TRANSACTIONS.metric_type == metric_enum.metric_type: - metric_id = "MI~R~[{}/{}~/subsystem=transactions]~MT~{}~{}" \ - .format(feed_id, server_id, metric_enum.metric_type, metric_enum.sub_type) + metric_id = "MI~R~[{}/{}~/subsystem=transactions]~MT~{}~{}".format( + feed_id, server_id, metric_enum.metric_type, metric_enum.sub_type + ) else: - metric_id = "MI~R~[{}/{}~~]~MT~{}~{}".format(feed_id, server_id, - metric_enum.metric_type, - metric_enum.sub_type) + metric_id = "MI~R~[{}/{}~~]~MT~{}~{}".format( + feed_id, server_id, metric_enum.metric_type, metric_enum.sub_type + ) return metric_id @staticmethod def _metric_id_counter_deployment(feed_id, server_id, resource_id, metric_enum): if not isinstance(metric_enum, MetricEnumCounter): raise KeyError("'metric_enum' should be a type of 'MetricEnumCounter' Enum class") - return "MI~R~[{}/{}~/deployment={}]~MT~{}~{}".format(feed_id, server_id, resource_id, - metric_enum.metric_type, - metric_enum.sub_type) + return "MI~R~[{}/{}~/deployment={}]~MT~{}~{}".format( + feed_id, server_id, resource_id, metric_enum.metric_type, metric_enum.sub_type + ) @staticmethod def _metric_id_jms(feed_id, server_id, jms_name, metric_enum): if not isinstance(metric_enum, 
MetricEnum): raise KeyError("'metric_enum' should be a type of 'MetricEnum' Enum class") - return "MI~R~[{}/{}~/subsystem=messaging-activemq/server=default/{}]~MT~{}~{}"\ - .format(feed_id, server_id, jms_name, metric_enum.metric_type, metric_enum.sub_type) + return "MI~R~[{}/{}~/subsystem=messaging-activemq/server=default/{}]~MT~{}~{}".format( + feed_id, server_id, jms_name, metric_enum.metric_type, metric_enum.sub_type + ) def list_availability_feed(self, feed_id, **kwargs): """Returns list of DataPoint of a feed @@ -1250,8 +1429,9 @@ def list_availability_deployment(self, feed_id, server_id, resource_id, **kwargs resource_id: deployment id kwargs: Refer ``list_availability`` """ - metric_id = self._metric_id_availability_deployment(feed_id=feed_id, server_id=server_id, - resource_id=resource_id) + metric_id = self._metric_id_availability_deployment( + feed_id=feed_id, server_id=server_id, resource_id=resource_id + ) return self.list_availability(metric_id=metric_id, **kwargs) def list_availability(self, metric_id, **kwargs): @@ -1272,52 +1452,56 @@ def list_availability(self, metric_id, **kwargs): Query type: raw: set True when you want to get raw data, Default False which returns stats """ - prefix_id = "availability/{}".format(urlquote(metric_id, safe='')) + prefix_id = "availability/{}".format(urlquote(metric_id, safe="")) return self._list_data(prefix_id=prefix_id, **kwargs) def list_gauge_datasource(self, feed_id, server_id, resource_id, metric_enum, **kwargs): """Returns list of NumericBucketPoint of datasource metric - Args: - feed_id: feed id of the datasource - server_id: server id of the datasource - resource_id: resource id, here which is datasource id - metric_enum: Any one of *DS_* Enum value from ``MetricEnumGauge`` - kwargs: Refer ``list_gauge`` - """ - metric_id = self._metric_id_guage_datasource(feed_id=feed_id, server_id=server_id, - resource_id=resource_id, - metric_enum=metric_enum) + Args: + feed_id: feed id of the datasource + server_id: server id of the datasource + resource_id: resource id, here which is datasource id + metric_enum: Any one of *DS_* Enum value from ``MetricEnumGauge`` + kwargs: Refer ``list_gauge`` + """ + metric_id = self._metric_id_guage_datasource( + feed_id=feed_id, server_id=server_id, resource_id=resource_id, metric_enum=metric_enum + ) return self.list_gauge(metric_id=metric_id, **kwargs) def list_gauge_server(self, feed_id, server_id, metric_enum, **kwargs): """Returns list of `NumericBucketPoint` of server metric - Args: - feed_id: feed id of the server - server_id: server id - metric_enum: Any one of *SVR_* ``Enum`` value from ``MetricEnumGauge`` - kwargs: Refer ``list_gauge`` - """ - metric_id = self._metric_id_guage_server(feed_id=feed_id, server_id=server_id, - metric_enum=metric_enum) + Args: + feed_id: feed id of the server + server_id: server id + metric_enum: Any one of *SVR_* ``Enum`` value from ``MetricEnumGauge`` + kwargs: Refer ``list_gauge`` + """ + metric_id = self._metric_id_guage_server( + feed_id=feed_id, server_id=server_id, metric_enum=metric_enum + ) return self.list_gauge(metric_id=metric_id, **kwargs) def list_jms(self, feed_id, server_id, name, metric_enum, **kwargs): """Returns list of `NumericBucketPoint` of server metric - Args: - feed_id: feed id of the server - server_id: server id - name: name of the JMS queue/topic - metric_enum: Any one of *SVR_* ``Enum`` value from ``MetricEnumGauge`` - kwargs: Refer ``list_gauge`` - """ + Args: + feed_id: feed id of the server + server_id: server id + name: name of the 
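# The _metric_id_* builders above all emit Hawkular Metrics ids of the form
#   <domain>~R~[<feed>/<server>~<subpath>]~MT~<metric type>~<sub type>
# where the domain is MI for numeric metrics and AI for availability. A sketch
# of the gauge-on-server case, with a stand-in enum instead of the library's
# MetricEnumGauge (the member value below is illustrative):
from enum import Enum

class SketchGauge(Enum):
    SVR_HEAP_USED = ("WildFly Memory Metrics", "Heap Used")

    def __init__(self, metric_type, sub_type):
        self.metric_type = metric_type
        self.sub_type = sub_type

def gauge_server_metric_id(feed_id, server_id, metric_enum):
    return "MI~R~[{}/{}~~]~MT~{}~{}".format(
        feed_id, server_id, metric_enum.metric_type, metric_enum.sub_type
    )

print(gauge_server_metric_id("feed-1", "server-one", SketchGauge.SVR_HEAP_USED))
# -> 'MI~R~[feed-1/server-one~~]~MT~WildFly Memory Metrics~Heap Used'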
JMS queue/topic + metric_enum: Any one of *SVR_* ``Enum`` value from ``MetricEnumGauge`` + kwargs: Refer ``list_gauge`` + """ if not isinstance(metric_enum, MetricEnum): raise KeyError("'metric_enum' should be a type of 'MetricEnum' Enum class") jms_type = "topic" if "Topic" not in metric_enum.metric_type: jms_type = "queue" - metric_id = self._metric_id_jms(feed_id=feed_id, server_id=server_id, - jms_name="jms-{}={}".format(jms_type, name), - metric_enum=metric_enum) + metric_id = self._metric_id_jms( + feed_id=feed_id, + server_id=server_id, + jms_name=f"jms-{jms_type}={name}", + metric_enum=metric_enum, + ) if isinstance(metric_enum, MetricEnumGauge): return self.list_gauge(metric_id=metric_id, **kwargs) elif isinstance(metric_enum, MetricEnumCounter): @@ -1325,115 +1509,115 @@ def list_jms(self, feed_id, server_id, name, metric_enum, **kwargs): def list_gauge(self, metric_id, **kwargs): """Returns list of `NumericBucketPoint` of a metric - Args: - metric_id: Metric id - kwargs: Refer optional query params and query type - - Optional query params: - start: timestamp, Defaults to now: 8 hours - end: timestamp, Defaults to now - buckets: Total number of buckets - bucketDuration: Bucket duration - distinct: Set to true to return only distinct, contiguous values - limit: Limit the number of data points returned - order: Data point sort order, based on timestamp [values: ASC, DESC] - - Query type: - raw: set True when you want to get raw data, Default False which returns stats - rate: set True when you want rate data default False - stats: return stats data default True - """ - prefix_id = "gauges/{}".format(urlquote(metric_id, safe='')) + Args: + metric_id: Metric id + kwargs: Refer optional query params and query type + + Optional query params: + start: timestamp, Defaults to now: 8 hours + end: timestamp, Defaults to now + buckets: Total number of buckets + bucketDuration: Bucket duration + distinct: Set to true to return only distinct, contiguous values + limit: Limit the number of data points returned + order: Data point sort order, based on timestamp [values: ASC, DESC] + + Query type: + raw: set True when you want to get raw data, Default False which returns stats + rate: set True when you want rate data default False + stats: return stats data default True + """ + prefix_id = "gauges/{}".format(urlquote(metric_id, safe="")) return self._list_data(prefix_id=prefix_id, **kwargs) def list_counter_server(self, feed_id, server_id, metric_enum, **kwargs): """Returns list of `NumericBucketPoint` of server metric - Args: - feed_id: feed id of the server - server_id: server id - metric_enum: Any one of *SVR_* ``Enum`` value from ``MetricEnumCounter`` - kwargs: Refer ``list_counter`` - """ - metric_id = self._metric_id_counter_server(feed_id=feed_id, server_id=server_id, - metric_enum=metric_enum) + Args: + feed_id: feed id of the server + server_id: server id + metric_enum: Any one of *SVR_* ``Enum`` value from ``MetricEnumCounter`` + kwargs: Refer ``list_counter`` + """ + metric_id = self._metric_id_counter_server( + feed_id=feed_id, server_id=server_id, metric_enum=metric_enum + ) return self.list_counter(metric_id=metric_id, **kwargs) - def list_counter_deployment(self, - feed_id, server_id, resource_id, metric_enum, **kwargs): + def list_counter_deployment(self, feed_id, server_id, resource_id, metric_enum, **kwargs): """Returns list of `NumericBucketPoint` of server metric - Args: - feed_id: feed id of the deployment - server_id: server id of the deployment - resource_id: resource id, 
that's deployment id - metric_enum: Any one of *DEP_* ``Enum`` value from ``MetricEnumCounter`` - kwargs: Refer ``list_counter`` - """ - metric_id = self._metric_id_counter_deployment(feed_id=feed_id, server_id=server_id, - resource_id=resource_id, - metric_enum=metric_enum) + Args: + feed_id: feed id of the deployment + server_id: server id of the deployment + resource_id: resource id, that's deployment id + metric_enum: Any one of *DEP_* ``Enum`` value from ``MetricEnumCounter`` + kwargs: Refer ``list_counter`` + """ + metric_id = self._metric_id_counter_deployment( + feed_id=feed_id, server_id=server_id, resource_id=resource_id, metric_enum=metric_enum + ) return self.list_counter(metric_id=metric_id, **kwargs) def list_counter(self, metric_id, **kwargs): """Returns list of `NumericBucketPoint` of a metric - Args: - metric_id: metric id - kwargs: Refer optional query params and query type - - Optional query params: - start: timestamp, Defaults to now: 8 hours - end: timestamp, Defaults to now - buckets: Total number of buckets - bucketDuration: Bucket duration - distinct: Set to true to return only distinct, contiguous values - limit: Limit the number of data points returned - order: Data point sort order, based on timestamp [values: ASC, DESC] - - Query type: - raw: set True when you want to get raw data, Default False which returns stats - rate: set True when you want rate data default False - stats: return stats data default True - """ - prefix_id = "counters/{}".format(urlquote(metric_id, safe='')) + Args: + metric_id: metric id + kwargs: Refer optional query params and query type + + Optional query params: + start: timestamp, Defaults to now: 8 hours + end: timestamp, Defaults to now + buckets: Total number of buckets + bucketDuration: Bucket duration + distinct: Set to true to return only distinct, contiguous values + limit: Limit the number of data points returned + order: Data point sort order, based on timestamp [values: ASC, DESC] + + Query type: + raw: set True when you want to get raw data, Default False which returns stats + rate: set True when you want rate data default False + stats: return stats data default True + """ + prefix_id = "counters/{}".format(urlquote(metric_id, safe="")) return self._list_data(prefix_id=prefix_id, **kwargs) def list_availability_definition(self): """Lists all availability type metric definitions""" - return self._get(path='availability') + return self._get(path="availability") def list_gauge_definition(self): """Lists all gauge type metric definitions""" - return self._get(path='gauges') + return self._get(path="gauges") def list_counter_definition(self): """Lists all counter type metric definitions""" - return self._get(path='counters') + return self._get(path="counters") def list_definition(self): """Lists all metric definitions""" - return self._get(path='metrics') + return self._get(path="metrics") def _list_data(self, prefix_id, **kwargs): params = { - 'start': kwargs.get('start', None), - 'end': kwargs.get('end', None), - 'bucketDuration': kwargs.get('bucket_duration', None), - 'buckets': kwargs.get('buckets', None), - 'percentiles': kwargs.get('percentiles', None), - 'limit': kwargs.get('limit', None), - 'order': kwargs.get('order', None), + "start": kwargs.get("start", None), + "end": kwargs.get("end", None), + "bucketDuration": kwargs.get("bucket_duration", None), + "buckets": kwargs.get("buckets", None), + "percentiles": kwargs.get("percentiles", None), + "limit": kwargs.get("limit", None), + "order": kwargs.get("order", None), } - 
if kwargs.get('bucketDuration', None) is not None: - params['bucketDuration'] = kwargs.get('bucketDuration') - raw = kwargs.get('raw', False) - rate = kwargs.get('rate', False) - if not raw and params['bucketDuration'] is None and params['buckets'] is None: + if kwargs.get("bucketDuration", None) is not None: + params["bucketDuration"] = kwargs.get("bucketDuration") + raw = kwargs.get("raw", False) + rate = kwargs.get("rate", False) + if not raw and params["bucketDuration"] is None and params["buckets"] is None: raise KeyError("Either the 'buckets' or 'bucket_duration' parameter must be used") if rate: - return self._get(path='{}/rate/stats'.format(prefix_id), params=params) + return self._get(path=f"{prefix_id}/rate/stats", params=params) elif raw: - return self._get(path='{}/raw'.format(prefix_id), params=params) + return self._get(path=f"{prefix_id}/raw", params=params) else: - return self._get(path='{}/stats'.format(prefix_id), params=params) + return self._get(path=f"{prefix_id}/stats", params=params) def add_availability_feed(self, data, feed_id): """Add availability data for a feed @@ -1462,8 +1646,9 @@ def add_availability_deployment(self, data, feed_id, server_id, resource_id): server_id: server id resource_id: resource id (deployment id) """ - metric_id = self._metric_id_counter_deployment(feed_id=feed_id, server_id=server_id, - resource_id=resource_id) + metric_id = self._metric_id_counter_deployment( + feed_id=feed_id, server_id=server_id, resource_id=resource_id + ) self.add_availability(data=data, metric_id=metric_id) def add_gauge_server(self, data, feed_id, server_id, metric_enum): @@ -1474,8 +1659,9 @@ def add_gauge_server(self, data, feed_id, server_id, metric_enum): server_id: server id metric_enum: type of MetricEmumGuage """ - metric_id = self._metric_id_gauge_server(feed_id=feed_id, server_id=server_id, - metric_enum=metric_enum) + metric_id = self._metric_id_gauge_server( + feed_id=feed_id, server_id=server_id, metric_enum=metric_enum + ) self.add_gauge(data=data, metric_id=metric_id) def add_gauge_datasource(self, data, feed_id, server_id, resource_id, metric_enum): @@ -1487,9 +1673,9 @@ def add_gauge_datasource(self, data, feed_id, server_id, resource_id, metric_enu resource_id: resource id (datasource id) metric_enum: type of MetricEmumGuage """ - metric_id = self._metric_id_guage_datasource(feed_id=feed_id, server_id=server_id, - resource_id=resource_id, - metric_enum=metric_enum) + metric_id = self._metric_id_guage_datasource( + feed_id=feed_id, server_id=server_id, resource_id=resource_id, metric_enum=metric_enum + ) self.add_gauge(data=data, metric_id=metric_id) def add_counter_server(self, data, feed_id, server_id, metric_enum): @@ -1500,65 +1686,66 @@ def add_counter_server(self, data, feed_id, server_id, metric_enum): server_id: server id metric_enum: type of MetricEmumCounter """ - metric_id = self._metric_id_counter_server(feed_id=feed_id, server_id=server_id, - metric_enum=metric_enum) + metric_id = self._metric_id_counter_server( + feed_id=feed_id, server_id=server_id, metric_enum=metric_enum + ) self.add_counter(data=data, metric_id=metric_id) def add_counter_deployment(self, data, feed_id, server_id, resource_id, metric_enum): """Add counter data for a deployment - Args: - data: list of DataPoint - feed_id: feed id - server_id: server id - resource_id: resource id (deployment id) - metric_enum: type of MetricEmumCounter - """ - metric_id = self._metric_id_counter_deployment(feed_id=feed_id, server_id=server_id, - resource_id=resource_id, - 
metric_enum=metric_enum) + Args: + data: list of DataPoint + feed_id: feed id + server_id: server id + resource_id: resource id (deployment id) + metric_enum: type of MetricEmumCounter + """ + metric_id = self._metric_id_counter_deployment( + feed_id=feed_id, server_id=server_id, resource_id=resource_id, metric_enum=metric_enum + ) self.add_counter(data=data, metric_id=metric_id) def add_string(self, data, metric_id=None): """Add string data for a metric or metrics - Args: - data: list of DataPoint - metric_id: metric id - """ - self._post_data(prefix_id='strings', data=data, metric_id=metric_id) + Args: + data: list of DataPoint + metric_id: metric id + """ + self._post_data(prefix_id="strings", data=data, metric_id=metric_id) def add_gauge(self, data, metric_id=None): """Add guage data for a metric or metrics - Args: - data: list of DataPoint - metric_id: metric id - """ - self._post_data(prefix_id='gauges', data=data, metric_id=metric_id) + Args: + data: list of DataPoint + metric_id: metric id + """ + self._post_data(prefix_id="gauges", data=data, metric_id=metric_id) def add_counter(self, data, metric_id=None): """Add counter data for a metric or metrics - Args: - data: list of DataPoint - metric_id: metric id - """ - self._post_data(prefix_id='counters', data=data, metric_id=metric_id) + Args: + data: list of DataPoint + metric_id: metric id + """ + self._post_data(prefix_id="counters", data=data, metric_id=metric_id) def add_availability(self, data, metric_id=None): """Add availability data for a metric or metrics - Args: - data: list of DataPoint - metric_id: metric id - """ - self._post_data(prefix_id='availability', data=data, metric_id=metric_id) + Args: + data: list of DataPoint + metric_id: metric id + """ + self._post_data(prefix_id="availability", data=data, metric_id=metric_id) def _post_data(self, prefix_id, data, metric_id=None): if metric_id: - metric_id = urlquote(metric_id, safe='') - self._post(path='{}/{}/raw'.format(prefix_id, metric_id), data=data) + metric_id = urlquote(metric_id, safe="") + self._post(path=f"{prefix_id}/{metric_id}/raw", data=data) else: - self._post(path='{}/raw'.format(prefix_id), data=data) + self._post(path=f"{prefix_id}/raw", data=data) -class HawkularOperation(object): +class HawkularOperation: def __init__(self, hostname, port, username, password, tenant_id, connect=True): """Creates hawkular command gateway websocket client service instance. Args: @@ -1570,16 +1757,26 @@ def __init__(self, hostname, port, username, password, tenant_id, connect=True): connect: If you do not want to connect on initialization pass this as False """ self.cmd_gw_ws_api = HawkularWebsocketClient( - url="ws://{}:{}/hawkular/command-gateway/ui/ws".format(hostname, port), + url=f"ws://{hostname}:{port}/hawkular/command-gateway/ui/ws", headers={"Hawkular-Tenant": tenant_id, "Accept": "application/json"}, - username=username, password=password) + username=username, + password=password, + ) self.tenant_id = tenant_id if connect: self.cmd_gw_ws_api.connect() - def add_jdbc_driver(self, feed_id, server_id, driver_name, module_name, - driver_class, driver_jar_name=None, binary_content=None, - binary_file_location=None): + def add_jdbc_driver( + self, + feed_id, + server_id, + driver_name, + module_name, + driver_class, + driver_jar_name=None, + binary_content=None, + binary_file_location=None, + ): """Adds JDBC driver on specified server under specified feed. 
return status Args: feed_id: feed id of the server @@ -1592,16 +1789,24 @@ def add_jdbc_driver(self, feed_id, server_id, driver_name, module_name, binary_file_location: driver file location(on local disk) """ if driver_jar_name and not binary_content and not binary_file_location: - raise KeyError("If 'driver_jar_name' field is set the jar file must be passed" - " as binary or file location") - resource_path = "/t;{}/f;{}/r;{}~~".format(self.tenant_id, feed_id, server_id) - payload = {"resourcePath": resource_path, "driverJarName": driver_jar_name, - "driverName": driver_name, "moduleName": module_name, - "driverClass": driver_class} - return self.cmd_gw_ws_api.hwk_invoke_operation(operation_name="AddJdbcDriver", - payload=payload, - binary_file_location=binary_file_location, - binary_content=binary_content) + raise KeyError( + "If 'driver_jar_name' field is set the jar file must be passed" + " as binary or file location" + ) + resource_path = f"/t;{self.tenant_id}/f;{feed_id}/r;{server_id}~~" + payload = { + "resourcePath": resource_path, + "driverJarName": driver_jar_name, + "driverName": driver_name, + "moduleName": module_name, + "driverClass": driver_class, + } + return self.cmd_gw_ws_api.hwk_invoke_operation( + operation_name="AddJdbcDriver", + payload=payload, + binary_file_location=binary_file_location, + binary_content=binary_content, + ) def remove_jdbc_driver(self, feed_id, server_id, driver_name): """Removes JDBC driver on specified server under specified feed. return status @@ -1610,14 +1815,26 @@ def remove_jdbc_driver(self, feed_id, server_id, driver_name): server_id: server id under a feed driver_name: driver name """ - payload = {"resourcePath": "/t;{}/f;{}/r;{}~%2Fsubsystem%3Ddatasources%2Fjdbc-driver%3D{}" - .format(self.tenant_id, feed_id, server_id, driver_name)} - return self.cmd_gw_ws_api.hwk_invoke_operation(operation_name="RemoveJdbcDriver", - payload=payload) + payload = { + "resourcePath": "/t;{}/f;{}/r;{}~%2Fsubsystem%3Ddatasources%2Fjdbc-driver%3D{}".format( + self.tenant_id, feed_id, server_id, driver_name + ) + } + return self.cmd_gw_ws_api.hwk_invoke_operation( + operation_name="RemoveJdbcDriver", payload=payload + ) - def add_deployment(self, feed_id, server_id, destination_file_name, force_deploy=False, - enabled=True, server_groups=None, binary_file_location=None, - binary_content=None): + def add_deployment( + self, + feed_id, + server_id, + destination_file_name, + force_deploy=False, + enabled=True, + server_groups=None, + binary_file_location=None, + binary_content=None, + ): """Adds deployment to hawkular server. 
Return status Args: feed_id: feed id of the server @@ -1631,16 +1848,24 @@ def add_deployment(self, feed_id, server_id, destination_file_name, force_deploy """ if not binary_content and not binary_file_location: raise KeyError("Deployment file must be passed as binary or file location") - resource_path = "/t;{}/f;{}/r;{}~~".format(self.tenant_id, feed_id, server_id) - payload = {"destinationFileName": destination_file_name, "forceDeploy": force_deploy, - "resourcePath": resource_path, "enabled": enabled, "serverGroups": server_groups} - return self.cmd_gw_ws_api.hwk_invoke_operation(operation_name="DeployApplication", - payload=payload, - binary_content=binary_content, - binary_file_location=binary_file_location) - - def undeploy(self, feed_id, server_id, destination_file_name, remove_content=True, - server_groups=None): + resource_path = f"/t;{self.tenant_id}/f;{feed_id}/r;{server_id}~~" + payload = { + "destinationFileName": destination_file_name, + "forceDeploy": force_deploy, + "resourcePath": resource_path, + "enabled": enabled, + "serverGroups": server_groups, + } + return self.cmd_gw_ws_api.hwk_invoke_operation( + operation_name="DeployApplication", + payload=payload, + binary_content=binary_content, + binary_file_location=binary_file_location, + ) + + def undeploy( + self, feed_id, server_id, destination_file_name, remove_content=True, server_groups=None + ): """Removes deployment on a hawkular server. Return status Args: feed_id: feed id of the server @@ -1649,11 +1874,16 @@ def undeploy(self, feed_id, server_id, destination_file_name, remove_content=Tru remove_content: whether to remove the deployment content or not (default = true) server_groups: comma-separated list of server groups for the operation (default = None) """ - resource_path = "/t;{}/f;{}/r;{}~~".format(self.tenant_id, feed_id, server_id) - payload = {"destinationFileName": destination_file_name, "removeContent": remove_content, - "serverGroups": server_groups, "resourcePath": resource_path} - return self.cmd_gw_ws_api.hwk_invoke_operation(operation_name="UndeployApplication", - payload=payload) + resource_path = f"/t;{self.tenant_id}/f;{feed_id}/r;{server_id}~~" + payload = { + "destinationFileName": destination_file_name, + "removeContent": remove_content, + "serverGroups": server_groups, + "resourcePath": resource_path, + } + return self.cmd_gw_ws_api.hwk_invoke_operation( + operation_name="UndeployApplication", payload=payload + ) def enable_deployment(self, feed_id, server_id, destination_file_name, server_groups=None): """Enables deployment on a hawkular server. 
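# Continuing the hypothetical `ops` sketch above: pushing a deployment through
# the same gateway (file name and path are placeholders).
status = ops.add_deployment(
    feed_id="feed-1",
    server_id="server-one",
    destination_file_name="sample.war",
    binary_file_location="/tmp/sample.war",  # binary content or a file path is mandatory
)
ops.close_ws()  # close the websocket session when done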
Return status @@ -1662,12 +1892,16 @@ def enable_deployment(self, feed_id, server_id, destination_file_name, server_gr server_id: server id under a feed destination_file_name: deployment file name server_groups: comma-separated list of server groups for the operation (default = None) - """ - resource_path = "/t;{}/f;{}/r;{}~~".format(self.tenant_id, feed_id, server_id) - payload = {"destinationFileName": destination_file_name, "serverGroups": server_groups, - "resourcePath": resource_path} - return self.cmd_gw_ws_api.hwk_invoke_operation(operation_name="EnableApplication", - payload=payload) + """ + resource_path = f"/t;{self.tenant_id}/f;{feed_id}/r;{server_id}~~" + payload = { + "destinationFileName": destination_file_name, + "serverGroups": server_groups, + "resourcePath": resource_path, + } + return self.cmd_gw_ws_api.hwk_invoke_operation( + operation_name="EnableApplication", payload=payload + ) def disable_deployment(self, feed_id, server_id, destination_file_name, server_groups=None): """Disable deployment on a hawkular server. Return status @@ -1677,11 +1911,15 @@ def disable_deployment(self, feed_id, server_id, destination_file_name, server_g destination_file_name: deployment file name server_groups: comma-separated list of server groups for the operation (default = None) """ - resource_path = "/t;{}/f;{}/r;{}~~".format(self.tenant_id, feed_id, server_id) - payload = {"destinationFileName": destination_file_name, "serverGroups": server_groups, - "resourcePath": resource_path} - return self.cmd_gw_ws_api.hwk_invoke_operation(operation_name="DisableApplication", - payload=payload) + resource_path = f"/t;{self.tenant_id}/f;{feed_id}/r;{server_id}~~" + payload = { + "destinationFileName": destination_file_name, + "serverGroups": server_groups, + "resourcePath": resource_path, + } + return self.cmd_gw_ws_api.hwk_invoke_operation( + operation_name="DisableApplication", payload=payload + ) def restart_deployment(self, feed_id, server_id, destination_file_name, server_groups=None): """Restarts deployment on a hawkular server. 
Return status @@ -1690,12 +1928,16 @@ def restart_deployment(self, feed_id, server_id, destination_file_name, server_g server_id: server id under a feed destination_file_name: deployment file name server_groups: comma-separated list of server groups for the operation (default = None) - """ - resource_path = "/t;{}/f;{}/r;{}~~".format(self.tenant_id, feed_id, server_id) - payload = {"destinationFileName": destination_file_name, "serverGroups": server_groups, - "resourcePath": resource_path} - return self.cmd_gw_ws_api.hwk_invoke_operation(operation_name="RestartApplication", - payload=payload) + """ + resource_path = f"/t;{self.tenant_id}/f;{feed_id}/r;{server_id}~~" + payload = { + "destinationFileName": destination_file_name, + "serverGroups": server_groups, + "resourcePath": resource_path, + } + return self.cmd_gw_ws_api.hwk_invoke_operation( + operation_name="RestartApplication", payload=payload + ) def close_ws(self): """Closes web socket client session""" @@ -1712,6 +1954,7 @@ def __init__(self, metric_type, sub_type): class MetricEnumGauge(MetricEnum): """Enum to define Gauge metric types and sub types""" + DS_POOL_ACTIVE_COUNT = ("Datasource Pool Metrics", "Active Count") DS_POOL_AVAILABLE_COUNT = ("Datasource Pool Metrics", "Available Count") DS_POOL_AVERAGE_BLOCKING_TIME = ("Datasource Pool Metrics", "Average Blocking Time") @@ -1741,8 +1984,10 @@ class MetricEnumGauge(MetricEnum): JMS_TOPIC_DURABLE_SUBSCRIPTION_COUNT = ("JMS Topic Metrics", "Durable Subscription Count") JMS_TOPIC_MESSAGE_COUNT = ("JMS Topic Metrics", "Message Count") JMS_TOPIC_NON_DURABLE_MESSAGE_COUNT = ("JMS Topic Metrics", "Non-Durable Message Count") - JMS_TOPIC_NON_DURABLE_SUBSCRIPTION_COUNT = \ - ("JMS Topic Metrics", "Non-Durable Subscription Count") + JMS_TOPIC_NON_DURABLE_SUBSCRIPTION_COUNT = ( + "JMS Topic Metrics", + "Non-Durable Subscription Count", + ) JMS_TOPIC_SUBSCRIPTION_COUNT = ("JMS Topic Metrics", "Subscription Count") SVR_MEM_HEAP_COMMITTED = ("WildFly Memory Metrics", "Heap Committed") SVR_MEM_HEAP_MAX = ("WildFly Memory Metrics", "Heap Max") @@ -1750,38 +1995,61 @@ class MetricEnumGauge(MetricEnum): SVR_MEM_NON_HEAP_COMMITTED = ("WildFly Memory Metrics", "NonHeap Committed") SVR_MEM_NON_HEAP_USED = ("WildFly Memory Metrics", "NonHeap Used") SVR_TH_THREAD_COUNT = ("WildFly Threading Metrics", "Thread Count") - SVR_WEB_AGGREGATED_ACTIVE_WEB_SESSIONS = \ - ("WildFly Aggregated Web Metrics", "Aggregated Active Web Sessions") - SVR_WEB_AGGREGATED_MAX_ACTIVE_WEB_SESSIONS = \ - ("WildFly Aggregated Web Metrics", "Aggregated Max Active Web Sessions") + SVR_WEB_AGGREGATED_ACTIVE_WEB_SESSIONS = ( + "WildFly Aggregated Web Metrics", + "Aggregated Active Web Sessions", + ) + SVR_WEB_AGGREGATED_MAX_ACTIVE_WEB_SESSIONS = ( + "WildFly Aggregated Web Metrics", + "Aggregated Max Active Web Sessions", + ) class MetricEnumCounter(MetricEnum): """Enum Counter metric types and sub types""" + DEP_UTM_EXPIRED_SESSIONS = ("Undertow Metrics", "Expired Sessions") DEP_UTM_REJECTED_SESSIONS = ("Undertow Metrics", "Rejected Sessions") DEP_UTM_SESSIONS_CREATED = ("Undertow Metrics", "Sessions Created") JMS_QUEUE_MESSAGES_ADDED = ("JMS Queue Metrics", "Messages Added") JMS_TOPIC_MESSAGES_ADDED = ("JMS Topic Metrics", "Messages Added") SVR_MEM_ACCUMULATED_GC_DURATION = ("WildFly Memory Metrics", "Accumulated GC Duration") - SVR_TXN_NUMBER_OF_ABORTED_TRANSACTIONS = \ - ("Transactions Metrics", "Number of Aborted Transactions") - SVR_TXN_NUMBER_OF_APPLICATION_ROLLBACKS = \ - ("Transactions Metrics", "Number of Application 
Rollbacks") - SVR_TXN_NUMBER_OF_COMMITTED_TRANSACTIONS = \ - ("Transactions Metrics", "Number of Committed Transactions") + SVR_TXN_NUMBER_OF_ABORTED_TRANSACTIONS = ( + "Transactions Metrics", + "Number of Aborted Transactions", + ) + SVR_TXN_NUMBER_OF_APPLICATION_ROLLBACKS = ( + "Transactions Metrics", + "Number of Application Rollbacks", + ) + SVR_TXN_NUMBER_OF_COMMITTED_TRANSACTIONS = ( + "Transactions Metrics", + "Number of Committed Transactions", + ) SVR_TXN_NUMBER_OF_HEURISTICS = ("Transactions Metrics", "Number of Heuristics") - SVR_TXN_NUMBER_OF_NESTED_TRANSACTIONS = \ - ("Transactions Metrics", "Number of Nested Transactions") + SVR_TXN_NUMBER_OF_NESTED_TRANSACTIONS = ( + "Transactions Metrics", + "Number of Nested Transactions", + ) SVR_TXN_NUMBER_OF_RESOURCE_ROLLBACKS = ("Transactions Metrics", "Number of Resource Rollbacks") - SVR_TXN_NUMBER_OF_TIMED_OUT_TRANSACTIONS = \ - ("Transactions Metrics", "Number of Timed Out Transactions") + SVR_TXN_NUMBER_OF_TIMED_OUT_TRANSACTIONS = ( + "Transactions Metrics", + "Number of Timed Out Transactions", + ) SVR_TXN_NUMBER_OF_TRANSACTIONS = ("Transactions Metrics", "Number of Transactions") - SVR_WEB_AGGREGATED_EXPIRED_WEB_SESSIONS = \ - ("WildFly Aggregated Web Metrics", "Aggregated Expired Web Sessions") - SVR_WEB_AGGREGATED_REJECTED_WEB_SESSIONS = \ - ("WildFly Aggregated Web Metrics", "Aggregated Rejected Web Sessions") - SVR_WEB_AGGREGATED_SERVLET_REQUEST_COUNT = \ - ("WildFly Aggregated Web Metrics", "Aggregated Servlet Request Count") - SVR_WEB_AGGREGATED_SERVLET_REQUEST_TIME = \ - ("WildFly Aggregated Web Metrics", "Aggregated Servlet Request Time") + SVR_WEB_AGGREGATED_EXPIRED_WEB_SESSIONS = ( + "WildFly Aggregated Web Metrics", + "Aggregated Expired Web Sessions", + ) + SVR_WEB_AGGREGATED_REJECTED_WEB_SESSIONS = ( + "WildFly Aggregated Web Metrics", + "Aggregated Rejected Web Sessions", + ) + SVR_WEB_AGGREGATED_SERVLET_REQUEST_COUNT = ( + "WildFly Aggregated Web Metrics", + "Aggregated Servlet Request Count", + ) + SVR_WEB_AGGREGATED_SERVLET_REQUEST_TIME = ( + "WildFly Aggregated Web Metrics", + "Aggregated Servlet Request Time", + ) diff --git a/wrapanapi/systems/lenovo.py b/wrapanapi/systems/lenovo.py index 29b0cff3..8987c4af 100644 --- a/wrapanapi/systems/lenovo.py +++ b/wrapanapi/systems/lenovo.py @@ -1,14 +1,15 @@ -# coding: utf-8 """Backend management system classes Used to communicate with providers without using CFME facilities """ - import json + import requests from requests.exceptions import Timeout -from wrapanapi.systems.base import System + from wrapanapi.entities.base import Entity from wrapanapi.exceptions import ItemNotFound +from wrapanapi.systems.base import System + class LenovoSystem(System): """Client to Lenovo API @@ -17,56 +18,59 @@ class LenovoSystem(System): username: The username to connect with. password: The password to connect with. 
""" + _api = None _server_stats_available = { - 'num_server': lambda self, _: len(self.list_servers()), - 'cores_capacity': lambda self, requester: self.get_server_cores(requester.name), - 'memory_capacity': lambda self, requester: self.get_server_memory(requester.name), - 'num_firmwares': lambda self, requester: len(self.get_server_firmwares(requester.name)), - 'num_network_devices': lambda self, - requester: len(self.get_network_devices(requester.name)), - 'num_storage_devices': lambda self, - requester: len(self.get_storage_devices(requester.name)), + "num_server": lambda self, _: len(self.list_servers()), + "cores_capacity": lambda self, requester: self.get_server_cores(requester.name), + "memory_capacity": lambda self, requester: self.get_server_memory(requester.name), + "num_firmwares": lambda self, requester: len(self.get_server_firmwares(requester.name)), + "num_network_devices": lambda self, requester: len( + self.get_network_devices(requester.name) + ), + "num_storage_devices": lambda self, requester: len( + self.get_storage_devices(requester.name) + ), } _server_inventory_available = { - 'hostname': lambda self, requester: self.get_server_hostname(requester.name), - 'ipv4_address': lambda self, requester: self.get_server_ipv4_address(requester.name), - 'ipv6_address': lambda self, requester: self.get_server_ipv6_address(requester.name), - 'mac_address': lambda self, requester: self.get_server_mac_address(requester.name), - 'power_state': lambda self, requester: self.get_server_power_status(requester.name), - 'health_state': lambda self, requester: self.get_server_health_state(requester.name), - 'manufacturer': lambda self, requester: self.get_server_manufacturer(requester.name), - 'model': lambda self, requester: self.get_server_model(requester.name), - 'machine_type': lambda self, requester: self.get_server_machine_type(requester.name), - 'serial_number': lambda self, requester: self.get_server_serial_number(requester.name), - 'description': lambda self, requester: self.get_server_description(requester.name), - 'product_name': lambda self, requester: self.get_server_product_name(requester.name), - 'uuid': lambda self, requester: self.get_server_uuid(requester.name), - 'field_replaceable_unit': lambda self, requester: self.get_server_fru(requester.name), + "hostname": lambda self, requester: self.get_server_hostname(requester.name), + "ipv4_address": lambda self, requester: self.get_server_ipv4_address(requester.name), + "ipv6_address": lambda self, requester: self.get_server_ipv6_address(requester.name), + "mac_address": lambda self, requester: self.get_server_mac_address(requester.name), + "power_state": lambda self, requester: self.get_server_power_status(requester.name), + "health_state": lambda self, requester: self.get_server_health_state(requester.name), + "manufacturer": lambda self, requester: self.get_server_manufacturer(requester.name), + "model": lambda self, requester: self.get_server_model(requester.name), + "machine_type": lambda self, requester: self.get_server_machine_type(requester.name), + "serial_number": lambda self, requester: self.get_server_serial_number(requester.name), + "description": lambda self, requester: self.get_server_description(requester.name), + "product_name": lambda self, requester: self.get_server_product_name(requester.name), + "uuid": lambda self, requester: self.get_server_uuid(requester.name), + "field_replaceable_unit": lambda self, requester: self.get_server_fru(requester.name), } POWERED_ON = 8 POWERED_OFF = 5 STANDBY = 18 
HEALTH_VALID = ("normal", "non-critical") - HEALTH_WARNING = ("warning") + HEALTH_WARNING = "warning" HEALTH_CRITICAL = ("critical", "minor-failure", "major-failure", "non-recoverable", "fatal") def __init__(self, hostname, username, password, protocol="https", port=None, **kwargs): - super(LenovoSystem, self).__init__(**kwargs) - self.port = port or kwargs.get('api_port', 443) + super().__init__(**kwargs) + self.port = port or kwargs.get("api_port", 443) self.auth = (username, password) - self.url = '{}://{}:{}/'.format(protocol, hostname, self.port) + self.url = f"{protocol}://{hostname}:{self.port}/" self._servers_list = None self.kwargs = kwargs @property def _identifying_attrs(self): - return {'url': self.url} + return {"url": self.url} def info(self): - return 'LenovoSystem url={}'.format(self.url) + return f"LenovoSystem url={self.url}" def __del__(self): """Disconnect from the API when the object is deleted""" @@ -84,8 +88,9 @@ def _service_instance(self, path): def _service_put(self, path, request): """An instance of the service""" try: - response = requests.put(self.url + path, data=json.dumps(request), auth=self.auth, - verify=False) + response = requests.put( + self.url + path, data=json.dumps(request), auth=self.auth, verify=False + ) return response except Timeout: return None @@ -93,8 +98,12 @@ def _service_put(self, path, request): def _service_post(self, path, request): """Makes POST request and returns the response""" try: - response = requests.post('{}/{}'.format(self.url, path), data=json.dumps(request), - auth=self.auth, verify=False) + response = requests.post( + f"{self.url}/{path}", + data=json.dumps(request), + auth=self.auth, + verify=False, + ) return response except Timeout: return None @@ -103,30 +112,30 @@ def _service_post(self, path, request): def version(self): """The product version""" response = self._service_instance("aicc") - return response['appliance']['version'] + return response["appliance"]["version"] def list_servers(self): inventory = [] # Collect the nodes associated with a cabinet or chassis response = self._service_instance("cabinet?status=includestandalone") - for cabinet in response['cabinetList']: - cabinet_nodes = cabinet['nodeList'] - inventory.extend([node['itemInventory'] for node in cabinet_nodes]) + for cabinet in response["cabinetList"]: + cabinet_nodes = cabinet["nodeList"] + inventory.extend([node["itemInventory"] for node in cabinet_nodes]) - for chassis in cabinet['chassisList']: - chassis_nodes = chassis['itemInventory']['nodes'] - inventory.extend([node for node in chassis_nodes if node['type'] != 'SCU']) + for chassis in cabinet["chassisList"]: + chassis_nodes = chassis["itemInventory"]["nodes"] + inventory.extend([node for node in chassis_nodes if node["type"] != "SCU"]) self._servers_list = inventory return inventory def list_switches(self): - raw_switches = self._service_instance(LenovoSwitch.API_PATH).get('switchList', []) + raw_switches = self._service_instance(LenovoSwitch.API_PATH).get("switchList", []) return [LenovoSwitch(data, system=self) for data in raw_switches] def get_switch(self, uuid): - switch_data = self._service_instance("{}/{}".format(LenovoSwitch.API_PATH, uuid.lower())) + switch_data = self._service_instance(f"{LenovoSwitch.API_PATH}/{uuid.lower()}") if not switch_data: raise ItemNotFound("switch", uuid) switch = LenovoSwitch(switch_data, system=self) @@ -141,15 +150,15 @@ def cleanup_switch(self, uuid): switch.cleanup() def change_node_power_status(self, server, request): - url = "nodes/" + 
str(server['uuid']) - payload = {'powerState': request} + url = "nodes/" + str(server["uuid"]) + payload = {"powerState": request} response = self._service_put(url, payload) return response def change_led_status(self, server, name, state): - url = "nodes/" + str(server['uuid']) - payload = {'leds': [{'name': name, 'state': state}]} + url = "nodes/" + str(server["uuid"]) + payload = {"leds": [{"name": name, "state": state}]} response = self._service_put(url, payload) return response @@ -160,7 +169,7 @@ def get_server(self, server_name): try: for node in self._servers_list: - if node['name'] == server_name: + if node["name"] == server_name: return node except AttributeError: return None @@ -168,37 +177,37 @@ def get_server(self, server_name): def get_led(self, server_name): try: server = self.get_server(server_name) - leds = server['leds'] + leds = server["leds"] for led in leds: - if led['name'] == 'Identify' or led['name'] == 'Identification': + if led["name"] == "Identify" or led["name"] == "Identification": return led except AttributeError: return None def get_server_hostname(self, server_name): server = self.get_server(server_name) - return str(server['hostname']) + return str(server["hostname"]) def get_server_ipv4_address(self, server_name): server = self.get_server(server_name) - return server['ipv4Addresses'] + return server["ipv4Addresses"] def get_server_ipv6_address(self, server_name): server = self.get_server(server_name) - return server['ipv6Addresses'] + return server["ipv6Addresses"] def get_server_mac_address(self, server_name): server = self.get_server(server_name) - return server['macAddress'] + return server["macAddress"] def get_server_power_status(self, server_name): server = self.get_server(server_name) - if server['powerStatus'] == self.POWERED_ON: + if server["powerStatus"] == self.POWERED_ON: return "on" - elif server['powerStatus'] == self.POWERED_OFF: + elif server["powerStatus"] == self.POWERED_OFF: return "off" - elif server['powerStatus'] == self.STANDBY: + elif server["powerStatus"] == self.STANDBY: return "Standby" else: return "Unknown" @@ -206,171 +215,171 @@ def get_server_power_status(self, server_name): def get_server_health_state(self, server_name): server = self.get_server(server_name) - if str(server['cmmHealthState'].lower()) in self.HEALTH_VALID: + if str(server["cmmHealthState"].lower()) in self.HEALTH_VALID: return "Valid" - elif str(server['cmmHealthState'].lower()) in self.HEALTH_WARNING: + elif str(server["cmmHealthState"].lower()) in self.HEALTH_WARNING: return "Warning" - elif str(server['cmmHealthState'].lower()) in self.HEALTH_CRITICAL: + elif str(server["cmmHealthState"].lower()) in self.HEALTH_CRITICAL: return "Critical" else: return "Unknown" def is_server_running(self, server_name): server = self.get_server(server_name) - return server['powerStatus'] == self.POWERED_ON + return server["powerStatus"] == self.POWERED_ON def is_server_stopped(self, server_name): server = self.get_server(server_name) - return server['powerStatus'] == self.POWERED_OFF + return server["powerStatus"] == self.POWERED_OFF def is_server_standby(self, server_name): server = self.get_server(server_name) - return server['powerStatus'] == self.STANDBY + return server["powerStatus"] == self.STANDBY def is_server_valid(self, server_name): server = self.get_server(server_name) - return str(server['cmmHealthState'].lower()) in self.HEALTH_VALID + return str(server["cmmHealthState"].lower()) in self.HEALTH_VALID def is_server_warning(self, server_name): server = 
self.get_server(server_name) - return str(server['cmmHealthState'].lower()) in self.HEALTH_WARNING + return str(server["cmmHealthState"].lower()) in self.HEALTH_WARNING def is_server_critical(self, server_name): server = self.get_server(server_name) - return str(server['cmmHealthState'].lower()) in self.HEALTH_CRITICAL + return str(server["cmmHealthState"].lower()) in self.HEALTH_CRITICAL def is_server_led_on(self, server_name): led = self.get_led(server_name) - return led['state'] == 'On' + return led["state"] == "On" def is_server_led_off(self, server_name): led = self.get_led(server_name) - return led['state'] == 'Off' + return led["state"] == "Off" def is_server_led_blinking(self, server_name): led = self.get_led(server_name) - return led['state'] == 'Blinking' + return led["state"] == "Blinking" def get_server_cores(self, server_name): server = self.get_server(server_name) - processors = server['processors'] - cores = sum([processor['cores'] for processor in processors]) + processors = server["processors"] + cores = sum(processor["cores"] for processor in processors) return cores def get_server_memory(self, server_name): server = self.get_server(server_name) - memorys = server['memoryModules'] - total_memory = sum([memory['capacity'] for memory in memorys]) + memorys = server["memoryModules"] + total_memory = sum(memory["capacity"] for memory in memorys) # Convert it to bytes, so it matches the value in the UI - return (1024 * total_memory) + return 1024 * total_memory def get_server_manufacturer(self, server_name): server = self.get_server(server_name) - return str(server['manufacturer']) + return str(server["manufacturer"]) def get_server_model(self, server_name): server = self.get_server(server_name) - return str(server['model']) + return str(server["model"]) def get_server_machine_type(self, server_name): server = self.get_server(server_name) - return str(server['machineType']) + return str(server["machineType"]) def get_server_serial_number(self, server_name): server = self.get_server(server_name) - return str(server['serialNumber']) + return str(server["serialNumber"]) def get_server_description(self, server_name): server = self.get_server(server_name) - return str(server['description']) + return str(server["description"]) def get_server_product_name(self, server_name): - return self.get_server(server_name)['productName'] + return self.get_server(server_name)["productName"] def get_server_uuid(self, server_name): - return self.get_server(server_name)['uuid'] + return self.get_server(server_name)["uuid"] def get_server_fru(self, server_name): - return self.get_server(server_name)['FRU'] + return self.get_server(server_name)["FRU"] def get_server_firmwares(self, server_name): - return self.get_server(server_name)['firmware'] + return self.get_server(server_name)["firmware"] def set_power_on_server(self, server_name): server = self.get_server(server_name) - response = self.change_node_power_status(server, 'powerOn') + response = self.change_node_power_status(server, "powerOn") return "Power state action has been sent, status:" + str(response.status_code) def set_power_off_server(self, server_name): server = self.get_server(server_name) - response = self.change_node_power_status(server, 'powerOffSoftGraceful') + response = self.change_node_power_status(server, "powerOffSoftGraceful") return "Power state action has been sent, status:" + str(response.status_code) def set_power_off_immediately_server(self, server_name): server = self.get_server(server_name) - response = 
self.change_node_power_status(server, 'powerOff') + response = self.change_node_power_status(server, "powerOff") return "Power state action has been sent, status:" + str(response.status_code) def set_restart_server(self, server_name): server = self.get_server(server_name) - response = self.change_node_power_status(server, 'powerOffSoftGraceful') + response = self.change_node_power_status(server, "powerOffSoftGraceful") return "Restart state action has been sent, status:" + str(response.status_code) def set_restart_immediately_server(self, server_name): server = self.get_server(server_name) - response = self.change_node_power_status(server, 'powerCycleSoft') + response = self.change_node_power_status(server, "powerCycleSoft") return "Restart state action has been sent, status:" + str(response.status_code) def set_restart_setup_system_server(self, server_name): server = self.get_server(server_name) - response = self.change_node_power_status(server, 'bootToF1') + response = self.change_node_power_status(server, "bootToF1") return "Restart state action has been sent, status:" + str(response.status_code) def set_restart_controller_server(self, server_name): server = self.get_server(server_name) - response = self.change_node_power_status(server, 'restart') + response = self.change_node_power_status(server, "restart") return "Restart state action has been sent, status:" + str(response.status_code) def set_server_led_on(self, server_name): server = self.get_server(server_name) led = self.get_led(server_name) - response = self.change_led_status(server, led['name'], 'On') + response = self.change_led_status(server, led["name"], "On") return "LED state action has been sent, status:" + str(response.status_code) def set_server_led_off(self, server_name): server = self.get_server(server_name) led = self.get_led(server_name) - response = self.change_led_status(server, led['name'], 'Off') + response = self.change_led_status(server, led["name"], "Off") return "LED state action has been sent, status:" + str(response.status_code) def set_server_led_blinking(self, server_name): server = self.get_server(server_name) led = self.get_led(server_name) - response = self.change_led_status(server, led['name'], 'Blinking') + response = self.change_led_status(server, led["name"], "Blinking") return "LED state action has been sent, status:" + str(response.status_code) @@ -385,8 +394,10 @@ def server_stats(self, physical_server, requested_stats, **kwargs): # Retrieve and return the stats requested_stats = requested_stats or self._stats_available - return {stat: self._server_stats_available[stat](self, physical_server) - for stat in requested_stats} + return { + stat: self._server_stats_available[stat](self, physical_server) + for stat in requested_stats + } def server_inventory(self, physical_server, requested_items, **kwargs): """ @@ -398,8 +409,10 @@ def server_inventory(self, physical_server, requested_items, **kwargs): """ # Retrieve and return the inventory requested_items = requested_items or self._server_inventory_available - return {item: self._server_inventory_available[item](self, physical_server) - for item in requested_items} + return { + item: self._server_inventory_available[item](self, physical_server) + for item in requested_items + } def get_network_devices(self, server_name): addin_cards = self.get_addin_cards(server_name) or [] @@ -407,13 +420,15 @@ def get_network_devices(self, server_name): network_devices = [] for addin_card in addin_cards: - if (LenovoSystem.is_network_device(addin_card) and not - 
LenovoSystem.is_device_in_list(addin_card, network_devices)): + if LenovoSystem.is_network_device(addin_card) and not LenovoSystem.is_device_in_list( + addin_card, network_devices + ): network_devices.append(addin_card) for pci_device in pci_devices: - if (LenovoSystem.is_network_device(pci_device) and not - LenovoSystem.is_device_in_list(pci_device, network_devices)): + if LenovoSystem.is_network_device(pci_device) and not LenovoSystem.is_device_in_list( + pci_device, network_devices + ): network_devices.append(pci_device) return network_devices @@ -424,13 +439,15 @@ def get_storage_devices(self, server_name): storage_devices = [] for addin_card in addin_cards: - if (LenovoSystem.is_storage_device(addin_card) and not - LenovoSystem.is_device_in_list(addin_card, storage_devices)): + if LenovoSystem.is_storage_device(addin_card) and not LenovoSystem.is_device_in_list( + addin_card, storage_devices + ): storage_devices.append(addin_card) for pci_device in pci_devices: - if (LenovoSystem.is_storage_device(pci_device) and not - LenovoSystem.is_device_in_list(pci_device, storage_devices)): + if LenovoSystem.is_storage_device(pci_device) and not LenovoSystem.is_device_in_list( + pci_device, storage_devices + ): storage_devices.append(pci_device) return storage_devices @@ -453,9 +470,11 @@ def is_network_device(device): # We expect that supported network devices will have a class of "network controller" or # "nic" or "ethernet" contained in the device name. - return (device.get("class", "").lower() == "network controller" or - "nic" in device_name or - "ethernet" in device_name) + return ( + device.get("class", "").lower() == "network controller" + or "nic" in device_name + or "ethernet" in device_name + ) @staticmethod def is_storage_device(device): @@ -465,9 +484,11 @@ def is_storage_device(device): # We expect that supported storage devices will have a class of "mass storage controller" # or "serveraid" or "sd media raid" contained in the device name. - return (device.get("class", "").lower() == "mass storage controller" or - "serveraid" in device_name or - "sd media raid" in device_name) + return ( + device.get("class", "").lower() == "mass storage controller" + or "serveraid" in device_name + or "sd media raid" in device_name + ) def get_addin_cards(self, server_name): server = self.get_server(server_name) @@ -483,8 +504,9 @@ def get_pci_devices(self, server_name): def get_device_unique_id(device): # The ID used to uniquely identify each device is the UUID of the device # if it has one or the concatenation of the PCI bus number and PCI device number. 
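# Illustration of the uuid-or-PCI-address fallback implemented just below, with
# made-up device records (get_device_unique_id is assumed to be a staticmethod):
assert LenovoSystem.get_device_unique_id({"uuid": "ABC-123"}) == "ABC-123"
assert LenovoSystem.get_device_unique_id({"pciBusNumber": 0, "pciDeviceNumber": 2}) == "02"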
- unique_id = (device.get("uuid") or - "{}{}".format(device.get("pciBusNumber"), device.get("pciDeviceNumber"))) + unique_id = device.get("uuid") or "{}{}".format( + device.get("pciBusNumber"), device.get("pciDeviceNumber") + ) return unique_id @@ -493,11 +515,12 @@ def disconnect(self): class LenovoSwitch(Entity): - """ Encapsulates all Lenovo Switches behavior """ + """Encapsulates all Lenovo Switches behavior""" + API_PATH = "switches" def __init__(self, switch_data, **kwargs): - super(LenovoSwitch, self).__init__(raw=switch_data, **kwargs) + super().__init__(raw=switch_data, **kwargs) @property def product_name(self): @@ -521,34 +544,34 @@ def description(self): @property def firmwares(self): - return self.raw.get('firmware', None) + return self.raw.get("firmware", None) @property def power_status(self): - return self.raw.get('powerState', None).lower() + return self.raw.get("powerState", None).lower() @property def type(self): - return self.raw.get('type', None) + return self.raw.get("type", None) @property def health_state(self): - return self.raw.get('overallHealthState', None) + return self.raw.get("overallHealthState", None) @property def ipv4_addresses(self): - return self.raw.get('ipv4Addresses', None) + return self.raw.get("ipv4Addresses", None) @property def ipv6_addresses(self): - return self.raw.get('ipv6Addresses', None) + return self.raw.get("ipv6Addresses", None) @property def _identifying_attrs(self): return {"name": self.name, "uuid": self.uuid} def refresh(self): - uri = "{}/{}".format(LenovoSwitch.API_PATH, self.name.lower()) + uri = f"{LenovoSwitch.API_PATH}/{self.name.lower()}" switch_data = self.system._service_instance(uri) self.raw = switch_data return self.raw @@ -563,44 +586,47 @@ def uuid(self): @property def ipv4_assignments(self): - ip_interfaces = self.raw.get('ipInterfaces', None) + ip_interfaces = self.raw.get("ipInterfaces", None) ipv4_assignments = [] if ip_interfaces: for interface in ip_interfaces: if "IPv4assignments" in interface: - ipv4_assignments.append(interface['IPv4assignments']) + ipv4_assignments.append(interface["IPv4assignments"]) return ipv4_assignments @property def ipv6_assignments(self): - ip_interfaces = self.raw.get('ipInterfaces', None) + ip_interfaces = self.raw.get("ipInterfaces", None) ipv6_assignments = [] if ip_interfaces: for interface in ip_interfaces: if "IPv6assignments" in interface: - ipv6_assignments.append(interface['IPv6assignments']) + ipv6_assignments.append(interface["IPv6assignments"]) return ipv6_assignments @property def ports(self): - ''' Filters ports data that are also filtered by the provider's parser ''' - raw_ports = self.raw.get('ports', None) + """Filters ports data that are also filtered by the provider's parser""" + raw_ports = self.raw.get("ports", None) ports = [] if raw_ports: for port in raw_ports: - port_to_append = {'portName': port.get('portName', None), - 'portType': port.get('port', None), - 'vlanEnabled': 'PVID' in port, - 'peerMacAddress': port.get('peerMacAddress', None)} + port_to_append = { + "portName": port.get("portName", None), + "portType": port.get("port", None), + "vlanEnabled": "PVID" in port, + "peerMacAddress": port.get("peerMacAddress", None), + } ports.append(port_to_append) return ports def delete(self): - data = {"endpoints": [{ - "ipAdresses": self.ipv4_addresses, - "type": self.type, - "uuid": self.uuid}], - "forceUnmanage": True} + data = { + "endpoints": [ + {"ipAdresses": self.ipv4_addresses, "type": self.type, "uuid": self.uuid} + ], + "forceUnmanage": True, + } 
self.system._service_post(path="unmanageRequest", request=data) def cleanup(self): diff --git a/wrapanapi/systems/msazure.py b/wrapanapi/systems/msazure.py index 00f4524e..b87bc999 100644 --- a/wrapanapi/systems/msazure.py +++ b/wrapanapi/systems/msazure.py @@ -1,12 +1,10 @@ -# -*- coding: utf-8 -*- """Backend management system classes Used to communicate with providers without using CFME facilities """ - import os -from dateutil import parser -from datetime import datetime, timedelta +from datetime import datetime +from datetime import timedelta import pytz from azure.common import AzureConflictHttpError @@ -15,29 +13,35 @@ from azure.mgmt.compute import ComputeManagementClient from azure.mgmt.iothub import IotHubClient from azure.mgmt.network import NetworkManagementClient -from azure.mgmt.network.models import NetworkSecurityGroup, SecurityRule +from azure.mgmt.network.models import NetworkSecurityGroup +from azure.mgmt.network.models import SecurityRule from azure.mgmt.resource import SubscriptionClient from azure.mgmt.resource.resources import ResourceManagementClient from azure.mgmt.resource.subscriptions.models import SubscriptionState from azure.mgmt.storage import StorageManagementClient from azure.storage.blob import BlockBlobService from cached_property import cached_property +from dateutil import parser from wait_for import wait_for -from wrapanapi.entities import (Instance, Template, TemplateMixin, VmMixin, - VmState) -from wrapanapi.exceptions import (ImageNotFoundError, MultipleImagesError, - VMInstanceNotFound) +from wrapanapi.entities import Instance +from wrapanapi.entities import Template +from wrapanapi.entities import TemplateMixin +from wrapanapi.entities import VmMixin +from wrapanapi.entities import VmState +from wrapanapi.exceptions import ImageNotFoundError +from wrapanapi.exceptions import MultipleImagesError +from wrapanapi.exceptions import VMInstanceNotFound from wrapanapi.systems.base import System class AzureInstance(Instance): state_map = { - 'VM starting': VmState.STARTING, - 'VM running': VmState.RUNNING, - 'VM deallocated': VmState.STOPPED, - 'VM stopped': VmState.SUSPENDED, - 'Paused': VmState.PAUSED, + "VM starting": VmState.STARTING, + "VM running": VmState.RUNNING, + "VM deallocated": VmState.STOPPED, + "VM stopped": VmState.SUSPENDED, + "Paused": VmState.PAUSED, } def __init__(self, system, raw=None, **kwargs): @@ -50,24 +54,25 @@ def __init__(self, system, raw=None, **kwargs): name: name of instance resource_group: name of resource group this instance is in """ - self._resource_group = kwargs.get('resource_group') - self._name = kwargs.get('name') + self._resource_group = kwargs.get("resource_group") + self._name = kwargs.get("name") if not self._name or not self._resource_group: raise ValueError("missing required kwargs: 'resource_group', 'name'") - super(AzureInstance, self).__init__(system, raw, **kwargs) + super().__init__(system, raw, **kwargs) self._api = self.system.vms_collection @property def _identifying_attrs(self): - return {'name': self._name, 'resource_group': self._resource_group} + return {"name": self._name, "resource_group": self._resource_group} def _wait_on_operation(self, operation): if operation: operation.wait() return True if operation.status().lower() == "succeeded" else False self.logger.warning( - "wait_on_operation got operation=None, expected an OperationStatusResponse") + "wait_on_operation got operation=None, expected an OperationStatusResponse" + ) return True @property @@ -86,7 +91,10 @@ def refresh(self): """ try: 
vm = self._api.get( - resource_group_name=self._resource_group, vm_name=self._name, expand='instanceView') + resource_group_name=self._resource_group, + vm_name=self._name, + expand="instanceView", + ) except CloudError as e: if e.response.status_code == 404: raise VMInstanceNotFound(self._name) @@ -94,8 +102,8 @@ def refresh(self): raise first_status = vm.instance_view.statuses[0] - if first_status.display_status == 'Provisioning failed': - raise VMInstanceNotFound('provisioning failed for VM {}'.format(self._name)) + if first_status.display_status == "Provisioning failed": + raise VMInstanceNotFound(f"provisioning failed for VM {self._name}") self.raw = vm return self.raw @@ -150,23 +158,29 @@ def ip(self): break ip_config_obj = network_client.network_interface_ip_configurations.get( - self._resource_group, if_name, ip_config_name) + self._resource_group, if_name, ip_config_name + ) # Getting public IP id from the IP configuration object try: pub_ip_id = ip_config_obj.public_ip_address.id except AttributeError: self.logger.error( - "VM '%s' doesn't have public IP on %s:%s", self.name, if_name, ip_config_name) + "VM '%s' doesn't have public IP on %s:%s", + self.name, + if_name, + ip_config_name, + ) return None pub_ip_name = os.path.split(pub_ip_id)[1] public_ip = network_client.public_ip_addresses.get(self._resource_group, pub_ip_name) - if not hasattr(public_ip, 'ip_address') or not public_ip.ip_address: + if not hasattr(public_ip, "ip_address") or not public_ip.ip_address: # Dynamic ip will be allocated for Running VMs only self.logger.error( "Couldn't get Public IP of vm '%s'. public_ip_allocation_method -- '%s'. ", - self.name, public_ip.public_ip_allocation_method + self.name, + public_ip.public_ip_allocation_method, ) return None @@ -174,7 +188,7 @@ def ip(self): @property def all_ips(self): - """ Wrapping self.ip to meet abstractproperty requirement + """Wrapping self.ip to meet abstractproperty requirement TODO: Actually fetch the various addresses on non-primary interfaces @@ -195,8 +209,7 @@ def creation_time(self): def delete(self): self.logger.info("deleting vm '%s'", self.name) - operation = self._api.delete( - resource_group_name=self._resource_group, vm_name=self.name) + operation = self._api.delete(resource_group_name=self._resource_group, vm_name=self.name) return self._wait_on_operation(operation) def cleanup(self): @@ -214,13 +227,13 @@ def cleanup(self): self.system.remove_discs_by_search(self.name) except Exception: self.logger.exception( - "cleanup: failed to cleanup NICs/PIPs/Discs for VM '%s'", self.name) + "cleanup: failed to cleanup NICs/PIPs/Discs for VM '%s'", self.name + ) return True def start(self): self.logger.info("starting vm '%s'", self.name) - operation = self._api.start( - resource_group_name=self._resource_group, vm_name=self.name) + operation = self._api.start(resource_group_name=self._resource_group, vm_name=self.name) if self._wait_on_operation(operation): self.wait_for_state(VmState.RUNNING) return True @@ -229,7 +242,8 @@ def start(self): def stop(self): self.logger.info("stopping vm '%s'", self.name) operation = self._api.deallocate( - resource_group_name=self._resource_group, vm_name=self.name) + resource_group_name=self._resource_group, vm_name=self.name + ) if self._wait_on_operation(operation): self.wait_for_state(VmState.STOPPED) return True @@ -237,8 +251,7 @@ def stop(self): def restart(self): self.logger.info("restarting vm '%s'", self.name) - operation = self._api.restart( - resource_group_name=self._resource_group, vm_name=self.name) 
+ operation = self._api.restart(resource_group_name=self._resource_group, vm_name=self.name) if self._wait_on_operation(operation): self.wait_for_state(VmState.RUNNING) return True @@ -246,8 +259,7 @@ def restart(self): def suspend(self): self.logger.info("suspending vm '%s'", self.name) - operation = self._api.power_off( - resource_group_name=self._resource_group, vm_name=self.name) + operation = self._api.power_off(resource_group_name=self._resource_group, vm_name=self.name) if self._wait_on_operation(operation): self.wait_for_state(VmState.SUSPENDED) return True @@ -261,17 +273,20 @@ def capture(self, container, image_name, overwrite_vhds=True): params = ComputeManagementClient.models().VirtualMachineCaptureParameters( vhd_prefix=image_name, destination_container_name=container, - overwrite_vhds=overwrite_vhds + overwrite_vhds=overwrite_vhds, ) self.stop() self.logger.info("Generalizing VM '%s'", self.name) - operation = self._api.generalize(resource_group_name=self._resource_group, - vm_name=self.name) + operation = self._api.generalize( + resource_group_name=self._resource_group, vm_name=self.name + ) self._wait_on_operation(operation) self.logger.info("Capturing VM '%s'", self.name) - operation = self._api.capture(resource_group_name=self._resource_group, - vm_name=self.name, - parameters=params) + operation = self._api.capture( + resource_group_name=self._resource_group, + vm_name=self.name, + parameters=params, + ) return self._wait_on_operation(operation) def get_vhd_uri(self): @@ -296,17 +311,17 @@ def __init__(self, system, raw=None, **kwargs): name: name of template container: container the template is stored in """ - self._name = kwargs.get('name') - self._container = kwargs.get('container') + self._name = kwargs.get("name") + self._container = kwargs.get("container") if not self._name or not self._container: raise ValueError("missing required kwargs: 'name', 'container'") - super(AzureBlobImage, self).__init__(system, raw, **kwargs) + super().__init__(system, raw, **kwargs) self._api = self.system.container_client @property def _identifying_attrs(self): - return {'name': self._name, 'container': self._container} + return {"name": self._name, "container": self._container} @property def name(self): @@ -332,11 +347,11 @@ def refresh(self): def delete(self, delete_snapshots=True): kwargs = {} if delete_snapshots: - kwargs['delete_snapshots'] = 'include' + kwargs["delete_snapshots"] = "include" self._api.delete_blob(self._container, self.name, **kwargs) def delete_snapshots_only(self): - self._api.delete_blob(self._container, self.name, delete_snapshots='only') + self._api.delete_blob(self._container, self.name, delete_snapshots="only") def cleanup(self): return self.delete() @@ -345,81 +360,83 @@ def deploy(self, vm_name, **vm_settings): # TODO: this method is huge, it should be broken up ... # TODO #2: check args of vm_settings better # TODO #3: possibly use compute images instead of blob images? 
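# Hedged sketch of the vm_settings consumed by deploy() below; every value is a
# placeholder and `azure_system` is a hypothetical connected AzureSystem.
image = AzureBlobImage(system=azure_system, name="template.vhd", container="templates")
vm = image.deploy(
    "test-vm-01",
    resource_group="my-rg",      # optional, defaults to system.resource_group
    region_api="eastus",         # optional, defaults to system.region
    virtual_net="my-vnet",
    address_space="10.0.0.0/16",
    subnet_range="10.0.0.0/24",
    vm_size="Standard_B1s",      # must be a valid VirtualMachineSizeTypes value
    storage_account="mystorage",
    storage_container="vhds",
)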
- resource_group = vm_settings.get('resource_group', self.system.resource_group) - location = vm_settings.get('region_api', self.system.region) - subnet = vm_settings['subnet_range'] - address_space = vm_settings['address_space'] - vnet_name = vm_settings['virtual_net'] + resource_group = vm_settings.get("resource_group", self.system.resource_group) + location = vm_settings.get("region_api", self.system.region) + subnet = vm_settings["subnet_range"] + address_space = vm_settings["address_space"] + vnet_name = vm_settings["virtual_net"] # checking whether passed vm size value is correct vm_sizes = {t.value for t in ComputeManagementClient.models().VirtualMachineSizeTypes} - vm_size = vm_settings['vm_size'] + vm_size = vm_settings["vm_size"] if vm_size not in vm_sizes: - raise ValueError("wrong vm size %s passed. possible size: %s", vm_size, - ",".join(vm_sizes)) + raise ValueError( + f"wrong vm size {vm_size} passed. possible sizes: {','.join(vm_sizes)}" + ) - storage_container = vm_settings['storage_container'] + storage_container = vm_settings["storage_container"] # nsg_name = vm_settings['network_nsg'] # todo: check whether nsg is necessary at all # allocating public ip address for new vm public_ip_params = { - 'location': location, - 'public_ip_allocation_method': 'Dynamic' + "location": location, + "public_ip_allocation_method": "Dynamic", } public_ip = self.system.network_client.public_ip_addresses.create_or_update( resource_group_name=resource_group, public_ip_address_name=vm_name, - parameters=public_ip_params + parameters=public_ip_params, ).result() # creating virtual network virtual_networks = self.system.network_client.virtual_networks if vnet_name not in [v.name for v in virtual_networks.list(resource_group)]: vnet_params = { - 'location': location, - 'address_space': { - 'address_prefixes': [address_space] - } + "location": location, + "address_space": {"address_prefixes": [address_space]}, } virtual_networks.create_or_update( resource_group_name=resource_group, virtual_network_name=vnet_name, - parameters=vnet_params + parameters=vnet_params, ).result() # creating sub net - subnet_name = 'default' + subnet_name = "default" subnets = self.system.network_client.subnets if subnet_name not in [v.name for v in subnets.list(resource_group, vnet_name)]: vsubnet = subnets.create_or_update( resource_group_name=resource_group, virtual_network_name=vnet_name, - subnet_name='default', - subnet_parameters={'address_prefix': subnet} + subnet_name="default", + subnet_parameters={"address_prefix": subnet}, ).result() else: vsubnet = subnets.get( resource_group_name=resource_group, virtual_network_name=vnet_name, - subnet_name='default') + subnet_name="default", + ) # creating network interface nic_params = { - 'location': location, - 'ip_configurations': [{ - 'name': vm_name, - 'public_ip_address': public_ip, - 'subnet': { - 'id': vsubnet.id + "location": location, + "ip_configurations": [ + { + "name": vm_name, + "public_ip_address": public_ip, + "subnet": {"id": vsubnet.id}, } - }] + ], } def _create_or_update_nic(): return self.system.network_client.network_interfaces.create_or_update( resource_group_name=resource_group, network_interface_name=vm_name, - parameters=nic_params + parameters=nic_params, ).result() try: @@ -431,77 +448,78 @@ def _create_or_update_nic(): # preparing os disk # todo: replace with copy disk operation self.system.copy_blob_image( - self.name, vm_name, vm_settings['storage_account'], - self._container, storage_container + self.name, + vm_name, +
vm_settings["storage_account"], + self._container, + storage_container, ) image_uri = self.system.container_client.make_blob_url( - container_name=storage_container, blob_name=vm_name) + container_name=storage_container, blob_name=vm_name + ) # creating virtual machine vm_parameters = { - 'location': location, - 'hardware_profile': { - 'vm_size': vm_size - }, - 'storage_profile': { - 'os_disk': { - 'os_type': 'Linux', # TODO: why is this hardcoded? - 'name': vm_name, - 'vhd': ComputeManagementClient.models().VirtualHardDisk(uri='{}.vhd' - .format(image_uri)), - 'create_option': ComputeManagementClient.models().DiskCreateOptionTypes.attach, + "location": location, + "hardware_profile": {"vm_size": vm_size}, + "storage_profile": { + "os_disk": { + "os_type": "Linux", # TODO: why is this hardcoded? + "name": vm_name, + "vhd": ComputeManagementClient.models().VirtualHardDisk(uri=f"{image_uri}.vhd"), + "create_option": ComputeManagementClient.models().DiskCreateOptionTypes.attach, } }, - 'network_profile': { - 'network_interfaces': [{ - 'id': nic.id - }] - }, + "network_profile": {"network_interfaces": [{"id": nic.id}]}, } vm = self.system.compute_client.virtual_machines.create_or_update( resource_group_name=resource_group, vm_name=vm_name, - parameters=vm_parameters).result() + parameters=vm_parameters, + ).result() vm = AzureInstance( - system=self.system, name=vm.name, - resource_group=vm_settings['resource_group'], raw=vm) + system=self.system, + name=vm.name, + resource_group=vm_settings["resource_group"], + raw=vm, + ) vm.wait_for_state(VmState.RUNNING) return vm class AzureSystem(System, VmMixin, TemplateMixin): - """This class is used to connect to Microsoft Azure Portal via PowerShell AzureRM Module - """ + """This class is used to connect to Microsoft Azure Portal via PowerShell AzureRM Module""" + _stats_available = { - 'num_vm': lambda self: len(self.list_vms()), - 'num_template': lambda self: len(list(self.list_compute_images())), + "num_vm": lambda self: len(self.list_vms()), + "num_template": lambda self: len(list(self.list_compute_images())), } can_suspend = True can_pause = False def __init__(self, **kwargs): - super(AzureSystem, self).__init__(**kwargs) + super().__init__(**kwargs) self.client_id = kwargs.get("username") self.client_secret = kwargs.get("password") self.tenant = kwargs.get("tenant_id") self.subscription_id = kwargs.get("subscription_id") - self.resource_group = kwargs['provisioning']['resource_group'] # default resource group + self.resource_group = kwargs["provisioning"]["resource_group"] # default resource group self.storage_account = kwargs.get("storage_account") self.storage_key = kwargs.get("storage_key") - self.template_container = kwargs['provisioning']['template_container'] - self.orphaned_discs_path = 'Microsoft.Compute/Images/templates/' - self.region = kwargs["provisioning"]["region_api"].replace(' ', '').lower() + self.template_container = kwargs["provisioning"]["template_container"] + self.orphaned_discs_path = "Microsoft.Compute/Images/templates/" + self.region = kwargs["provisioning"]["region_api"].replace(" ", "").lower() - self.credentials = ServicePrincipalCredentials(client_id=self.client_id, - secret=self.client_secret, - tenant=self.tenant) + self.credentials = ServicePrincipalCredentials( + client_id=self.client_id, secret=self.client_secret, tenant=self.tenant + ) @property def _identifying_attrs(self): return { - 'tenant': self.tenant, - 'subscription_id': self.subscription_id, - 'storage_account': self.storage_account + "tenant": 
self.tenant, + "subscription_id": self.subscription_id, + "storage_account": self.storage_account, } @property @@ -514,14 +532,20 @@ def can_pause(self): def __setattr__(self, key, value): """If the subscription_id is changed, invalidate client caches""" - if key in ['credentials', 'subscription_id']: - for client in ['compute_client', 'iot_client', 'resource_client', 'network_client', - 'subscription_client', 'storage_client']: + if key in ["credentials", "subscription_id"]: + for client in [ + "compute_client", + "iot_client", + "resource_client", + "network_client", + "subscription_client", + "storage_client", + ]: if getattr(self, client, False): del self.__dict__[client] - if key in ['storage_account', 'storage_key']: - if getattr(self, 'container_client', False): - del self.__dict__['container_client'] + if key in ["storage_account", "storage_key"]: + if getattr(self, "container_client", False): + del self.__dict__["container_client"] self.__dict__[key] = value @cached_property @@ -559,7 +583,7 @@ def vms_collection(self): def create_vm(self, vm_name, *args, **kwargs): raise NotImplementedError - def create_iothub(self, name, sku_name='F1', sku_capacity=1): + def create_iothub(self, name, sku_name="F1", sku_capacity=1): """ Create an iothub in Azure with the specified name. sku_name and sku_capacity are required for the creation @@ -568,14 +592,13 @@ def create_iothub(self, name, sku_name='F1', sku_capacity=1): async_iot_hub = self.iot_client.iot_hub_resource.create_or_update( self.resource_group, name, - {'location': self.region, - 'subscriptionid': self.subscription_id, - 'resourcegroup': self.resource_group, - 'sku': { - 'name': sku_name, - 'capacity': sku_capacity - }, - 'features': 'None'} + { + "location": self.region, + "subscriptionid": self.subscription_id, + "resourcegroup": self.resource_group, + "sku": {"name": sku_name, "capacity": sku_capacity}, + "features": "None", + }, ) return async_iot_hub.result() @@ -597,10 +620,13 @@ def find_vms(self, name=None, resource_group=None): resource_groups = [resource_group] if resource_group else self.list_resource_groups() for res_group in resource_groups: vms = self.vms_collection.list(resource_group_name=res_group) - vm_list.extend([ - AzureInstance(system=self, name=vm.name, resource_group=res_group, raw=vm) - for vm in vms if vm.location == self.region - ]) + vm_list.extend( + [ + AzureInstance(system=self, name=vm.name, resource_group=res_group, raw=vm) + for vm in vms + if vm.location == self.region + ] + ) if name: return [vm for vm in vm_list if vm.name == name] return vm_list @@ -616,12 +642,14 @@ def get_vm(self, name): return vms[0] def data(self, vm_name, resource_group=None): - raise NotImplementedError('data not implemented.') + raise NotImplementedError("data not implemented.") def list_subscriptions(self): - return [(str(s.display_name), str(s.subscription_id)) for s in - self.subscription_client.subscriptions.list() if - s.state == SubscriptionState.enabled] + return [ + (str(s.display_name), str(s.subscription_id)) + for s in self.subscription_client.subscriptions.list() + if s.state == SubscriptionState.enabled + ] def list_region(self, subscription=None): """ @@ -633,21 +661,27 @@ def list_region(self, subscription=None): Return: list of tuples - (name, display_name) """ subscription = subscription or self.subscription_id - return [(region.name, region.display_name) for region in - self.subscription_client.subscriptions.list_locations(subscription)] + return [ + (region.name, region.display_name) + for region in 
self.subscription_client.subscriptions.list_locations(subscription) + ] def list_storage_accounts_by_resource_group(self, resource_group): """List Azure Storage accounts on current subscription by resource group""" return [ - s.name for s in - self.storage_client.storage_accounts.list_by_resource_group(resource_group) + s.name + for s in self.storage_client.storage_accounts.list_by_resource_group(resource_group) ] def get_storage_account_key(self, storage_account_name, resource_group): """Each Storage account has 2 keys by default - both are valid and interchangeable""" - keys = {v.key_name: v.value for v in self.storage_client.storage_accounts.list_keys( - resource_group, storage_account_name).keys} - return keys['key1'] + keys = { + v.key_name: v.value + for v in self.storage_client.storage_accounts.list_keys( + resource_group, storage_account_name + ).keys + } + return keys["key1"] def list_resource_groups(self): """ @@ -682,16 +716,19 @@ def list_all_resources_by_resource_group(self, resource_group=None): List all resources in selected resource_group """ resource_group = resource_group or self.resource_group - return list(self.resource_client.resources.list_by_resource_group(resource_group, expand="changedTime,createdTime")) + return list( + self.resource_client.resources.list_by_resource_group( + resource_group, expand="changedTime,createdTime" + ) + ) def list_free_discs(self, disc_template=None, resource_group=None): """ List free (unattached) disc(s), optionally filtered by disc_template, in selected resource_group """ resource_group = resource_group or self.resource_group - disks = self.compute_client.disks.list_by_resource_group( - resource_group_name=resource_group) - all_free_discs = [disk.name for disk in disks if not disk.disk_state == 'Attached'] + disks = self.compute_client.disks.list_by_resource_group(resource_group_name=resource_group) + all_free_discs = [disk.name for disk in disks if not disk.disk_state == "Attached"] if disc_template: return [disc_name for disc_name in all_free_discs if disc_template in disc_name] return all_free_discs @@ -715,14 +752,14 @@ def list_stack(self, resource_group=None, days_old=0): return found_stacks def list_flavor(self): - raise NotImplementedError('list_flavor not implemented.') + raise NotImplementedError("list_flavor not implemented.") def list_network(self): self.logger.info("Attempting to list Azure Virtual Private Networks in '%s'", self.region) # Azure API returns all networks from all regions, and there is no option to filter by region. # In CFME only the networks of the provider regions are displayed. all_networks = self.network_client.virtual_networks.list_all() - self.logger.debug('self.region=%s', self.region) + self.logger.debug("self.region=%s", self.region) networks_in_region = [] for network in all_networks: if network.location == self.region: @@ -735,7 +772,7 @@ def list_subnet(self): # is only one network in the resource_group defined in cfme_data.
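# Both listings filter client-side on the provider region, e.g. with the
# hypothetical `azure_system` instance used earlier:
vnets = azure_system.list_network()   # only VNets whose location matches azure_system.region
subnets = azure_system.list_subnet()  # the same region filter is applied per network below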
all_networks = self.network_client.virtual_networks.list_all() - self.logger.debug('self.region=%s', self.region) + self.logger.debug("self.region=%s", self.region) subnets = dict() for network in all_networks: if network.location == self.region: @@ -746,8 +783,8 @@ def list_subnet(self): def list_security_group(self): self.logger.info("Attempting to List Azure security groups") all_sec_groups = self.network_client.network_security_groups.list_all() - self.logger.debug('self.region=%s', self.region) - location = self.region.replace(' ', '').lower() + self.logger.debug("self.region=%s", self.region) + location = self.region.replace(" ", "").lower() sec_groups_in_location = [] for sec_gp in all_sec_groups: if sec_gp.location == location: @@ -756,8 +793,11 @@ def list_security_group(self): def list_security_group_ports(self, sec_group_name, resource_group=None): resource_group = resource_group or self.resource_group - self.logger.info('Attempting to List ports from Azure security group "%s"' - 'in resource group "%s"', sec_group_name, resource_group) + self.logger.info( + 'Attempting to List ports from Azure security group "%s"' 'in resource group "%s"', + sec_group_name, + resource_group, + ) sg_rules = self.network_client.security_rules.list(resource_group, sec_group_name) sg_ports = [sgr.destination_port_range for sgr in sg_rules] return sg_ports @@ -777,7 +817,7 @@ def disconnect(self): def _age_filter(self, resource, hours_old=0): now_time = datetime.utcnow().replace(tzinfo=pytz.utc) - start_time = parser.parse(resource.additional_properties['createdTime']) + start_time = parser.parse(resource.additional_properties["createdTime"]) timediff = now_time - start_time totalhours = timediff.total_seconds() / 3600 @@ -790,7 +830,11 @@ def _list_resources(self, resource_group=None, hours_old=0): hours_old = float(hours_old) resources = self.list_all_resources_by_resource_group(resource_group) - return filter(lambda f: self._age_filter(f, hours_old), resources) if bool(hours_old) else resources + return ( + filter(lambda f: self._age_filter(f, hours_old), resources) + if bool(hours_old) + else resources + ) def list_resources_from_hours_old(self, resource_group=None, hours_old=0): """ @@ -801,12 +845,12 @@ def list_resources_from_hours_old(self, resource_group=None, hours_old=0): def remove_resource_group_of_old_resources(self, resource_group=None, hours_old=0): """ - Used for clean_up jobs to remove group containing resources older than hours_old numeric value. + Used for clean_up jobs to remove group containing resources older than hours_old. Age is calculated as difference of current time and resource creation timestamp. 
""" hours_old = float(hours_old) - self.logger.info('Attempting to remove all old resources') + self.logger.info("Attempting to remove all old resources") resource_group = resource_group or self.resource_group resources = self._list_resources(resource_group, hours_old) @@ -819,25 +863,27 @@ def remove_nics_by_search(self, nic_template=None, resource_group=None): in selected resource_group.If None (default) resource_group provided, the instance's resource group is used instead """ - self.logger.info('Attempting to List all unused NICs') + self.logger.info("Attempting to List all unused NICs") results = [] - nic_list = self.list_free_nics(nic_template, - resource_group=resource_group or self.resource_group) + nic_list = self.list_free_nics( + nic_template, resource_group=resource_group or self.resource_group + ) for nic in nic_list: try: operation = self.network_client.network_interfaces.delete( resource_group_name=resource_group or self.resource_group, - network_interface_name=nic) + network_interface_name=nic, + ) except CloudError as e: - self.logger.error('{} nic can\'t be removed - {}'.format(nic, e.error.error)) + self.logger.error(f"{nic} nic can't be removed - {e.error.error}") results.append((nic, e.error.error)) continue operation.wait() self.logger.info('"%s" nic removed', nic) results.append((nic, operation.status())) if not results: - self.logger.debug('No unused/unattached NIC(s) found to be removed!') + self.logger.debug("No unused/unattached NIC(s) found to be removed!") return results def remove_pips_by_search(self, pip_template=None, resource_group=None): @@ -846,20 +892,22 @@ def remove_pips_by_search(self, pip_template=None, resource_group=None): in selected resource_group. If None (default) resource_group provided, the instance's resource group is used instead """ - self.logger.info('Attempting to list all unused Public IPs') + self.logger.info("Attempting to list all unused Public IPs") results = [] - pip_list = self.list_free_pip(pip_template, - resource_group=resource_group or self.resource_group) + pip_list = self.list_free_pip( + pip_template, resource_group=resource_group or self.resource_group + ) for pip in pip_list: operation = self.network_client.public_ip_addresses.delete( resource_group_name=resource_group or self.resource_group, - public_ip_address_name=pip) + public_ip_address_name=pip, + ) operation.wait() self.logger.info('"%s" pip removed', pip) results.append((pip, operation.status())) if not results: - self.logger.debug('No unused/unattached PIPs found to be removed!') + self.logger.debug("No unused/unattached PIPs found to be removed!") return results def remove_discs_by_search(self, disc_name=None, resource_group=None): @@ -868,39 +916,42 @@ def remove_discs_by_search(self, disc_name=None, resource_group=None): """ results = [] if disc_name: - self.logger.info('Attempting to find the disc image {}'.format(disc_name)) - discs = self.find_templates(container='system', - prefix='{}{}'.format(self.orphaned_discs_path, disc_name)) + self.logger.info(f"Attempting to find the disc image {disc_name}") + discs = self.find_templates( + container="system", prefix=f"{self.orphaned_discs_path}{disc_name}" + ) for disc in discs: disc.delete() - self.logger.info('disc {} removed'.format(disc_name)) + self.logger.info(f"disc {disc_name} removed") results.append(disc_name) if not results: - self.logger.debug('No discs matching {} were found'.format(disc_name)) + self.logger.debug(f"No discs matching {disc_name} were found") else: # Remove all discs - 
self.logger.info('Attempting to find all the unattached disks and delete.') + self.logger.info("Attempting to find all the unattached disks and delete.") discs = self.list_free_discs(resource_group=self.resource_group) for disc_name in discs: operation = self.compute_client.disks.delete( resource_group_name=resource_group or self.resource_group, - disk_name=disc_name) + disk_name=disc_name, + ) operation.wait() self.logger.info('"%s" disc removed', disc_name) results.append((disc_name, operation.status())) if not results: - self.logger.debug('No unused/attached discs were found to be removed!') + self.logger.debug("No unused/unattached discs were found to be removed!") return results def create_netsec_group(self, group_name, resource_group=None): security_groups = self.network_client.network_security_groups self.logger.info("Attempting to Create New Azure Security Group %s", group_name) nsg = NetworkSecurityGroup(location=self.region) - operation = security_groups.create_or_update(resource_group_name=resource_group or - self.resource_group, - network_security_group_name=group_name, - parameters=nsg) + operation = security_groups.create_or_update( + resource_group_name=resource_group or self.resource_group, + network_security_group_name=group_name, + parameters=nsg, + ) operation.wait() self.logger.info("Network Security Group '%s' is created", group_name) return operation.status() @@ -913,25 +964,41 @@ def remove_netsec_group(self, group_name, resource_group=None): """ self.logger.info("Attempting to Remove Azure Security Group '%s'", group_name) security_groups = self.network_client.network_security_groups - operation = security_groups.delete(resource_group_name=resource_group or - self.resource_group, - network_security_group_name=group_name) + operation = security_groups.delete( + resource_group_name=resource_group or self.resource_group, + network_security_group_name=group_name, + ) operation.wait() self.logger.info("Network Security Group '%s' is removed", group_name) return operation.status() - def create_netsec_group_port_allow(self, secgroup_name, protocol, - source_address_prefix, destination_address_prefix, access, direction, - resource_group=None, **kwargs): + def create_netsec_group_port_allow( + self, + secgroup_name, + protocol, + source_address_prefix, + destination_address_prefix, + access, + direction, + resource_group=None, + **kwargs, + ): resource_group = resource_group or self.resource_group - self.logger.info("Attempting to Create New Azure Security Group " - "Rule '%s'.", secgroup_name) + self.logger.info( + "Attempting to Create New Azure Security Group " "Rule '%s'.", secgroup_name + ) parameters = NetworkSecurityGroup(location=self.region) parameters.security_rules = [ - SecurityRule(protocol=protocol, source_address_prefix=source_address_prefix, - destination_address_prefix=destination_address_prefix, - access=access, direction=direction, **kwargs)] + SecurityRule( + protocol=protocol, + source_address_prefix=source_address_prefix, + destination_address_prefix=destination_address_prefix, + access=access, + direction=direction, + **kwargs, + ) + ] nsg = self.network_client.network_security_groups operation = nsg.create_or_update(resource_group, secgroup_name, parameters) operation.wait() @@ -940,7 +1007,7 @@ def create_netsec_group_port_allow(self, secgroup_name, protocol, def list_load_balancer(self): self.logger.info("Attempting to List Azure Load Balancers") - self.logger.debug('self.region=%s', self.region) + self.logger.debug("self.region=%s", self.region)
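# --- illustrative sketch, not part of the patch: the security-group and disc
# methods above all share one shape -- start an Azure long-running operation,
# block on .wait(), then report .status().  FakePoller below is hypothetical,
# a dependency-free stand-in for the pollers the azure-mgmt clients return:
class FakePoller:
    def __init__(self):
        self._done = False

    def wait(self):
        # a real poller blocks here until the service finishes the operation
        self._done = True

    def status(self):
        return "Succeeded" if self._done else "InProgress"


def run_lro(start):
    operation = start()  # returns immediately with a poller
    operation.wait()  # block until the operation completes
    return operation.status()  # e.g. "Succeeded"


assert run_lro(FakePoller) == "Succeeded"
# --- end of sketch ---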
all_lbs = self.network_client.load_balancers.list_all() lbs_in_location = [] for lb in all_lbs: @@ -958,22 +1025,27 @@ def remove_diags_container(self, container_client=None): """ container_client = container_client or self.container_client for container in container_client.list_containers(): - if container.name.startswith('bootdiagnostics-test'): + if container.name.startswith("bootdiagnostics-test"): self.logger.info("Removing container '%s'", container.name) self.container_client.delete_container(container_name=container.name) - self.logger.info("All diags containers are removed from '%s'", - container_client.account_name) + self.logger.info( + "All diags containers are removed from '%s'", container_client.account_name + ) - def copy_blob_image(self, template, vm_name, storage_account, - template_container, storage_container): + def copy_blob_image( + self, template, vm_name, storage_account, template_container, storage_container + ): # todo: weird method to refactor it later container_client = BlockBlobService(storage_account, self.storage_key) - src_uri = container_client.make_blob_url(container_name=template_container, - blob_name=template) - operation = container_client.copy_blob(container_name=storage_container, - blob_name=vm_name + ".vhd", - copy_source=src_uri) - wait_for(lambda: operation.status != 'pending', timeout='10m', delay=15) + src_uri = container_client.make_blob_url( + container_name=template_container, blob_name=template + ) + operation = container_client.copy_blob( + container_name=storage_container, + blob_name=vm_name + ".vhd", + copy_source=src_uri, + ) + wait_for(lambda: operation.status != "pending", timeout="10m", delay=15) # copy operation obj.status->str return operation.status @@ -981,13 +1053,15 @@ def _remove_container_blob(self, container_client, container, blob, remove_snaps # Redundant with AzureBlobImage.delete(), but used below in self.remove_unused_blobs() self.logger.info("Removing Blob '%s' from container '%s'", blob.name, container.name) try: - container_client.delete_blob( - container_name=container.name, blob_name=blob.name) + container_client.delete_blob(container_name=container.name, blob_name=blob.name) except AzureConflictHttpError as e: - if 'SnapshotsPresent' in str(e) and remove_snapshots: + if "SnapshotsPresent" in str(e) and remove_snapshots: self.logger.warn("Blob '%s' has snapshots present, removing them", blob.name) container_client.delete_blob( - container_name=container.name, blob_name=blob.name, delete_snapshots="include") + container_name=container.name, + blob_name=blob.name, + delete_snapshots="include", + ) else: raise @@ -1010,28 +1084,31 @@ def remove_unused_blobs(self, resource_group=None): container_client = BlockBlobService(storage_account, key) for container in container_client.list_containers(): removed_blobs[resource_group][storage_account][container.name] = [] - for blob in container_client.list_blobs(container_name=container.name, - prefix='test'): - if blob.properties.lease.status == 'unlocked': + for blob in container_client.list_blobs( + container_name=container.name, prefix="test" + ): + if blob.properties.lease.status == "unlocked": self._remove_container_blob(container_client, container, blob) removed_blobs[resource_group][storage_account][container.name].append( - blob.name) + blob.name + ) # also delete unused 'bootdiag' containers self.remove_diags_container(container_client) # removing managed disks removed_disks = [] for disk in self.compute_client.disks.list_by_resource_group(resource_group): - if
disk.name.startswith('test') and disk.managed_by is None: + if disk.name.startswith("test") and disk.managed_by is None: self.logger.info("Removing disk '%s'", disk.name) - self.compute_client.disks.delete(resource_group_name=resource_group, - disk_name=disk.name) - removed_disks.append({'resource_group': resource_group, - 'disk': disk.name}) + self.compute_client.disks.delete( + resource_group_name=resource_group, disk_name=disk.name + ) + removed_disks.append({"resource_group": resource_group, "disk": disk.name}) if not removed_disks: - self.logger.debug("No Managed disks matching 'test*' were found in '%s'", - resource_group) + self.logger.debug( + "No Managed disks matching 'test*' were found in '%s'", resource_group + ) - return {'Managed': removed_disks, 'Unmanaged': removed_blobs} + return {"Managed": removed_disks, "Unmanaged": removed_blobs} def create_template(self, *args, **kwargs): raise NotImplementedError @@ -1058,13 +1135,18 @@ def find_templates(self, name=None, container=None, prefix=None, only_vhd=True): continue for image in self.container_client.list_blobs(found_container_name, prefix=prefix): img_name = image.name - if only_vhd and (not img_name.endswith('.vhd') and not img_name.endswith('.vhdx')): + if only_vhd and (not img_name.endswith(".vhd") and not img_name.endswith(".vhdx")): continue if name and name.lower() != img_name.lower(): continue matches.append( AzureBlobImage( - system=self, name=img_name, container=found_container_name, raw=image)) + system=self, + name=img_name, + container=found_container_name, + raw=image, + ) + ) return matches def list_templates(self): @@ -1075,18 +1157,23 @@ def list_compute_images(self): return self.resource_client.resources.list( - filter="resourceType eq 'Microsoft.Compute/images'") + filter="resourceType eq 'Microsoft.Compute/images'" + ) def list_compute_images_by_resource_group(self, resource_group=None, free_images=None): """ Args: resource_group (str): Name of the resource group - free_images (bool): Whether to collect image which do not have any resource(VM) linked to it + free_images (bool): Whether to collect images which do not have + any resource (VM) linked to them """ resource_group = resource_group or self.resource_group image_list = list( self.resource_client.resources.list( - filter=f"resourceType eq 'Microsoft.Compute/images' and resourceGroup eq '{resource_group}'" + filter=( + "resourceType eq 'Microsoft.Compute/images' " + f"and resourceGroup eq '{resource_group}'" + ) ) ) @@ -1101,8 +1188,7 @@ def list_compute_images_by_resource_group(self, resource_group=None, free_images for vm_name in vm_list: images_used_by_vm.append( self.compute_client.virtual_machines.get( - resource_group_name=resource_group, - vm_name=vm_name + resource_group_name=resource_group, vm_name=vm_name ).storage_profile.image_reference.id ) @@ -1131,9 +1217,7 @@ def get_template(self, name, container=None): if not templates: raise ImageNotFoundError(name) elif len(templates) > 1: - raise MultipleImagesError( - "Image with name '{}' exists in multiple containers".format(name) - ) + raise MultipleImagesError(f"Image with name '{name}' exists in multiple containers") return templates[0] # TODO: Refactor the below stack methods into the StackMixin/StackEntity structure @@ -1146,11 +1230,16 @@ def delete_stack(self, stack_name, resource_group=None): """ self.logger.info("Removes a Deployment Stack resource created with Orchestration") deps = self.resource_client.deployments - operation =
deps.delete(resource_group_name=resource_group or self.resource_group, - deployment_name=stack_name) + operation = deps.delete( + resource_group_name=resource_group or self.resource_group, + deployment_name=stack_name, + ) operation.wait() - self.logger.info("'%s' was removed from '%s' resource group", stack_name, - resource_group or self.resource_group) + self.logger.info( + "'%s' was removed from '%s' resource group", + stack_name, + resource_group or self.resource_group, + ) return operation.status() def delete_stack_by_date(self, days_old, resource_group=None): @@ -1162,8 +1251,9 @@ def delete_stack_by_date(self, days_old, resource_group=None): self.logger.info("Removing Deployment Stack '%s'", stack) result = self.delete_stack(stack_name=stack, resource_group=resource_group) results.append((stack, result)) - self.logger.info("Attempt to remove Stack '%s' finished with status '%s'", stack, - result) + self.logger.info( + "Attempt to remove Stack '%s' finished with status '%s'", stack, result + ) return results def delete_compute_image_by_resource_group(self, resource_group=None, image_list=None): @@ -1177,7 +1267,9 @@ def delete_compute_image_by_resource_group(self, resource_group=None, image_list resource_group = resource_group or self.resource_group for image in image_list: self.logger.info("Deleting '%s' from '%s'", image, resource_group) - response = self.compute_client.images.delete(resource_group_name=resource_group, image_name=image) + response = self.compute_client.images.delete( + resource_group_name=resource_group, image_name=image + ) result.append((image, response)) return result @@ -1185,50 +1277,50 @@ def list_stack_resources(self, stack_name, resource_group=None): self.logger.info("Checking Stack %s resources ", stack_name) # todo: weird implementation to refactor this method later resources = { - 'vms': [], - 'nics': [], - 'pips': [], + "vms": [], + "nics": [], + "pips": [], } dep_op_list = self.resource_client.deployment_operations.list( resource_group_name=resource_group or self.resource_group, - deployment_name=stack_name + deployment_name=stack_name, ) for dep in dep_op_list: if dep.properties.target_resource: target = dep.properties.target_resource res_type, res_name = (target.resource_type, target.resource_name) - if res_type == 'Microsoft.Compute/virtualMachines': + if res_type == "Microsoft.Compute/virtualMachines": try: self.compute_client.virtual_machines.get( resource_group_name=resource_group or self.resource_group, - vm_name=res_name + vm_name=res_name, ) res_exists = True except CloudError: res_exists = False - resources['vms'].append((res_name, res_exists)) - elif res_type == 'Microsoft.Network/networkInterfaces': + resources["vms"].append((res_name, res_exists)) + elif res_type == "Microsoft.Network/networkInterfaces": try: self.network_client.network_interfaces.get( resource_group_name=resource_group or self.resource_group, - network_interface_name=res_name + network_interface_name=res_name, ) res_exists = True except CloudError: res_exists = False - resources['nics'].append((res_name, res_exists)) - elif res_type == 'Microsoft.Network/publicIpAddresses': + resources["nics"].append((res_name, res_exists)) + elif res_type == "Microsoft.Network/publicIpAddresses": # todo: double check this match try: self.network_client.public_ip_addresses.get( resource_group_name=resource_group or self.resource_group, - public_ip_address_name=res_name + public_ip_address_name=res_name, ) res_exists = True except CloudError: res_exists = False - 
resources['pips'].append((res_name, res_exists)) + resources["pips"].append((res_name, res_exists)) return resources def is_stack_empty(self, stack_name, resource_group): diff --git a/wrapanapi/systems/nuage.py b/wrapanapi/systems/nuage.py index 8488ea47..fb044349 100644 --- a/wrapanapi/systems/nuage.py +++ b/wrapanapi/systems/nuage.py @@ -1,8 +1,6 @@ -# coding: utf-8 """Backend management system classes Used to communicate with providers without using CFME facilities """ - from wrapanapi.systems.base import System from wrapanapi.utils.random import random_name @@ -21,31 +19,34 @@ class NuageSystem(System): _stats_available = { # We're returning 3rd element of .count() tuple which is formed as # entities.count() == (fetcher, served object, count of fetched objects) - 'num_security_group': lambda self: self.api.policy_groups.count()[2], + "num_security_group": lambda self: self.api.policy_groups.count()[2], # Filter out 'BackHaulSubnet' and combine it with l2_domains the same way CloudForms does - 'num_cloud_subnet': lambda self: self.api.subnets.count( - filter="name != 'BackHaulSubnet'")[2] + self.api.l2_domains.count()[2], - 'num_cloud_tenant': lambda self: self.api.enterprises.count()[2], - 'num_network_router': lambda self: self.api.domains.count()[2], - 'num_cloud_network': lambda self: len(self.list_floating_network_resources()), - 'num_floating_ip': lambda self: self.api.floating_ips.count()[2], - 'num_network_port': lambda self: len(self.list_vports()) + "num_cloud_subnet": lambda self: self.api.subnets.count(filter="name != 'BackHaulSubnet'")[ + 2 + ] + + self.api.l2_domains.count()[2], + "num_cloud_tenant": lambda self: self.api.enterprises.count()[2], + "num_network_router": lambda self: self.api.domains.count()[2], + "num_cloud_network": lambda self: len(self.list_floating_network_resources()), + "num_floating_ip": lambda self: self.api.floating_ips.count()[2], + "num_network_port": lambda self: len(self.list_vports()), } - def __init__(self, hostname, username, password, api_port, api_version, security_protocol, - **kwargs): - super(NuageSystem, self).__init__(**kwargs) - protocol = 'http' if 'non' in security_protocol.lower() else 'https' + def __init__( + self, hostname, username, password, api_port, api_version, security_protocol, **kwargs + ): + super().__init__(**kwargs) + protocol = "http" if "non" in security_protocol.lower() else "https" self.username = username self.password = password - self.url = '{}://{}:{}'.format(protocol, hostname, api_port) - self.enterprise = 'csp' + self.url = f"{protocol}://{hostname}:{api_port}" + self.enterprise = "csp" self.api_version = api_version self._api = None @property def vspk(self): - if self.api_version == 'v4_0': + if self.api_version == "v4_0": from vspk import v4_0 as vspk else: from vspk import v5_0 as vspk @@ -58,7 +59,7 @@ def api(self): username=self.username, password=self.password, enterprise=self.enterprise, - api_url=self.url + api_url=self.url, ) session.start() self._api = session.user @@ -69,10 +70,10 @@ def disconnect(self): @property def _identifying_attrs(self): - return {'url': self.url} + return {"url": self.url} def info(self): - return 'NuageSystem: url={}'.format(self.url) + return f"NuageSystem: url={self.url}" def list_floating_network_resources(self): return self.api.shared_network_resources.get(filter='type is "FLOATING"') diff --git a/wrapanapi/systems/openstack.py b/wrapanapi/systems/openstack.py index 1415ff5f..4658a1b4 100644 --- a/wrapanapi/systems/openstack.py +++ b/wrapanapi/systems/openstack.py 
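# --- illustrative sketch, not part of the patch: NuageSystem above follows the
# wrapanapi-wide `_stats_available` convention -- a dict mapping stat names to
# callables that take the system instance.  DemoSystem is hypothetical and only
# shows how such a lambda table is typically resolved:
class DemoSystem:
    _stats_available = {
        "num_vm": lambda self: len(self.list_vms()),
        "num_template": lambda self: len(self.list_templates()),
    }

    def list_vms(self):
        return ["vm-a", "vm-b"]

    def list_templates(self):
        return ["template-a"]

    def stats(self, *requested_stats):
        # look each requested name up in the lambda table and call it
        return {stat: self._stats_available[stat](self) for stat in requested_stats}


assert DemoSystem().stats("num_vm", "num_template") == {"num_vm": 2, "num_template": 1}
# --- end of sketch ---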
@@ -1,9 +1,7 @@ -# coding: utf-8 """Backend management system classes Used to communicate with providers without using CFME facilities """ - import json import os import time @@ -30,13 +28,20 @@ from swiftclient.exceptions import ClientException as SwiftException from wait_for import wait_for -from wrapanapi.entities import ( - Instance, Template, TemplateMixin, VmMixin, VmState) -from wrapanapi.exceptions import ( - ActionTimedOutError, ImageNotFoundError, ItemNotFound, KeystoneVersionNotSupported, - MultipleImagesError, MultipleInstancesError, NetworkNameNotFound, NoMoreFloatingIPs, - VMInstanceNotFound -) +from wrapanapi.entities import Instance +from wrapanapi.entities import Template +from wrapanapi.entities import TemplateMixin +from wrapanapi.entities import VmMixin +from wrapanapi.entities import VmState +from wrapanapi.exceptions import ActionTimedOutError +from wrapanapi.exceptions import ImageNotFoundError +from wrapanapi.exceptions import ItemNotFound +from wrapanapi.exceptions import KeystoneVersionNotSupported +from wrapanapi.exceptions import MultipleImagesError +from wrapanapi.exceptions import MultipleInstancesError +from wrapanapi.exceptions import NetworkNameNotFound +from wrapanapi.exceptions import NoMoreFloatingIPs +from wrapanapi.exceptions import VMInstanceNotFound from wrapanapi.systems.base import System # TODO The following monkeypatch nonsense is criminal, and would be @@ -54,25 +59,26 @@ def _request_timeout_handler(self, url, method, retry_count=0, **kwargs): return SessionClient.request(self, url, method, **kwargs) except Timeout: if retry_count >= 3: - self._cfme_logger.error('nova request timed out after {} retries'.format(retry_count)) + self._cfme_logger.error(f"nova request timed out after {retry_count} retries") raise else: # feed back into the replaced method that supports retry_count retry_count += 1 - self._cfme_logger.info('nova request timed out; retry {}'.format(retry_count)) + self._cfme_logger.info(f"nova request timed out; retry {retry_count}") return self.request(url, method, retry_count=retry_count, **kwargs) -class _SharedMethodsMixin(object): +class _SharedMethodsMixin: """ Mixin class that holds properties/methods both VM's and templates share. 
This should be listed first in the child class inheritance to satisfy the methods required by the Vm/Template abstract base class """ + @property def _identifying_attrs(self): - return {'uuid': self._uuid} + return {"uuid": self._uuid} @property def name(self): @@ -92,20 +98,20 @@ def uuid(self): def creation_time(self): # Example vm.creation_time: 2014-08-14T23:29:30Z self.refresh() - creation_time = datetime.strptime(self.raw.created, '%Y-%m-%dT%H:%M:%SZ') + creation_time = datetime.strptime(self.raw.created, "%Y-%m-%dT%H:%M:%SZ") # create time is UTC, localize it, strip tzinfo return creation_time.replace(tzinfo=pytz.UTC) class OpenstackInstance(_SharedMethodsMixin, Instance): state_map = { - 'PAUSED': VmState.PAUSED, - 'ACTIVE': VmState.RUNNING, - 'SHUTOFF': VmState.STOPPED, - 'SUSPENDED': VmState.SUSPENDED, - 'ERROR': VmState.ERROR, - 'SHELVED': VmState.SHELVED, - 'SHELVED_OFFLOADED': VmState.SHELVED_OFFLOADED, + "PAUSED": VmState.PAUSED, + "ACTIVE": VmState.RUNNING, + "SHUTOFF": VmState.STOPPED, + "SUSPENDED": VmState.SUSPENDED, + "ERROR": VmState.ERROR, + "SHELVED": VmState.SHELVED, + "SHELVED_OFFLOADED": VmState.SHELVED_OFFLOADED, } def __init__(self, system, raw=None, **kwargs): @@ -117,8 +123,8 @@ def __init__(self, system, raw=None, **kwargs): raw: the raw novaclient Resource object (if already obtained) uuid: unique ID of the instance """ - super(OpenstackInstance, self).__init__(system, raw, **kwargs) - self._uuid = raw.id if raw else kwargs.get('uuid') + super().__init__(system, raw, **kwargs) + self._uuid = raw.id if raw else kwargs.get("uuid") if not self._uuid: raise ValueError("missing required kwarg: 'uuid'") self._api = self.system.api @@ -139,33 +145,35 @@ def _get_state(self): inst = self.raw status = self._api_state_to_vmstate(inst.status) if status == VmState.ERROR: - fault_code = 'UNKNOWN' - fault_msg = 'UNKNOWN' - if hasattr(inst, 'fault'): - fault_code = inst.fault['code'] - fault_msg = inst.fault['message'] + fault_code = "UNKNOWN" + fault_msg = "UNKNOWN" + if hasattr(inst, "fault"): + fault_code = inst.fault["code"] + fault_msg = inst.fault["message"] self.logger.error( - 'Instance %s in error state, code: %s, fault message: %s', - self.name, fault_code, fault_msg + "Instance %s in error state, code: %s, fault message: %s", + self.name, + fault_code, + fault_msg, ) return status def _get_networks(self): self.refresh() # TODO: Do we really need to access a private attr here? - return self.raw._info['addresses'] + return self.raw._info["addresses"] @property def ip(self): networks = self._get_networks() for network_nics in networks.values(): for nic in network_nics: - if nic['OS-EXT-IPS:type'] == 'floating': - return str(nic['addr']) + if nic["OS-EXT-IPS:type"] == "floating": + return str(nic["addr"]) @property def all_ips(self): - """ Get all the IPs on the machine + """Get all the IPs on the machine Returns: (list) the addresses assigned to the machine """ @@ -175,7 +183,7 @@ def all_ips(self): @property def flavor(self): if not self._flavor: - flavor_id = self.raw.flavor['id'] + flavor_id = self.raw.flavor["id"] self._flavor = self._api.flavors.get(flavor_id) return self._flavor @@ -209,9 +217,11 @@ def assign_floating_ip(self, floating_ip_pool, safety_timer=5): # so this will loop until it really get the address. A small timeout is added to ensure # the instance really got that address and other process did not steal it. # TODO: Introduce neutron client and its create+assign? 
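# --- illustrative sketch, not part of the patch: the loop that follows guards
# against two races -- another process stealing the chosen floating IP, and the
# pool running dry.  A stripped-down sketch of that retry shape; every name
# here is illustrative only:
import time


def acquire_address(instance, list_free_ips, create_ip, grace=1):
    # loop until the instance reports an address; a candidate IP can still be
    # stolen between selection and attachment, hence the re-check on each pass
    while instance.get("ip") is None:
        free_ips = list_free_ips()
        candidate = free_ips[0] if free_ips else create_ip()
        if candidate is None:
            raise RuntimeError("no more floating IPs available")
        instance["ip"] = candidate  # attach; may be raced in real deployments
        time.sleep(grace)  # grace period in which a theft could still happen
    return instance["ip"]


assert acquire_address({"ip": None}, lambda: [], lambda: "10.0.0.5", grace=0) == "10.0.0.5"
# --- end of sketch ---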
- allowed_exceptions = (os_exceptions.ClientException, - os_exceptions.OverLimit, - os_exceptions.NotFound) + allowed_exceptions = ( + os_exceptions.ClientException, + os_exceptions.OverLimit, + os_exceptions.NotFound, + ) while self.ip is None: free_ips = self.system.free_fips(floating_ip_pool) # We maintain 1 floating IP as a protection against race condition @@ -226,28 +236,28 @@ def assign_floating_ip(self, floating_ip_pool, safety_timer=5): try: ip = self._api.floating_ips.create(floating_ip_pool) except allowed_exceptions as e: - self.logger.error('Probably no more FIP slots available: %s', str(e)) + self.logger.error("Probably no more FIP slots available: %s", str(e)) free_ips = self.system.free_fips(floating_ip_pool) # So, try picking one from the list (there still might be one) if free_ips: # There is something free. Slight risk of race condition ip = free_ips[0] self.logger.info( - 'Reused %s from pool %s because no more free spaces for new ips', - ip.ip, floating_ip_pool + "Reused %s from pool %s because no more free spaces for new ips", + ip.ip, + floating_ip_pool, ) else: # Nothing can be done - raise NoMoreFloatingIPs( - 'Provider {} ran out of FIPs'.format(self.system.auth_url)) - self.logger.info('Created %s in pool %s', ip.ip, floating_ip_pool) + raise NoMoreFloatingIPs(f"Provider {self.system.auth_url} ran out of FIPs") + self.logger.info("Created %s in pool %s", ip.ip, floating_ip_pool) instance.add_floating_ip(ip) # Now the grace period in which a FIP theft could happen time.sleep(safety_timer) - self.logger.info('Instance %s got a floating IP %s', self.name, ip.ip) - assert self.ip == ip.ip, 'Current IP does not match reserved floating IP!' + self.logger.info("Instance %s got a floating IP %s", self.name, ip.ip) + assert self.ip == ip.ip, "Current IP does not match reserved floating IP!" 
return ip.ip def unassign_floating_ip(self): @@ -265,24 +275,24 @@ def unassign_floating_ip(self): return None floating_ip = floating_ips[0] self.logger.info( - 'Detaching floating IP %s/%s from %s', floating_ip.id, floating_ip.ip, instance.name) + "Detaching floating IP %s/%s from %s", floating_ip.id, floating_ip.ip, instance.name + ) instance.remove_floating_ip(floating_ip) - wait_for( - lambda: self.ip is None, delay=1, timeout='1m') + wait_for(lambda: self.ip is None, delay=1, timeout="1m") return floating_ip def delete(self, delete_fip=False): - self.logger.info(' Deleting OpenStack instance %s', self.name) + self.logger.info(" Deleting OpenStack instance %s", self.name) - self.logger.info(' Unassigning floating IP instance %s', self.name) + self.logger.info(" Unassigning floating IP instance %s", self.name) if delete_fip: self.system.delete_floating_ip(self.unassign_floating_ip()) else: self.unassign_floating_ip() - self.logger.info(' Delete in progress instance %s', self.name) + self.logger.info(" Delete in progress instance %s", self.name) self.raw.delete() - wait_for(lambda: not self.exists, timeout='3m', delay=5) + wait_for(lambda: not self.exists, timeout="3m", delay=5) return True def cleanup(self): @@ -290,7 +300,7 @@ def cleanup(self): return self.delete(delete_fip=True) def start(self): - self.logger.info(' Starting OpenStack instance %s', self.name) + self.logger.info(" Starting OpenStack instance %s", self.name) if self.is_running: return True @@ -301,11 +311,11 @@ def start(self): instance.unpause() else: instance.start() - wait_for(lambda: self.is_running, message='start {}'.format(self.name)) + wait_for(lambda: self.is_running, message=f"start {self.name}") return True def stop(self): - self.logger.info(' Stopping OpenStack instance %s', self.name) + self.logger.info(" Stopping OpenStack instance %s", self.name) if self.is_stopped: return True @@ -354,8 +364,8 @@ def mark_as_template(self, template_name=None, **kwargs): We have to rename the instance, create a snapshot of the original name and then delete the instance.""" - image_name = template_name or '{}_copy'.format(self.name) - self.logger.info('Marking %s as OpenStack template with name: %s', self.name, image_name) + image_name = template_name or f"{self.name}_copy" + self.logger.info("Marking %s as OpenStack template with name: %s", self.name, image_name) original_name = self.name # no new name passed, rename VM so template can take its name if not template_name: @@ -369,20 +379,21 @@ def mark_as_template(self, template_name=None, **kwargs): wait_for(lambda: self._api.images.get(uuid).status == "ACTIVE", num_sec=900, delay=5) self.delete() wait_for(lambda: not self.exists, num_sec=180, delay=5) - except Exception as e: - self.logger.error( - "Could not mark %s as a OpenStack template! 
(%s)", original_name, str(e)) + except Exception: + self.logger.exception("Could not mark %s as a OpenStack template!", original_name) try: self.rename(original_name) # Clean up after ourselves - except Exception as e: + except Exception: self.logger.exception( - 'Failed to rename %s back to original name (%s)', image_name, original_name) + "Failed to rename %s back to original name (%s)", image_name, original_name + ) raise return OpenstackImage(system=self.system, uuid=uuid) def set_meta_value(self, key, value): return self.raw.manager.set_meta_item( - self.raw, key, value if isinstance(value, str) else json.dumps(value)) + self.raw, key, value if isinstance(value, str) else json.dumps(value) + ) def get_meta_value(self, key): instance = self.raw @@ -394,10 +405,10 @@ def get_meta_value(self, key): # Support metadata set by others return data except KeyError: - raise KeyError('Metadata {} not found in {}'.format(key, instance.name)) + raise KeyError(f"Metadata {key} not found in {instance.name}") def get_hardware_configuration(self): - return {'ram': self.flavor.ram, 'cpu': self.flavor.vcpus} + return {"ram": self.flavor.ram, "cpu": self.flavor.vcpus} @property def attached_volumes(self): @@ -411,7 +422,7 @@ def attached_volumes(self): mgmt.get_vm(name='instance_name').attached_volumes """ - return [v['id'] for v in self.raw._info['os-extended-volumes:volumes_attached']] + return [v["id"] for v in self.raw._info["os-extended-volumes:volumes_attached"]] class OpenstackImage(_SharedMethodsMixin, Template): @@ -424,8 +435,8 @@ def __init__(self, system, raw=None, **kwargs): raw: the novaclient Image resource object if already obtained, or None uuid: uuid of image """ - super(OpenstackImage, self).__init__(system, raw, **kwargs) - self._uuid = raw.id if raw else kwargs.get('uuid') + super().__init__(system, raw, **kwargs) + self._uuid = raw.id if raw else kwargs.get("uuid") if not self._uuid: raise ValueError("missing required kwarg: 'uuid'") self._api = self.system.api @@ -451,7 +462,8 @@ def _get_or_create_override_flavor(self, flavor, cpu=None, ram=None): Keep the parameters from the original flavor """ self.logger.info( - 'RAM/CPU override of flavor %s: RAM %r MB, CPU: %r cores', flavor.name, ram, cpu) + "RAM/CPU override of flavor %s: RAM %r MB, CPU: %r cores", flavor.name, ram, cpu + ) ram = ram or flavor.ram cpu = cpu or flavor.vcpus disk = flavor.disk @@ -461,13 +473,18 @@ def _get_or_create_override_flavor(self, flavor, cpu=None, ram=None): is_public = flavor.is_public try: new_flavor = self._api.flavors.find( - ram=ram, vcpus=cpu, - disk=disk, ephemeral=ephemeral, swap=swap, - rxtx_factor=rxtx_factor, is_public=is_public) + ram=ram, + vcpus=cpu, + disk=disk, + ephemeral=ephemeral, + swap=swap, + rxtx_factor=rxtx_factor, + is_public=is_public, + ) except os_exceptions.NotFound: # The requested flavor was not found, create a custom one - self.logger.info('No suitable flavor found, creating a new one.') - base_flavor_name = '{}-{}M-{}C'.format(flavor.name, ram, cpu) + self.logger.info("No suitable flavor found, creating a new one.") + base_flavor_name = f"{flavor.name}-{ram}M-{cpu}C" flavor_name = base_flavor_name counter = 0 new_flavor = None @@ -478,25 +495,30 @@ def _get_or_create_override_flavor(self, flavor, cpu=None, ram=None): try: new_flavor = self._api.flavors.create( name=flavor_name, - ram=ram, vcpus=cpu, - disk=disk, ephemeral=ephemeral, swap=swap, - rxtx_factor=rxtx_factor, is_public=is_public) + ram=ram, + vcpus=cpu, + disk=disk, + ephemeral=ephemeral, + swap=swap, + 
rxtx_factor=rxtx_factor, + is_public=is_public, + ) except os_exceptions.Conflict: - self.logger.info( - 'Name %s is already taken, changing the name', flavor_name) + self.logger.info("Name %s is already taken, changing the name", flavor_name) counter += 1 - flavor_name = base_flavor_name + '_{}'.format(counter) + flavor_name = base_flavor_name + f"_{counter}" else: self.logger.info( - 'Created a flavor %r with id %r', new_flavor.name, new_flavor.id) + "Created a flavor %r with id %r", new_flavor.name, new_flavor.id + ) flavor = new_flavor else: - self.logger.info('Found a flavor %s', new_flavor.name) + self.logger.info("Found a flavor %s", new_flavor.name) flavor = new_flavor return flavor def deploy(self, vm_name, **kwargs): - """ Deploys an OpenStack instance from a template. + """Deploys an OpenStack instance from a template. For all available args, see ``create`` method found here: http://docs.openstack.org/python-novaclient/latest/reference/api/novaclient.v2.servers.html @@ -518,42 +540,38 @@ def deploy(self, vm_name, **kwargs): """ power_on = kwargs.pop("power_on", True) nics = [] - timeout = kwargs.pop('timeout', 900) - - if 'flavor_name' in kwargs: - flavor = self._api.flavors.find(name=kwargs['flavor_name']) - elif 'instance_type' in kwargs: - flavor = self._api.flavors.find(name=kwargs['instance_type']) - elif 'flavor_id' in kwargs: - flavor = self._api.flavors.find(id=kwargs['flavor_id']) + timeout = kwargs.pop("timeout", 900) + + if "flavor_name" in kwargs: + flavor = self._api.flavors.find(name=kwargs["flavor_name"]) + elif "instance_type" in kwargs: + flavor = self._api.flavors.find(name=kwargs["instance_type"]) + elif "flavor_id" in kwargs: + flavor = self._api.flavors.find(id=kwargs["flavor_id"]) else: - flavor = self._api.flavors.find(name='m1.tiny') - ram = kwargs.pop('ram', None) - cpu = kwargs.pop('cpu', None) + flavor = self._api.flavors.find(name="m1.tiny") + ram = kwargs.pop("ram", None) + cpu = kwargs.pop("cpu", None) if ram or cpu: self._get_or_create_override_flavor(flavor, cpu, ram) self.logger.info( - ' Deploying OpenStack template %s to instance %s (%s)', - self.name, vm_name, flavor.name + " Deploying OpenStack template %s to instance %s (%s)", self.name, vm_name, flavor.name ) if len(self.system.list_network()) > 1: - if 'network_name' not in kwargs: - raise NetworkNameNotFound('Must select a network name') + if "network_name" not in kwargs: + raise NetworkNameNotFound("Must select a network name") else: - net_id = self._api.networks.find(label=kwargs['network_name']).id - nics = [{'net-id': net_id}] + net_id = self._api.networks.find(label=kwargs["network_name"]).id + nics = [{"net-id": net_id}] image = self.raw new_instance = self._api.servers.create(vm_name, image, flavor, nics=nics, **kwargs) - instance = OpenstackInstance( - system=self.system, - uuid=new_instance.id, - raw=new_instance) + instance = OpenstackInstance(system=self.system, uuid=new_instance.id, raw=new_instance) instance.wait_for_steady_state(timeout=timeout) - if kwargs.get('floating_ip_pool'): - instance.assign_floating_ip(kwargs['floating_ip_pool']) + if kwargs.get("floating_ip_pool"): + instance.assign_floating_ip(kwargs["floating_ip_pool"]) if power_on: instance.start() @@ -579,8 +597,8 @@ class OpenstackSystem(System, VmMixin, TemplateMixin): """ _stats_available = { - 'num_vm': lambda self: len(self.list_vms(filter_tenants=True)), - 'num_template': lambda self: len(self.list_templates()), + "num_vm": lambda self: len(self.list_vms(filter_tenants=True)), + "num_template": lambda 
self: len(self.list_templates()), } can_suspend = True @@ -589,7 +607,7 @@ class OpenstackSystem(System, VmMixin, TemplateMixin): def __init__(self, tenant, username, password, auth_url, **kwargs): self.keystone_version = kwargs.get("keystone_version") if not self.keystone_version: - parsed_keystone_version = search(r'v([2-3])$', auth_url) + parsed_keystone_version = search(r"v([2-3])$", auth_url) if parsed_keystone_version: self.keystone_version = int(parsed_keystone_version.group(1)) else: @@ -597,12 +615,12 @@ def __init__(self, tenant, username, password, auth_url, **kwargs): self.keystone_version = 2 if int(self.keystone_version) not in (2, 3): raise KeystoneVersionNotSupported(self.keystone_version) - super(OpenstackSystem, self).__init__(**kwargs) + super().__init__(**kwargs) self.tenant = tenant self.username = username self.password = password self.auth_url = auth_url - self.domain_id = kwargs['domain_id'] if self.keystone_version == 3 else None + self.domain_id = kwargs["domain_id"] if self.keystone_version == 3 else None self._session = None self._api = None self._gapi = None @@ -615,7 +633,7 @@ def __init__(self, tenant, username, password, auth_url, **kwargs): @property def _identifying_attrs(self): - return {'auth_url': self.auth_url, 'tenant': self.tenant} + return {"auth_url": self.auth_url, "tenant": self.tenant} @property def can_suspend(self): @@ -628,11 +646,16 @@ def can_pause(self): @property def session(self): if not self._session: - auth_kwargs = dict(auth_url=self.auth_url, username=self.username, - password=self.password, project_name=self.tenant) + auth_kwargs = dict( + auth_url=self.auth_url, + username=self.username, + password=self.password, + project_name=self.tenant, + ) if self.keystone_version == 3: - auth_kwargs.update(dict(user_domain_id=self.domain_id, - project_domain_id=self.domain_id)) + auth_kwargs.update( + dict(user_domain_id=self.domain_id, project_domain_id=self.domain_id) + ) pass_auth = Password(**auth_kwargs) self._session = Session(auth=pass_auth, verify=False) return self._session @@ -640,23 +663,25 @@ def session(self): @property def api(self): if not self._api: - self._api = osclient.Client('2', session=self.session, service_type="compute", - timeout=30) + self._api = osclient.Client( + "2", session=self.session, service_type="compute", timeout=30 + ) # replace the client request method with our version that # can handle timeouts; uses explicit binding (versus # replacing the method directly on the SessionClient class) # so we can still call out to SessionClient's original request # method in the timeout handler method self._api.client._cfme_logger = self.logger - self._api.client.request = _request_timeout_handler.__get__(self._api.client, - SessionClient) + self._api.client.request = _request_timeout_handler.__get__( + self._api.client, SessionClient + ) return self._api @property def gapi(self): """separate endpoint for glance API, novaclient.v2.images Deprecated in Nova 15.0""" if not self._gapi: - self._gapi = gClient('2', session=self.session) + self._gapi = gClient("2", session=self.session) return self._gapi @property @@ -697,18 +722,17 @@ def sapi(self): def stackapi(self): if not self._stackapi: heat_endpoint = self.kapi.session.auth.auth_ref.service_catalog.url_for( - service_type='orchestration' + service_type="orchestration" + ) + self._stackapi = heat_client.Client( + "1", heat_endpoint, token=self.kapi.session.auth.auth_ref.auth_token, insecure=True ) - self._stackapi = heat_client.Client('1', heat_endpoint, - 
token=self.kapi.session.auth.auth_ref.auth_token, - insecure=True) return self._stackapi def info(self): - return '%s %s' % (self.api.client.service_type, self.api.client.version) + return f"{self.api.client.service_type} {self.api.client.version}" def _get_tenants(self): - if self.keystone_version == 3: return self.tenant_api.list() real_tenants = [] @@ -722,7 +746,7 @@ def _get_tenants(self): def _get_tenant(self, **kwargs): if not kwargs: - kwargs = {'name': self.tenant} + kwargs = {"name": self.tenant} return self.tenant_api.find(**kwargs).id def _get_user(self, **kwargs): @@ -731,20 +755,21 @@ def _get_user(self, **kwargs): def _get_role(self, **kwargs): return self.kapi.roles.find(**kwargs).id - def add_tenant(self, tenant_name, description=None, enabled=True, user=None, roles=None, - domain=None): - params = dict(description=description, - enabled=enabled) + def add_tenant( + self, tenant_name, description=None, enabled=True, user=None, roles=None, domain=None + ): + params = dict(description=description, enabled=enabled) if self.keystone_version == 2: - params['tenant_name'] = tenant_name + params["tenant_name"] = tenant_name elif self.keystone_version == 3: - params['name'] = tenant_name - params['domain'] = domain + params["name"] = tenant_name + params["domain"] = domain tenant = self.tenant_api.create(**params) if user and roles: if self.keystone_version == 3: - raise NotImplementedError('Role assignments for users are not implemented yet for ' - 'Keystone V3') + raise NotImplementedError( + "Role assignments for users are not implemented yet for " "Keystone V3" + ) user = self._get_user(name=user) for role in roles: role_id = self._get_role(name=role) @@ -759,7 +784,7 @@ def remove_tenant(self, tenant_name): self.tenant_api.delete(tid) def create_vm(self): - raise NotImplementedError('create_vm not implemented.') + raise NotImplementedError("create_vm not implemented.") def _generic_paginator(self, f): """A generic paginator for OpenStack services @@ -794,7 +819,7 @@ def _generic_paginator(self, f): return lists def list_vms(self, filter_tenants=True, all_tenants=True): - call = partial(self.api.servers.list, True, {'all_tenants': all_tenants}) + call = partial(self.api.servers.list, True, {"all_tenants": all_tenants}) instances = self._generic_paginator(call) if filter_tenants and all_tenants: # Filter instances based on their tenant ID @@ -859,14 +884,14 @@ def get_vm(self, name=None, id=None, ip=None, all_tenants=True): MultipleInstancesError -- more than 1 vm found """ # Store the kwargs used for the exception msg's - kwargs = {'name': name, 'id': id, 'ip': ip} + kwargs = {"name": name, "id": id, "ip": ip} kwargs = {key: val for key, val in kwargs.items() if val is not None} matches = self.find_vms(**kwargs, all_tenants=all_tenants) if not matches: - raise VMInstanceNotFound('match criteria: {}'.format(kwargs)) + raise VMInstanceNotFound(f"match criteria: {kwargs}") elif len(matches) > 1: - raise MultipleInstancesError('match criteria: {}'.format(kwargs)) + raise MultipleInstancesError(f"match criteria: {kwargs}") return matches[0] @property @@ -877,7 +902,7 @@ def get_ports(self): Returns: List of server ports objects """ - return self.napi.list_ports()['ports'] + return self.napi.list_ports()["ports"] def create_template(self, *args, **kwargs): raise NotImplementedError @@ -947,8 +972,8 @@ def delete_volume(self, *ids, **kwargs): return # Wait for them wait_for( - lambda: all([not self.volume_exists(id) for id in ids]), - delay=0.5, num_sec=timeout) + lambda: all([not 
self.volume_exists(id) for id in ids]), delay=0.5, num_sec=timeout + ) def volume_exists(self, id): try: @@ -1031,8 +1056,10 @@ def volume_configurations(self, *configurations, **kwargs): def volume_attachments(self, volume_id): """Returns a dictionary of ``{instance: device}`` relationship of the volume.""" volume = self.capi.volumes.get(volume_id) - return {self.get_vm(id=attachment['server_id']).name: attachment['device'] - for attachment in volume.attachments} + return { + self.get_vm(id=attachment["server_id"]).name: attachment["device"] + for attachment in volume.attachments + } def free_fips(self, pool): """Returns list of free floating IPs sorted by ip address.""" @@ -1056,11 +1083,13 @@ def delete_floating_ip(self, floating_ip): if not floating_ip: return False floating_ip = floating_ip[0] - self.logger.info('Deleting floating IP %s/%s', floating_ip.id, floating_ip.ip) + self.logger.info("Deleting floating IP %s/%s", floating_ip.id, floating_ip.ip) floating_ip.delete() wait_for( lambda: len(self.api.floating_ips.findall(ip=floating_ip.ip)) == 0, - delay=1, timeout='1m') + delay=1, + timeout="1m", + ) return True def get_first_floating_ip(self, pool=None): @@ -1072,24 +1101,24 @@ def get_first_floating_ip(self, pool=None): Args: pool (str) -- pool to try to get IP from (optional) """ - pool_name = getattr(pool, 'name', pool) # obj attr, or passed thing (string) otherwise + pool_name = getattr(pool, "name", pool) # obj attr, or passed thing (string) otherwise fip = None try: fip = self.api.floating_ips.create(pool_name) except os_exceptions.NotFound: - self.logger.exception('Exception while creating FIP for pool: %s', pool_name) + self.logger.exception("Exception while creating FIP for pool: %s", pool_name) else: if not fip: self.logger.error( "Unable to create new floating IP in pool %s," " trying to find an existing one that is free" - " in any pool", pool_name + " in any pool", + pool_name, ) try: - fip = next(ip for ip in self.api.floating_ips.list() - if ip.instance_id is None) + fip = next(ip for ip in self.api.floating_ips.list() if ip.instance_id is None) except StopIteration: - self.logger.error('No more Floating IPs available') + self.logger.error("No more Floating IPs available") return None return fip.ip @@ -1114,7 +1143,7 @@ def delete_stack(self, stack_name): return False def usage_and_quota(self): - data = self.api.limits.get().to_dict()['absolute'] + data = self.api.limits.get().to_dict()["absolute"] host_cpus = 0 host_ram = 0 for hypervisor in self.api.hypervisors.list(): @@ -1123,13 +1152,13 @@ def usage_and_quota(self): # -1 == no limit return { # RAM - 'ram_used': data['totalRAMUsed'], - 'ram_total': host_ram, - 'ram_limit': data['maxTotalRAMSize'] if data['maxTotalRAMSize'] >= 0 else None, + "ram_used": data["totalRAMUsed"], + "ram_total": host_ram, + "ram_limit": data["maxTotalRAMSize"] if data["maxTotalRAMSize"] >= 0 else None, # CPU - 'cpu_used': data['totalCoresUsed'], - 'cpu_total': host_cpus, - 'cpu_limit': data['maxTotalCores'] if data['maxTotalCores'] >= 0 else None, + "cpu_used": data["totalCoresUsed"], + "cpu_total": host_cpus, + "cpu_limit": data["maxTotalCores"] if data["maxTotalCores"] >= 0 else None, } def list_containers(self): @@ -1208,7 +1237,7 @@ def create_object(self, container_name, path, object_name=None): name = object_name or os.path.basename(path) - with open(path, 'rb') as obj: + with open(path, "rb") as obj: self.sapi.put_object(container_name, name, contents=obj) def delete_object(self, container_name, object_name): @@ -1248,7 +1277,7 
@@ def download_object(self, container_name, object_name, path): if e.http_reason == "Not Found": raise ItemNotFound( name=object_name, - item_type="Swift Object in Container {}".format(container_name) + item_type=f"Swift Object in Container {container_name}", ) else: raise @@ -1256,7 +1285,7 @@ def download_object(self, container_name, object_name, path): with open(path, "wb") as obj: obj.write(obj_contents) - def get_quota(self, quota='all'): + def get_quota(self, quota="all"): """Get quota details. Examples: @@ -1277,10 +1306,10 @@ def get_quota(self, quota='all'): quota = str(quota).lower() project_id = self._get_tenant(name=self.tenant) quota_values = {} - if quota in ['all', 'compute']: - quota_values.update({'compute': self.api.quotas.get(project_id).to_dict()}) - if quota in ['all', 'volume']: - quota_values.update({'volume': self.capi.quotas.get(project_id).to_dict()}) - if quota in ['all', 'network']: - quota_values.update({'network': self.napi.show_quota(project_id)['quota']}) + if quota in ["all", "compute"]: + quota_values.update({"compute": self.api.quotas.get(project_id).to_dict()}) + if quota in ["all", "volume"]: + quota_values.update({"volume": self.capi.quotas.get(project_id).to_dict()}) + if quota in ["all", "network"]: + quota_values.update({"network": self.napi.show_quota(project_id)["quota"]}) return quota_values diff --git a/wrapanapi/systems/openstack_infra.py b/wrapanapi/systems/openstack_infra.py index c89b392a..7c36ebf3 100644 --- a/wrapanapi/systems/openstack_infra.py +++ b/wrapanapi/systems/openstack_infra.py @@ -1,6 +1,5 @@ -# coding: utf-8 - from collections import namedtuple + from ironicclient import client as iclient from keystoneauth1.identity import Password from keystoneauth1.session import Session @@ -9,11 +8,11 @@ from novaclient.client import SessionClient from requests.exceptions import Timeout -from wrapanapi.systems.base import System from wrapanapi.exceptions import KeystoneVersionNotSupported +from wrapanapi.systems.base import System -Node = namedtuple('Node', ['uuid', 'name', 'power_state', 'provision_state']) +Node = namedtuple("Node", ["uuid", "name", "power_state", "provision_state"]) # TODO The following monkeypatch nonsense is criminal, and would be @@ -23,6 +22,7 @@ # Note: This same mechanism may be required for keystone and cinder # clients, but hopefully won't be. 
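# --- illustrative sketch, not part of the patch: both openstack modules graft
# the retry handler onto the nova client with descriptor binding --
# handler.__get__(instance, cls) -- so the replacement behaves like a bound
# method while the class's original request() stays reachable.  Client and
# retrying_request below are hypothetical stand-ins for SessionClient and
# _request_timeout_handler:
class Client:
    def request(self, url):
        raise TimeoutError(url)  # pretend every raw call times out


def retrying_request(self, url, retry_count=0):
    try:
        return Client.request(self, url)  # call the original, unbound
    except TimeoutError:
        if retry_count >= 3:
            raise
        # feed back through the instance attribute, i.e. this same wrapper
        return self.request(url, retry_count=retry_count + 1)


client = Client()
client.request = retrying_request.__get__(client, Client)  # instance-level bind
# client.request("http://example") now retries 3 times before re-raising
# --- end of sketch ---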
+ # monkeypatch method to add retry support to openstack def _request_timeout_handler(self, url, method, retry_count=0, **kwargs): try: @@ -30,12 +30,12 @@ def _request_timeout_handler(self, url, method, retry_count=0, **kwargs): return SessionClient.request(self, url, method, **kwargs) except Timeout: if retry_count >= 3: - self._cfme_logger.error('nova request timed out after {} retries'.format(retry_count)) + self._cfme_logger.error(f"nova request timed out after {retry_count} retries") raise else: # feed back into the replaced method that supports retry_count retry_count += 1 - self._cfme_logger.error('nova request timed out; retry {}'.format(retry_count)) + self._cfme_logger.error(f"nova request timed out; retry {retry_count}") return self.request(url, method, retry_count=retry_count, **kwargs) @@ -45,20 +45,20 @@ class OpenstackInfraSystem(System): """ _stats_available = { - 'num_template': lambda self: len(self.list_templates()), - 'num_host': lambda self: len(self.list_host()), + "num_template": lambda self: len(self.list_templates()), + "num_host": lambda self: len(self.list_host()), } def __init__(self, **kwargs): - self.keystone_version = kwargs.get('keystone_version', 2) + self.keystone_version = kwargs.get("keystone_version", 2) if int(self.keystone_version) not in (2, 3): raise KeystoneVersionNotSupported(self.keystone_version) - super(OpenstackInfraSystem, self).__init__(**kwargs) - self.tenant = kwargs['tenant'] - self.username = kwargs['username'] - self.password = kwargs['password'] - self.auth_url = kwargs['auth_url'] - self.domain_id = kwargs['domain_id'] if self.keystone_version == 3 else None + super().__init__(**kwargs) + self.tenant = kwargs["tenant"] + self.username = kwargs["username"] + self.password = kwargs["password"] + self.auth_url = kwargs["auth_url"] + self.domain_id = kwargs["domain_id"] if self.keystone_version == 3 else None self._session = None self._api = None self._kapi = None @@ -67,16 +67,21 @@ def __init__(self, **kwargs): @property def _identifying_attrs(self): - return {'auth_url': self.auth_url, 'tenant': self.tenant} + return {"auth_url": self.auth_url, "tenant": self.tenant} @property def session(self): if not self._session: - auth_kwargs = dict(auth_url=self.auth_url, username=self.username, - password=self.password, project_name=self.tenant) + auth_kwargs = dict( + auth_url=self.auth_url, + username=self.username, + password=self.password, + project_name=self.tenant, + ) if self.keystone_version == 3: - auth_kwargs.update(dict(user_domain_id=self.domain_id, - project_domain_name=self.domain_id)) + auth_kwargs.update( + dict(user_domain_id=self.domain_id, project_domain_name=self.domain_id) + ) pass_auth = Password(**auth_kwargs) self._session = Session(auth=pass_auth, verify=False) return self._session @@ -84,11 +89,9 @@ def session(self): @property def api(self): if not self._api: - self._api = osclient.Client('2', - session=self.session, - service_type="compute", - insecure=True, - timeout=30) + self._api = osclient.Client( + "2", session=self.session, service_type="compute", insecure=True, timeout=30 + ) # replace the client request method with our version that # can handle timeouts; uses explicit binding (versus # replacing the method directly on the SessionClient class) @@ -96,8 +99,7 @@ def api(self): # method in the timeout handler method self._api.client._cfme_logger = self.logger self._api.client.request = _request_timeout_handler.__get__( - self._api.client, - SessionClient + self._api.client, SessionClient ) return self._api @@ 
-145,8 +147,10 @@ def list_nodes(self): # Sometimes Ironic does not show the names, pull them from Nova if possible. selected_nova_node = None for nova_node in nodes: - if getattr( - nova_node, 'OS-EXT-SRV-ATTR:hypervisor_hostname', None) == i_node.uuid: + if ( + getattr(nova_node, "OS-EXT-SRV-ATTR:hypervisor_hostname", None) + == i_node.uuid + ): selected_nova_node = nova_node break if selected_nova_node: @@ -157,7 +161,7 @@ def list_nodes(self): return result def info(self): - raise NotImplementedError('info not implemented.') + raise NotImplementedError("info not implemented.") def disconnect(self): pass diff --git a/wrapanapi/systems/redfish.py b/wrapanapi/systems/redfish.py index 0b5e2bb0..41d8378c 100644 --- a/wrapanapi/systems/redfish.py +++ b/wrapanapi/systems/redfish.py @@ -1,29 +1,32 @@ -# coding: utf-8 """Backend management system classes Used to communicate with providers without using CFME facilities """ - import redfish_client -from wrapanapi.entities import PhysicalContainer, Server, ServerState +from wrapanapi.entities import PhysicalContainer +from wrapanapi.entities import Server +from wrapanapi.entities import ServerState from wrapanapi.entities.base import Entity -from wrapanapi.exceptions import InvalidValueException, ItemNotFound +from wrapanapi.exceptions import InvalidValueException +from wrapanapi.exceptions import ItemNotFound from wrapanapi.systems.base import System class RedfishItemNotFound(ItemNotFound): """Raised if a Redfish item is not found.""" + def __init__(self, name, item_type, response): - super(RedfishItemNotFound, self).__init__(name, item_type) + super().__init__(name, item_type) self.response = response def __str__(self): - return 'Could not find a {} named {}. Response:\n{}'.format(self.item_type, self.name, - self.response) + return "Could not find a {} named {}. Response:\n{}".format( + self.item_type, self.name, self.response + ) class RedfishResource(Entity): - """Class representing a generic Redfish resource such as Server or Chassis. """ + """Class representing a generic Redfish resource such as Server or Chassis.""" def __init__(self, system, raw=None, **kwargs): """ @@ -34,11 +37,11 @@ def __init__(self, system, raw=None, **kwargs): raw: the root resource in the Redfish API odata_id: (optional) the @odata.id reference of this instance """ - self._odata_id = raw['@odata.id'] if raw else kwargs.get('odata_id') + self._odata_id = raw["@odata.id"] if raw else kwargs.get("odata_id") if not self._odata_id: raise ValueError("missing required kwargs: 'odata_id'") - super(RedfishResource, self).__init__(system, raw, **kwargs) + super().__init__(system, raw, **kwargs) @property def _identifying_attrs(self): @@ -48,7 +51,7 @@ def _identifying_attrs(self): These attributes identify the instance without needing to query the API for updated data. 
""" - return {'odata_id': self._odata_id} + return {"odata_id": self._odata_id} def refresh(self): """ @@ -67,9 +70,9 @@ def refresh(self): @property def name(self): """Return name from most recent raw data.""" - name = "{} {}".format(self.raw.Manufacturer, self.raw.Name) + name = f"{self.raw.Manufacturer} {self.raw.Name}" if "SerialNumber" in self.raw: - name = "{} ({})".format(name, self.raw.SerialNumber) + name = f"{name} ({self.raw.SerialNumber})" return name @property @@ -84,10 +87,10 @@ def uuid(self): class RedfishServer(Server, RedfishResource): state_map = { - 'On': ServerState.ON, - 'Off': ServerState.OFF, - 'PoweringOn': ServerState.POWERING_ON, - 'PoweringOff': ServerState.POWERING_OFF, + "On": ServerState.ON, + "Off": ServerState.OFF, + "PoweringOn": ServerState.POWERING_ON, + "PoweringOff": ServerState.POWERING_OFF, } @property @@ -156,53 +159,52 @@ class RedfishSystem(System): # statistics for the provider _stats_available = { - 'num_server': lambda system: system.num_servers, - 'num_chassis': lambda system: system.num_chassis, - 'num_racks': lambda system: system.num_racks, + "num_server": lambda system: system.num_servers, + "num_chassis": lambda system: system.num_chassis, + "num_racks": lambda system: system.num_racks, } # statistics for an individual server _server_stats_available = { - 'cores_capacity': lambda server: server.server_cores, - 'memory_capacity': lambda server: server.server_memory, + "cores_capacity": lambda server: server.server_cores, + "memory_capacity": lambda server: server.server_memory, } _server_inventory_available = { - 'power_state': lambda server: server.state.lower(), + "power_state": lambda server: server.state.lower(), } # rack statistics - _rack_stats_available = { - } + _rack_stats_available = {} _rack_inventory_available = { - 'rack_name': lambda rack: rack.name, + "rack_name": lambda rack: rack.name, } _chassis_stats_available = { - 'num_physical_servers': lambda chassis: chassis.num_servers, + "num_physical_servers": lambda chassis: chassis.num_servers, } _chassis_inventory_available = { - 'chassis_name': lambda chassis: chassis.name, - 'description': lambda chassis: chassis.description, - 'identify_led_state': lambda chassis: chassis.led_state, + "chassis_name": lambda chassis: chassis.name, + "description": lambda chassis: chassis.description, + "identify_led_state": lambda chassis: chassis.led_state, } def __init__(self, hostname, username, password, security_protocol, api_port=443, **kwargs): - super(RedfishSystem, self).__init__(**kwargs) - protocol = 'http' if security_protocol == 'Non-SSL' else 'https' - self.url = '{}://{}:{}/'.format(protocol, hostname, api_port) + super().__init__(**kwargs) + protocol = "http" if security_protocol == "Non-SSL" else "https" + self.url = f"{protocol}://{hostname}:{api_port}/" self.kwargs = kwargs self.api_client = redfish_client.connect(self.url, username, password) @property def _identifying_attrs(self): - return {'url': self.url} + return {"url": self.url} def info(self): - return 'RedfishSystem url={}'.format(self.url) + return f"RedfishSystem url={self.url}" def server_stats(self, physical_server, requested_stats, **kwargs): """ @@ -220,8 +222,9 @@ def server_stats(self, physical_server, requested_stats, **kwargs): # Get an instance of the requested Redfish server redfish_server = self.get_server(physical_server.ems_ref) - return {stat: self._server_stats_available[stat](redfish_server) - for stat in requested_stats} + return { + stat: self._server_stats_available[stat](redfish_server) for 
stat in requested_stats + } def server_inventory(self, physical_server, requested_items, **kwargs): """ @@ -239,8 +242,9 @@ def server_inventory(self, physical_server, requested_items, **kwargs): # Get an instance of the requested Redfish server redfish_server = self.get_server(physical_server.ems_ref) - return {item: self._server_inventory_available[item](redfish_server) - for item in requested_items} + return { + item: self._server_inventory_available[item](redfish_server) for item in requested_items + } def rack_stats(self, physical_rack, requested_stats): """ @@ -258,8 +262,7 @@ def rack_stats(self, physical_rack, requested_stats): # Get an instance of the requested Redfish rack redfish_rack = self.get_rack(physical_rack.ems_ref) - return {stat: self._rack_stats_available[stat](redfish_rack) - for stat in requested_stats} + return {stat: self._rack_stats_available[stat](redfish_rack) for stat in requested_stats} def rack_inventory(self, physical_rack, requested_items): """ @@ -277,8 +280,9 @@ def rack_inventory(self, physical_rack, requested_items): # Get an instance of the requested Redfish rack redfish_rack = self.get_rack(physical_rack.ems_ref) - return {item: self._rack_inventory_available[item](redfish_rack) - for item in requested_items} + return { + item: self._rack_inventory_available[item](redfish_rack) for item in requested_items + } def chassis_stats(self, physical_chassis, requested_stats): """ @@ -296,8 +300,9 @@ def chassis_stats(self, physical_chassis, requested_stats): # Get an instance of the requested Redfish chassis redfish_chassis = self.get_chassis(physical_chassis.ems_ref) - return {stat: self._chassis_stats_available[stat](redfish_chassis) - for stat in requested_stats} + return { + stat: self._chassis_stats_available[stat](redfish_chassis) for stat in requested_stats + } def chassis_inventory(self, physical_chassis, requested_items): """ @@ -315,8 +320,10 @@ def chassis_inventory(self, physical_chassis, requested_items): # Get an instance of the requested Redfish chassis redfish_chassis = self.get_chassis(physical_chassis.ems_ref) - return {item: self._chassis_inventory_available[item](redfish_chassis) - for item in requested_items} + return { + item: self._chassis_inventory_available[item](redfish_chassis) + for item in requested_items + } def find(self, resource_id): """ @@ -372,8 +379,7 @@ def get_chassis(self, resource_id, *required_types): chassis = RedfishChassis(self, raw=self.find(resource_id)) if required_types and chassis.raw.ChassisType not in required_types: - raise InvalidValueException( - "This chassis is of wrong type {}".format(chassis.raw.ChassisType)) + raise InvalidValueException(f"This chassis is of wrong type {chassis.raw.ChassisType}") return chassis @@ -390,8 +396,9 @@ def get_rack(self, resource_id): """ rack_data = self.find(resource_id) if rack_data.ChassisType != "Rack": - raise InvalidValueException("Chassis type {} does not match that of a Rack".format( - rack_data.ChassisType)) + raise InvalidValueException( + f"Chassis type {rack_data.ChassisType} does not match that of a Rack" + ) return RedfishRack(self, raw=rack_data) @@ -403,11 +410,15 @@ def num_servers(self): @property def num_chassis(self): """Return the count of Physical Chassis discovered by the provider.""" - return len([chassis for chassis in self.api_client.Chassis.Members - if chassis.ChassisType != "Rack"]) + return len( + [ + chassis + for chassis in self.api_client.Chassis.Members + if chassis.ChassisType != "Rack" + ] + ) @property def num_racks(self): """Return 
the number of Physical Racks discovered by the provider.""" - return len([rack for rack in self.api_client.Chassis.Members - if rack.ChassisType == "Rack"]) + return len([rack for rack in self.api_client.Chassis.Members if rack.ChassisType == "Rack"]) diff --git a/wrapanapi/systems/rhevm.py b/wrapanapi/systems/rhevm.py index 221eabf1..588202c2 100644 --- a/wrapanapi/systems/rhevm.py +++ b/wrapanapi/systems/rhevm.py @@ -1,32 +1,42 @@ -# coding: utf-8 """Backend management system classes Used to communicate with providers without using CFME facilities """ - import fauxfactory import pytz +from ovirtsdk4 import Connection +from ovirtsdk4 import Error from ovirtsdk4 import NotFoundError as OVirtNotFoundError -from ovirtsdk4 import Connection, Error, types -from wait_for import TimedOutError, wait_for - -from wrapanapi.entities import Template, TemplateMixin, Vm, VmMixin, VmState -from wrapanapi.exceptions import ( - ItemNotFound, MultipleItemsError, NotFoundError, VMInstanceNotFound, VMInstanceNotSuspended, - VMNotFoundViaIP, ResourceAlreadyExistsException) +from ovirtsdk4 import types +from wait_for import TimedOutError +from wait_for import wait_for + +from wrapanapi.entities import Template +from wrapanapi.entities import TemplateMixin +from wrapanapi.entities import Vm +from wrapanapi.entities import VmMixin +from wrapanapi.entities import VmState +from wrapanapi.exceptions import ItemNotFound +from wrapanapi.exceptions import MultipleItemsError +from wrapanapi.exceptions import NotFoundError +from wrapanapi.exceptions import ResourceAlreadyExistsException +from wrapanapi.exceptions import VMInstanceNotFound +from wrapanapi.exceptions import VMInstanceNotSuspended +from wrapanapi.exceptions import VMNotFoundViaIP from wrapanapi.systems.base import System -class _SharedMethodsMixin(object): +class _SharedMethodsMixin: """ Mixin class that holds properties/methods both VM's and templates share. 
This should be listed first in the child class inheritance to satisfy the methods required by the Vm/Template abstract base class """ + @property def _identifying_attrs(self): - return {'uuid': self._uuid} + return {"uuid": self._uuid} def refresh(self, **kwargs): """ @@ -70,26 +80,35 @@ def _get_nic_service(self, nic_name): if nic.name == nic_name: return self.api.nics_service().nic_service(nic.id) else: - raise NotFoundError('Unable to find NicService for nic {} on {}'.format(nic_name, self)) + raise NotFoundError(f"Unable to find NicService for nic {nic_name} on {self}") def _get_network(self, network_name): """retrieve a network object by name""" - networks = self.system.api.system_service().networks_service().list( - search='name={}'.format(network_name)) + networks = ( + self.system.api.system_service().networks_service().list(search=f"name={network_name}") + ) try: return networks[0] except IndexError: - raise NotFoundError('No match for network by "name={}"'.format(network_name)) + raise NotFoundError(f'No match for network by "name={network_name}"') def get_nics(self): return self.api.nics_service().list() def get_vnic_profiles(self): - """ Get vnic_profiles of the VM/template """ + """Get vnic_profiles of the VM/template""" return [nic.vnic_profile for nic in self.get_nics()] - def _nic_action(self, nic, network_name, interface='VIRTIO', on_boot=True, - vnic_profile=None, nic_service=None, action='add'): + def _nic_action( + self, + nic, + network_name, + interface="VIRTIO", + on_boot=True, + vnic_profile=None, + nic_service=None, + action="add", + ): """Call an action on nic_service, could be a vmnic or vmnics service example, action 'add' on vmnicsservice, or 'update' on VmNicService currently written for nic actions on the service, though other actions are available @@ -113,8 +132,9 @@ def _nic_action(self, nic, network_name, interface='VIRTIO', on_boot=True, # service attribute should be method we can call and pass the nic to getattr(service, action)(nic) - def add_nic(self, network_name, nic_name='nic1', interface='VIRTIO', on_boot=True, - vnic_profile=None): + def add_nic( + self, network_name, nic_name="nic1", interface="VIRTIO", on_boot=True, vnic_profile=None + ): """Add a nic to VM/Template Args: @@ -132,15 +152,18 @@ def add_nic(self, network_name, nic_name='nic1', interface='VIRTIO', on_boot=Tru except NotFoundError: pass else: - raise ResourceAlreadyExistsException('Nic with name {} already exists on {}' - .format(nic_name, self.name)) + raise ResourceAlreadyExistsException( + f"Nic with name {nic_name} already exists on {self.name}" + ) nics_service = self.api.nics_service() nic = types.Nic(name=nic_name) - self._nic_action(nic, network_name, interface, on_boot, vnic_profile, - nics_service, action='add') + self._nic_action( + nic, network_name, interface, on_boot, vnic_profile, nics_service, action="add" + ) - def update_nic(self, network_name, nic_name='nic1', interface='VIRTIO', on_boot=True, - vnic_profile=None): + def update_nic( + self, network_name, nic_name="nic1", interface="VIRTIO", on_boot=True, vnic_profile=None + ): """Update a nic on VM/Template Args: network_name: string name of the network, also default for vnic_profile name if empty @@ -153,20 +176,28 @@ def update_nic(self, network_name, nic_name='nic1', interface='VIRTIO', on_boot= NotFoundError: from _get_nic_service call if the name doesn't exist """ nic_service = self._get_nic_service(nic_name) - self._nic_action(nic_service.get(), network_name, interface, on_boot, vnic_profile, - nic_service,
action='update') + self._nic_action( + nic_service.get(), + network_name, + interface, + on_boot, + vnic_profile, + nic_service, + action="update", + ) class RHEVMVirtualMachine(_SharedMethodsMixin, Vm): """ Represents a VM entity on RHEV """ + state_map = { - 'up': VmState.RUNNING, - 'down': VmState.STOPPED, - 'powering_up': VmState.STARTING, - 'suspended': VmState.SUSPENDED, - 'reboot_in_progress': VmState.STARTING, + "up": VmState.RUNNING, + "down": VmState.STOPPED, + "powering_up": VmState.STARTING, + "suspended": VmState.SUSPENDED, + "reboot_in_progress": VmState.STARTING, } def __init__(self, system, raw=None, **kwargs): @@ -178,8 +209,8 @@ def __init__(self, system, raw=None, **kwargs): raw - raw ovirtsdk4.types.Vm object (if already obtained) uuid - vm ID """ - super(RHEVMVirtualMachine, self).__init__(system, raw, **kwargs) - self._uuid = raw.id if raw else kwargs.get('uuid') + super().__init__(system, raw, **kwargs) + self._uuid = raw.id if raw else kwargs.get("uuid") if not self._uuid: raise ValueError("missing required kwarg: 'uuid'") self.api = system.api.system_service().vms_service().vm_service(self._uuid) @@ -205,14 +236,14 @@ def delete(self): Removes the entity on the provider """ self.ensure_state(VmState.STOPPED) - self.logger.debug(' Deleting RHEV VM %s/%s', self.name, self.uuid) + self.logger.debug(" Deleting RHEV VM %s/%s", self.name, self.uuid) self.api.remove() wait_for( lambda: not self.exists, - message="wait for RHEV VM '{}' deleted".format(self.uuid), - num_sec=300 + message=f"wait for RHEV VM '{self.uuid}' deleted", + num_sec=300, ) return True @@ -234,8 +265,7 @@ def rename(self, new_name): self.logger.exception("Failed to rename VM %s to %s", self.name, new_name) return False else: - self.logger.info( - "RHEVM VM '%s' renamed to '%s', now restarting", self.name, new_name) + self.logger.info("RHEVM VM '%s' renamed to '%s', now restarting", self.name, new_name) self.restart() # Restart is required for a rename in RHEV self.refresh() # Update raw so we pick up the new name return True @@ -258,13 +288,13 @@ def ip(self): """ potentials = [] for ip in self.all_ips: - if not ip.startswith('fe80::'): + if not ip.startswith("fe80::"): potentials.append(ip) return potentials[0] if potentials else None @property def all_ips(self): - """ Return all of the IPs + """Return all of the IPs Returns: (list) the addresses assigned to the machine """ @@ -282,9 +312,9 @@ def start(self): Returns: True if vm action has been initiated properly """ self.wait_for_steady_state() - self.logger.info(' Starting RHEV VM %s', self.name) + self.logger.info(" Starting RHEV VM %s", self.name) if self.is_running: - self.logger.info(' RHEV VM %s is already running.', self.name) + self.logger.info(" RHEV VM %s is already running.", self.name) return True else: self.api.start() @@ -298,9 +328,9 @@ def stop(self): Returns: True if vm action has been initiated properly """ self.wait_for_steady_state() - self.logger.info(' Stopping RHEV VM %s', self.name) + self.logger.info(" Stopping RHEV VM %s", self.name) if self.is_stopped: - self.logger.info(' RHEV VM %s is already stopped.', self.name) + self.logger.info(" RHEV VM %s is already stopped.", self.name) return True else: self.api.stop() @@ -313,7 +343,7 @@ def restart(self): Returns: True if vm action has been initiated properly """ - self.logger.debug(' Restarting RHEV VM %s', self.name) + self.logger.debug(" Restarting RHEV VM %s", self.name) return self.stop() and self.start() def suspend(self): @@ -323,12 +353,12 @@ def suspend(self): 
Returns: True if vm action has been initiated properly """ self.wait_for_steady_state() - self.logger.debug(' Suspending RHEV VM %s', self.name) + self.logger.debug(" Suspending RHEV VM %s", self.name) if self.is_stopped: # TODO: possibly use ensure_state(VmState.RUNNING) here? raise VMInstanceNotSuspended(self.name) elif self.is_suspended: - self.logger.info(' RHEV VM %s is already suspended.', self.name) + self.logger.info(" RHEV VM %s is already suspended.", self.name) return True else: self.api.suspend() @@ -342,7 +372,7 @@ def mark_as_template( storage_domain_name=None, delete=True, delete_on_error=True, - **kwargs + **kwargs, ): """Turns the VM off, creates template from it and deletes the original VM. @@ -361,7 +391,7 @@ def mark_as_template( Returns: wrapanapi.systems.rhevm.RHEVMTemplate object """ - temp_template_name = template_name or "mrk_tmpl_{}".format(fauxfactory.gen_alphanumeric(8)) + temp_template_name = template_name or f"mrk_tmpl_{fauxfactory.gen_alphanumeric(8)}" template = None try: # Check if this template already exists and ensure it is in an OK state... @@ -383,7 +413,7 @@ def mark_as_template( template_name=temp_template_name, vm_name=self.name, cluster_name=cluster_name, - storage_domain_name=storage_domain_name + storage_domain_name=storage_domain_name, ) if delete and self.exists: # Delete the original VM @@ -405,8 +435,8 @@ def mark_as_template( def get_hardware_configuration(self): self.refresh() return { - 'ram': self.raw.memory / 1024 / 1024, - 'cpu': self.raw.cpu.topology.cores * self.raw.cpu.topology.sockets + "ram": self.raw.memory / 1024 / 1024, + "cpu": self.raw.cpu.topology.cores * self.raw.cpu.topology.sockets, } def _get_disk_attachment_service(self, disk_name): @@ -415,7 +445,7 @@ def _get_disk_attachment_service(self, disk_name): disk = self.system.api.follow_link(disk_attachment_service.disk) if disk.name == disk_name: return disk_attachments_service.service(disk.id) - raise ItemNotFound(disk_name, 'disk') + raise ItemNotFound(disk_name, "disk") def is_disk_attached(self, disk_name): try: @@ -427,13 +457,23 @@ def get_disks_count(self): return len(self.api.disk_attachments_service().list()) def _is_disk_ok(self, disk_id): - disk = [self.system.api.follow_link(disk_attach.disk) - for disk_attach in self.api.disk_attachments_service().list() - if self.system.api.follow_link(disk_attach.disk).id == disk_id].pop() - return getattr(disk, 'status', None) == types.DiskStatus.OK - - def add_disk(self, storage_domain=None, size=None, interface='virtio', format='cow', - active=True, sparse=True, name=None): + disk = [ + self.system.api.follow_link(disk_attach.disk) + for disk_attach in self.api.disk_attachments_service().list() + if self.system.api.follow_link(disk_attach.disk).id == disk_id + ].pop() + return getattr(disk, "status", None) == types.DiskStatus.OK + + def add_disk( + self, + storage_domain=None, + size=None, + interface="virtio", + format="cow", + active=True, + sparse=True, + name=None, + ): """ Add disk to VM @@ -454,20 +494,28 @@ def add_disk(self, storage_domain=None, size=None, interface='virtio', format='c """ disk_attachments_service = self.api.disk_attachments_service() disk_attach = types.DiskAttachment( - disk=types.Disk(name=name, - format=types.DiskFormat(format.lower()), - provisioned_size=size, - storage_domains=[types.StorageDomain(name=storage_domain)], - sparse=bool(sparse)), + disk=types.Disk( + name=name, + format=types.DiskFormat(format.lower()), + provisioned_size=size, + 
storage_domains=[types.StorageDomain(name=storage_domain)], + sparse=bool(sparse), + ), interface=types.DiskInterface(interface.lower()), - active=active + active=active, ) disk_attachment = disk_attachments_service.add(disk_attach) - wait_for(self._is_disk_ok, func_args=[disk_attachment.disk.id], delay=5, num_sec=900, - message="check if disk is attached") + wait_for( + self._is_disk_ok, + func_args=[disk_attachment.disk.id], + delay=5, + num_sec=900, + message="check if disk is attached", + ) - def connect_direct_lun(self, lun_name=None, lun_ip_addr=None, lun_port=None, - lun_iscsi_target=None, interface=None): + def connect_direct_lun( + self, lun_name=None, lun_ip_addr=None, lun_port=None, lun_iscsi_target=None, interface=None + ): """ Connects a direct lun disk to the VM. @@ -483,7 +531,7 @@ def connect_direct_lun(self, lun_name=None, lun_ip_addr=None, lun_port=None, disk=types.Disk( name=lun_name, shareable=True, - format='raw', + format="raw", lun_storage=types.HostStorage( type=types.StorageType.ISCSI, logical_units=[ @@ -492,18 +540,21 @@ def connect_direct_lun(self, lun_name=None, lun_ip_addr=None, lun_port=None, port=lun_port, target=lun_iscsi_target, ) - ] - ) + ], + ), ), - interface=types.DiskInterface(getattr(types.DiskInterface, interface or 'VIRTIO')), - active=True + interface=types.DiskInterface(getattr(types.DiskInterface, interface or "VIRTIO")), + active=True, ) else: disk_attachment = self._get_disk_attachment_service(lun_name).get() disk_attachments_service.add(disk_attachment) wait_for( - self._is_disk_ok, func_args=[disk_attachment.disk.id], delay=5, num_sec=900, - message="check if disk is attached" + self._is_disk_ok, + func_args=[disk_attachment.disk.id], + delay=5, + num_sec=900, + message="check if disk is attached", ) return True @@ -516,7 +567,9 @@ def disconnect_disk(self, disk_name): disk_attachment_service.remove(detach_only=True, wait=True) wait_for( lambda: not self.is_disk_attached(disk_name), - delay=5, num_sec=900, message="disk to no longer be attached" + delay=5, + num_sec=900, + message="disk to no longer be attached", ) return True @@ -525,6 +578,7 @@ class RHEVMTemplate(_SharedMethodsMixin, Template): """ Represents a template entity on RHEV. 
""" + def __init__(self, system, raw=None, **kwargs): """ Constructor for a RHEV template tied to a specific system @@ -534,8 +588,8 @@ def __init__(self, system, raw=None, **kwargs): raw - raw ovirtsdk4.types.Vm object (if already obtained) uuid - template ID """ - super(RHEVMTemplate, self).__init__(system, raw=None, **kwargs) - self._uuid = raw.id if raw else kwargs.get('uuid') + super().__init__(system, raw=None, **kwargs) + self._uuid = raw.id if raw else kwargs.get("uuid") if not self._uuid: raise ValueError("missing required kwarg: 'uuid'") self.api = system.api.system_service().templates_service().template_service(self._uuid) @@ -547,7 +601,7 @@ def delete(self, timeout=120): Args: timeout: time to wait for template to be successfully deleted """ - self.logger.debug(' Deleting RHEV template %s/%s', self.name, self.uuid) + self.logger.debug(" Deleting RHEV template %s/%s", self.name, self.uuid) self.wait_for_ok_status() self.api.remove() wait_for(lambda: not self.exists, num_sec=timeout, delay=5) @@ -579,7 +633,8 @@ def wait_for_ok_status(self, timeout=1800): lambda: self.api.get().status == types.TemplateStatus.OK, num_sec=timeout, message="template is OK", - delay=10) + delay=10, + ) def deploy(self, vm_name, cluster, timeout=900, power_on=True, initialization=None, **kwargs): """ @@ -604,14 +659,14 @@ def deploy(self, vm_name, cluster, timeout=900, power_on=True, initialization=No Returns: wrapanapi.systems.rhevm.RHEVMVirtualMachine """ - self.logger.debug(' Deploying RHEV template %s to VM %s', self.name, vm_name) + self.logger.debug(" Deploying RHEV template %s to VM %s", self.name, vm_name) vm_kwargs = { - 'name': vm_name, - 'cluster': self.system.get_cluster(cluster), - 'template': self.raw, + "name": vm_name, + "cluster": self.system.get_cluster(cluster), + "template": self.raw, } - clone = kwargs.get('clone') - domain_name = kwargs.get('storage_domain') + clone = kwargs.get("clone") + domain_name = kwargs.get("storage_domain") if domain_name: # need to specify storage domain, if its different than the template's disks location # then additional options required. 
disk allocation mode in UI required to be clone @@ -622,32 +677,32 @@ def deploy(self, vm_name, cluster, timeout=900, power_on=True, initialization=No disk=types.Disk( id=template_attachment.id, format=types.DiskFormat.COW, - storage_domains=[target_storage_domain] + storage_domains=[target_storage_domain], ) ) disk_attachments.append(new_attachment) - vm_kwargs['disk_attachments'] = disk_attachments + vm_kwargs["disk_attachments"] = disk_attachments # Placement requires two args - if 'placement_policy_host' in kwargs and 'placement_policy_affinity' in kwargs: - host = types.Host(name=kwargs['placement_policy_host']) + if "placement_policy_host" in kwargs and "placement_policy_affinity" in kwargs: + host = types.Host(name=kwargs["placement_policy_host"]) policy = types.VmPlacementPolicy( - hosts=[host], - affinity=kwargs['placement_policy_affinity']) - vm_kwargs['placement_policy'] = policy + hosts=[host], affinity=kwargs["placement_policy_affinity"] + ) + vm_kwargs["placement_policy"] = policy # if cpu is passed, also default sockets # unless it's passed - cpu = kwargs.get('cpu', None) # don't set default if its not passed + cpu = kwargs.get("cpu", None) # don't set default if it's not passed if cpu: - vm_kwargs['cpu'] = types.Cpu( - topology=types.CpuTopology(cores=cpu, sockets=kwargs.get('sockets', 1)) + vm_kwargs["cpu"] = types.Cpu( + topology=types.CpuTopology(cores=cpu, sockets=kwargs.get("sockets", 1)) ) - if 'ram' in kwargs: - vm_kwargs['memory'] = int(kwargs['ram']) # in Bytes + if "ram" in kwargs: + vm_kwargs["memory"] = int(kwargs["ram"]) # in Bytes vms_service = self.system.api.system_service().vms_service() if initialization: - vm_kwargs['initialization'] = types.Initialization(**initialization) + vm_kwargs["initialization"] = types.Initialization(**initialization) vms_service.add(types.Vm(**vm_kwargs), clone=clone) vm = self.system.get_vm(vm_name) vm.wait_for_state(VmState.STOPPED, timeout=timeout) @@ -657,7 +712,7 @@ class RHEVMSystem(System, VmMixin, TemplateMixin): - """ + r""" Client to RHEVM API This class piggybacks off ovirtsdk.
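The deploy() hunk above assembles vm_kwargs incrementally: mandatory arguments up front, optional ones only when the caller supplied them, and a single expansion into the constructor at the end. A standalone sketch of that pattern, with SimpleNamespace standing in for ovirtsdk4.types.Vm (the keys come from the hunk; the values are made up for illustration):

    from types import SimpleNamespace

    def build_vm_kwargs(vm_name, cluster, **kwargs):
        vm_kwargs = {"name": vm_name, "cluster": cluster}  # always required
        cpu = kwargs.get("cpu")  # no default unless the caller passed one
        if cpu:
            vm_kwargs["cpu"] = {"cores": cpu, "sockets": kwargs.get("sockets", 1)}
        if "ram" in kwargs:
            vm_kwargs["memory"] = int(kwargs["ram"])  # in bytes
        return vm_kwargs

    vm = SimpleNamespace(**build_vm_kwargs("test-vm", "Default", cpu=2, ram=4 * 2**30))
    assert vm.cpu == {"cores": 2, "sockets": 1} and vm.memory == 4 * 2**30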
@@ -726,11 +781,11 @@ class RHEVMSystem(System, VmMixin, TemplateMixin): """ _stats_available = { - 'num_vm': lambda self: len(self.list_vms()), - 'num_host': lambda self: len(self.list_host()), - 'num_cluster': lambda self: len(self.list_cluster()), - 'num_template': lambda self: len(self.list_templates()), - 'num_datastore': lambda self: len(self.list_datastore()), + "num_vm": lambda self: len(self.list_vms()), + "num_host": lambda self: len(self.list_host()), + "num_cluster": lambda self: len(self.list_cluster()), + "num_template": lambda self: len(self.list_templates()), + "num_datastore": lambda self: len(self.list_datastore()), } can_suspend = True @@ -740,28 +795,28 @@ class RHEVMSystem(System, VmMixin, TemplateMixin): def __init__(self, hostname, username, password, **kwargs): # generate URL from hostname - super(RHEVMSystem, self).__init__(kwargs) - less_than_rhv_4 = float(kwargs['version']) < 4.0 - url_component = 'api' if less_than_rhv_4 else 'ovirt-engine/api' - if 'api_endpoint' in kwargs: - url = kwargs['api_endpoint'] - elif 'port' in kwargs: - url = 'https://{}:{}/{}'.format(hostname, kwargs['port'], url_component) + super().__init__(kwargs) + less_than_rhv_4 = float(kwargs["version"]) < 4.0 + url_component = "api" if less_than_rhv_4 else "ovirt-engine/api" + if "api_endpoint" in kwargs: + url = kwargs["api_endpoint"] + elif "port" in kwargs: + url = "https://{}:{}/{}".format(hostname, kwargs["port"], url_component) else: - url = 'https://{}/{}'.format(hostname, url_component) + url = f"https://{hostname}/{url_component}" self._api = None self._api_kwargs = { - 'url': url, - 'username': username, - 'password': password, - 'insecure': True, + "url": url, + "username": username, + "password": password, + "insecure": True, } self.kwargs = kwargs @property def _identifying_attrs(self): - return {'url': self._api_kwargs['url']} + return {"url": self._api_kwargs["url"]} @property def can_suspend(self): @@ -790,17 +845,14 @@ def find_vms(self, name=None, uuid=None): if not name and not uuid: raise ValueError("Must specify name or uuid for find_vms()") if name: - query = 'name={}'.format(name) + query = f"name={name}" elif uuid: - query = 'id={}'.format(uuid) + query = f"id={uuid}" query_result = self._vms_service.list(search=query) return [RHEVMVirtualMachine(system=self, uuid=vm.id) for vm in query_result] def list_vms(self): - return [ - RHEVMVirtualMachine(system=self, uuid=vm.id) - for vm in self._vms_service.list() - ] + return [RHEVMVirtualMachine(system=self, uuid=vm.id) for vm in self._vms_service.list()] def get_vm(self, name=None, uuid=None): """ @@ -814,16 +866,13 @@ def get_vm(self, name=None, uuid=None): """ matches = self.find_vms(name=name, uuid=uuid) if not matches: - raise VMInstanceNotFound('name={}, id={}'.format(name, uuid)) + raise VMInstanceNotFound(f"name={name}, id={uuid}") if len(matches) > 1: - raise MultipleItemsError( - 'Found multiple matches for VM with name={}, id={}' - .format(name, uuid) - ) + raise MultipleItemsError(f"Found multiple matches for VM with name={name}, id={uuid}") return matches[0] def create_vm(self, vm_name, **kwargs): - raise NotImplementedError('create_vm not implemented') + raise NotImplementedError("create_vm not implemented") def get_vm_from_ip(self, ip): """ @@ -838,7 +887,7 @@ def get_vm_from_ip(self, ip): for vm in vms: if ip in vm.all_ips: return vm - raise VMNotFoundViaIP("IP '{}' is not known as a VM".format(ip)) + raise VMNotFoundViaIP(f"IP '{ip}' is not known as a VM") def list_host(self, **kwargs): host_list = 
self.api.system_service().hosts_service().list(**kwargs) @@ -847,11 +896,15 @@ def list_host(self, **kwargs): def list_datastore(self, sd_type=None, **kwargs): datastore_list = self.api.system_service().storage_domains_service().list(**kwargs) if sd_type: + def cond(ds): return ds.status is None and ds.type.value == sd_type + else: + def cond(ds): return ds.status is None + return [ds.name for ds in datastore_list if cond(ds)] def list_cluster(self, **kwargs): @@ -876,7 +929,8 @@ def list_disks(self, status=None, **kwargs): return [disk.name for disk in disks_list] try: return [ - disk.name for disk in disks_list + disk.name + for disk in disks_list if disk.status == types.DiskStatus.__members__[status.upper()] ] @@ -891,15 +945,15 @@ def disconnect(self): self.api.close() def remove_host_from_cluster(self, hostname): - raise NotImplementedError('remove_host_from_cluster not implemented') + raise NotImplementedError("remove_host_from_cluster not implemented") def get_cluster(self, cluster_name): try: - return self.api.system_service().clusters_service().list( - search='name={}'.format(cluster_name) - )[0] + return ( + self.api.system_service().clusters_service().list(search=f"name={cluster_name}")[0] + ) except IndexError: - raise NotFoundError('Cluster not found with name {}'.format(cluster_name)) + raise NotFoundError(f"Cluster not found with name {cluster_name}") @property def _templates_service(self): @@ -909,14 +963,11 @@ def find_templates(self, name=None, uuid=None): if not name and not uuid: raise ValueError("Must specify name or uuid for find_templates()") if name: - query = 'name={}'.format(name) + query = f"name={name}" elif uuid: - query = 'id={}'.format(uuid) + query = f"id={uuid}" query_result = self._templates_service.list(search=query) - return [ - RHEVMTemplate(system=self, uuid=template.id) - for template in query_result - ] + return [RHEVMTemplate(system=self, uuid=template.id) for template in query_result] def list_templates(self): """ @@ -924,7 +975,8 @@ def list_templates(self): """ return [ RHEVMTemplate(system=self, uuid=template.id) - for template in self._templates_service.list() if template.name != "Blank" + for template in self._templates_service.list() + if template.name != "Blank" ] def get_template(self, name=None, uuid=None): @@ -939,16 +991,16 @@ def get_template(self, name=None, uuid=None): """ matches = self.find_templates(name=name, uuid=uuid) if not matches: - raise NotFoundError('Template with name={}, id={}'.format(name, uuid)) + raise NotFoundError(f"Template with name={name}, id={uuid}") if len(matches) > 1: raise MultipleItemsError( - 'Found multiple matches for template with name={}, id={}' - .format(name, uuid) + f"Found multiple matches for template with name={name}, id={uuid}" ) return matches[0] - def create_template(self, template_name, vm_name, cluster_name=None, storage_domain_name=None, - timeout=600): + def create_template( + self, template_name, vm_name, cluster_name=None, storage_domain_name=None, timeout=600 + ): """ Create a template based on a VM. 
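The list_datastore hunk above separates predicate selection from filtering: cond is picked once, outside the comprehension, instead of re-testing sd_type per element (the added blank lines around the nested defs are black's style). The same pattern, sketched standalone with faked datastore records in place of the real API objects:

    from collections import namedtuple

    Datastore = namedtuple("Datastore", ["name", "status", "type"])

    def list_datastore_names(datastores, sd_type=None):
        if sd_type:

            def cond(ds):
                return ds.status is None and ds.type == sd_type

        else:

            def cond(ds):
                return ds.status is None

        return [ds.name for ds in datastores if cond(ds)]

    stores = [
        Datastore("data1", None, "data"),
        Datastore("export1", None, "export"),
        Datastore("down1", "maintenance", "data"),
    ]
    assert list_datastore_names(stores, sd_type="data") == ["data1"]
    assert list_datastore_names(stores) == ["data1", "export1"]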
@@ -966,7 +1018,7 @@ def create_template(self, template_name, vm_name, cluster_name=None, storage_dom total wait time for function is 2 times this value """ vm = self.get_vm(vm_name) - vm.refresh(follow='disk_attachments') # include disk_attachment refs + vm.refresh(follow="disk_attachments") # include disk_attachment refs cluster = self.get_cluster(cluster_name) if cluster_name else vm.cluster @@ -977,7 +1029,7 @@ def create_template(self, template_name, vm_name, cluster_name=None, storage_dom ) if storage_domain_name: - template_kwargs.update({'storage_domain': self.get_storage_domain(storage_domain_name)}) + template_kwargs.update({"storage_domain": self.get_storage_domain(storage_domain_name)}) # FIXME: pick domain from the VM's disk storage domains # might not need to pass explicitly in this case anyway # ovirt API a bit complicated here, failing to pickup on the setting @@ -1003,7 +1055,7 @@ def create_template(self, template_name, vm_name, cluster_name=None, storage_dom func_args=[template_name], num_sec=timeout, message="template exists", - delay=5 + delay=5, ) # Then the process has to finish template = self.get_template(template_name) @@ -1032,13 +1084,13 @@ def usage_and_quota(self): return { # RAM - 'ram_used': used_ram, - 'ram_limit': host_ram, - 'ram_total': host_ram, + "ram_used": used_ram, + "ram_limit": host_ram, + "ram_total": host_ram, # CPU - 'cpu_used': used_cpu, - 'cpu_total': host_cpu, - 'cpu_limit': None, + "cpu_used": used_cpu, + "cpu_total": host_cpu, + "cpu_limit": None, } @property @@ -1049,7 +1101,7 @@ def _get_glance_server_service(self, name): for glance_server in self._glance_servers_service.list(): if glance_server.name == name: return self._glance_servers_service.provider_service(glance_server.id) - raise ItemNotFound(name, 'glance server') + raise ItemNotFound(name, "glance server") def _get_glance_server(self, name): return self._get_glance_server_service(name).get() @@ -1060,10 +1112,22 @@ def does_glance_server_exist(self, name): except ItemNotFound: return False - def add_glance_server(self, authentication_url=None, certificates=None, comment=None, - description=None, id=None, images=None, name=None, password=None, - properties=None, requires_authentication=None, tenant_name=None, - url=None, username=None): + def add_glance_server( + self, + authentication_url=None, + certificates=None, + comment=None, + description=None, + id=None, + images=None, + name=None, + password=None, + properties=None, + requires_authentication=None, + tenant_name=None, + url=None, + username=None, + ): self._glance_servers_service.add( types.OpenStackImageProvider( name=name, @@ -1078,7 +1142,7 @@ def add_glance_server(self, authentication_url=None, certificates=None, comment= comment=comment, id=id, images=images, - properties=properties + properties=properties, ) ) wait_for(self.does_glance_server_exist, func_args=[name], delay=5, num_sec=240) @@ -1088,10 +1152,10 @@ def _storage_domains_service(self): return self.api.system_service().storage_domains_service() def _get_storage_domain_service(self, name): - query = 'name={}'.format(name) + query = f"name={name}" query_result = self._storage_domains_service.list(search=query) if not query_result: - raise ItemNotFound(name, 'storage domain') + raise ItemNotFound(name, "storage domain") else: storage_domain = query_result[0] return self._storage_domains_service.storage_domain_service(storage_domain.id) @@ -1107,24 +1171,31 @@ def _get_image_service(self, storage_domain_name, image_name): if image.name == image_name: return 
self._get_images_service(storage_domain_name).image_service(image.id) - def import_glance_image(self, source_storage_domain_name, source_template_name, - target_storage_domain_name, target_cluster_name, target_template_name, - async_=True, import_as_template=True): + def import_glance_image( + self, + source_storage_domain_name, + source_template_name, + target_storage_domain_name, + target_cluster_name, + target_template_name, + async_=True, + import_as_template=True, + ): image_service = self._get_image_service(source_storage_domain_name, source_template_name) image_service.import_( async_=async_, import_as_template=import_as_template, template=types.Template(name=target_template_name), cluster=types.Cluster(name=target_cluster_name), - storage_domain=types.StorageDomain(name=target_storage_domain_name) + storage_domain=types.StorageDomain(name=target_storage_domain_name), ) wait_for(self.does_template_exist, func_args=[target_template_name], delay=5, num_sec=240) def _get_disk_service(self, disk_name): disks_service = self.api.system_service().disks_service() - query_result = disks_service.list(search="name={}".format(disk_name)) + query_result = disks_service.list(search=f"name={disk_name}") if not query_result: - raise ItemNotFound(disk_name, 'disk') + raise ItemNotFound(disk_name, "disk") else: disk = query_result[0] return disks_service.service(disk.id) @@ -1140,8 +1211,11 @@ def _data_centers_service(self): return self.api.system_service().data_centers_service() def _get_attached_storage_domain_service(self, datacenter_id, storage_domain_id): - return (self._data_centers_service.data_center_service(datacenter_id). - storage_domains_service().storage_domain_service(storage_domain_id)) + return ( + self._data_centers_service.data_center_service(datacenter_id) + .storage_domains_service() + .storage_domain_service(storage_domain_id) + ) def get_storage_domain_connections(self, storage_domain): return self._get_storage_domain_service(storage_domain).storage_connections_service().list() @@ -1171,13 +1245,16 @@ def change_storage_domain_state(self, state, storage_domain_name, timeout=300): desired_state = getattr(types.StorageDomainStatus, state.upper(), None) active = types.StorageDomainStatus.ACTIVE if desired_state is None: - raise ValueError('Invalid state [{}] passed for setting storage domain, ' - 'value values are {}'.format(state, list(types.StorageDomainStatus))) + raise ValueError( + "Invalid state [{}] passed for setting storage domain, " + "valid values are {}".format(state, list(types.StorageDomainStatus)) + ) for datacenter in self._data_centers_service.list(): for domain in self.api.follow_link(datacenter.storage_domains): if domain.name == storage_domain_name: - attached_service = self._get_attached_storage_domain_service(datacenter.id, - domain.id) + attached_service = self._get_attached_storage_domain_service( + datacenter.id, domain.id + ) domain_status = self.api.follow_link(domain).status if domain_status == desired_state: return None # already on the state we wanted @@ -1191,13 +1268,14 @@ def change_storage_domain_state(self, state, storage_domain_name, timeout=300): lambda: self.api.follow_link(domain).status == expected_state, delay=5, num_sec=timeout, - message='waiting for {} to reach state {}'.format(storage_domain_name, - expected_state) + message="waiting for {} to reach state {}".format( + storage_domain_name, expected_state + ), ) return True else: # domain name was never matched on any data center - raise ValueError('Given domain name [{}] was never
matched'.format(storage_domain_name)) + raise ValueError(f"Given domain name [{storage_domain_name}] was never matched") def get_template_from_storage_domain( self, template_name, storage_domain_name, unregistered=False @@ -1216,10 +1294,7 @@ def get_template_from_storage_domain( for template in sds.templates_service().list(unregistered=unregistered): if template.name == template_name: return RHEVMTemplate(system=self, uuid=template.id) - raise NotFoundError( - 'template {} in storage domain {}' - .format(template_name, storage_domain_name) - ) + raise NotFoundError(f"template {template_name} in storage domain {storage_domain_name}") def list_templates_from_storage_domain(self, storage_domain_name, unregistered=False): """list the templates on a specific given storage_domain @@ -1242,11 +1317,12 @@ def import_template(self, edomain, sdomain, cluster, temp_template): target_storage_domain = self.get_storage_domain(sdomain) cluster_id = self.get_cluster(cluster).id sd_template_service = export_sd_service.templates_service().template_service( - export_template.id) + export_template.id + ) sd_template_service.import_( storage_domain=types.StorageDomain(id=target_storage_domain.id), cluster=types.Cluster(id=cluster_id), - template=types.Template(id=export_template.id) + template=types.Template(id=export_template.id), ) @property @@ -1254,12 +1330,12 @@ def _vnic_profile_service(self): return self.api.system_service().vnic_profiles_service() def list_vnic_profiles(self): - """ List all the vnic profiles on the RHEVM system.""" + """List all the vnic profiles on the RHEVM system.""" return self._vnic_profile_service.list() def get_vnic_profile(self, profile_name): - """ The vnic_profiles that exist on the system, where the key is the vnic_profile name.""" + """The vnic_profiles that exist on the system, where the key is the vnic_profile name.""" try: return next(vnic for vnic in self.list_vnic_profiles() if vnic.name == profile_name) except StopIteration: - raise NotFoundError('Unable to find vnic_profile matching name {}'.format(profile_name)) + raise NotFoundError(f"Unable to find vnic_profile matching name {profile_name}") diff --git a/wrapanapi/systems/scvmm.py b/wrapanapi/systems/scvmm.py index d79b965f..3f9a6bf4 100644 --- a/wrapanapi/systems/scvmm.py +++ b/wrapanapi/systems/scvmm.py @@ -1,14 +1,12 @@ -# coding: utf-8 """Backend management system classes Used to communicate with providers without using CFME facilities """ - import json import re +import time from datetime import datetime from textwrap import dedent -import time import pytz import tzlocal @@ -16,113 +14,117 @@ from cached_property import cached_property from wait_for import wait_for -from wrapanapi.entities import Template, TemplateMixin, Vm, VmMixin, VmState -from wrapanapi.exceptions import ( - ImageNotFoundError, VMInstanceNotFound, MultipleItemsError -) +from wrapanapi.entities import Template +from wrapanapi.entities import TemplateMixin +from wrapanapi.entities import Vm +from wrapanapi.entities import VmMixin +from wrapanapi.entities import VmState +from wrapanapi.exceptions import ImageNotFoundError +from wrapanapi.exceptions import MultipleItemsError +from wrapanapi.exceptions import VMInstanceNotFound from wrapanapi.systems.base import System WINDOWS_TZ_INFO = { - 'AUS Central Standard Time': 'Australia/Darwin', - 'AUS Eastern Standard Time': 'Australia/Sydney', - 'Afghanistan Standard Time': 'Asia/Kabul', - 'Alaskan Standard Time': 'America/Anchorage', - 'Arab Standard Time': 'Asia/Riyadh', - 'Arabian Standard 
Time': 'Asia/Dubai', - 'Arabic Standard Time': 'Asia/Baghdad', - 'Argentina Standard Time': 'America/Buenos_Aires', - 'Atlantic Standard Time': 'America/Halifax', - 'Azerbaijan Standard Time': 'Asia/Baku', - 'Azores Standard Time': 'Atlantic/Azores', - 'Bahia Standard Time': 'America/Bahia', - 'Bangladesh Standard Time': 'Asia/Dhaka', - 'Canada Central Standard Time': 'America/Regina', - 'Cape Verde Standard Time': 'Atlantic/Cape_Verde', - 'Caucasus Standard Time': 'Asia/Yerevan', - 'Cen. Australia Standard Time': 'Australia/Adelaide', - 'Central America Standard Time': 'America/Guatemala', - 'Central Asia Standard Time': 'Asia/Almaty', - 'Central Brazilian Standard Time': 'America/Cuiaba', - 'Central Europe Standard Time': 'Europe/Budapest', - 'Central European Standard Time': 'Europe/Warsaw', - 'Central Pacific Standard Time': 'Pacific/Guadalcanal', - 'Central Standard Time': 'America/Chicago', - 'Central Standard Time (Mexico)': 'America/Mexico_City', - 'China Standard Time': 'Asia/Shanghai', - 'Dateline Standard Time': 'Etc/GMT+12', - 'E. Africa Standard Time': 'Africa/Nairobi', - 'E. Australia Standard Time': 'Australia/Brisbane', - 'E. Europe Standard Time': 'Asia/Nicosia', - 'E. South America Standard Time': 'America/Sao_Paulo', - 'Eastern Standard Time': 'America/New_York', - 'Egypt Standard Time': 'Africa/Cairo', - 'Ekaterinburg Standard Time': 'Asia/Yekaterinburg', - 'FLE Standard Time': 'Europe/Kiev', - 'Fiji Standard Time': 'Pacific/Fiji', - 'GMT Standard Time': 'Europe/London', - 'GTB Standard Time': 'Europe/Bucharest', - 'Georgian Standard Time': 'Asia/Tbilisi', - 'Greenland Standard Time': 'America/Godthab', - 'Greenwich Standard Time': 'Atlantic/Reykjavik', - 'Hawaiian Standard Time': 'Pacific/Honolulu', - 'India Standard Time': 'Asia/Calcutta', - 'Iran Standard Time': 'Asia/Tehran', - 'Israel Standard Time': 'Asia/Jerusalem', - 'Jordan Standard Time': 'Asia/Amman', - 'Kaliningrad Standard Time': 'Europe/Kaliningrad', - 'Korea Standard Time': 'Asia/Seoul', - 'Magadan Standard Time': 'Asia/Magadan', - 'Mauritius Standard Time': 'Indian/Mauritius', - 'Middle East Standard Time': 'Asia/Beirut', - 'Montevideo Standard Time': 'America/Montevideo', - 'Morocco Standard Time': 'Africa/Casablanca', - 'Mountain Standard Time': 'America/Denver', - 'Mountain Standard Time (Mexico)': 'America/Chihuahua', - 'Myanmar Standard Time': 'Asia/Rangoon', - 'N. 
Central Asia Standard Time': 'Asia/Novosibirsk', - 'Namibia Standard Time': 'Africa/Windhoek', - 'Nepal Standard Time': 'Asia/Katmandu', - 'New Zealand Standard Time': 'Pacific/Auckland', - 'Newfoundland Standard Time': 'America/St_Johns', - 'North Asia East Standard Time': 'Asia/Irkutsk', - 'North Asia Standard Time': 'Asia/Krasnoyarsk', - 'Pacific SA Standard Time': 'America/Santiago', - 'Pacific Standard Time': 'America/Los_Angeles', - 'Pacific Standard Time (Mexico)': 'America/Santa_Isabel', - 'Pakistan Standard Time': 'Asia/Karachi', - 'Paraguay Standard Time': 'America/Asuncion', - 'Romance Standard Time': 'Europe/Paris', - 'Russian Standard Time': 'Europe/Moscow', - 'SA Eastern Standard Time': 'America/Cayenne', - 'SA Pacific Standard Time': 'America/Bogota', - 'SA Western Standard Time': 'America/La_Paz', - 'SE Asia Standard Time': 'Asia/Bangkok', - 'Samoa Standard Time': 'Pacific/Apia', - 'Singapore Standard Time': 'Asia/Singapore', - 'South Africa Standard Time': 'Africa/Johannesburg', - 'Sri Lanka Standard Time': 'Asia/Colombo', - 'Syria Standard Time': 'Asia/Damascus', - 'Taipei Standard Time': 'Asia/Taipei', - 'Tasmania Standard Time': 'Australia/Hobart', - 'Tokyo Standard Time': 'Asia/Tokyo', - 'Tonga Standard Time': 'Pacific/Tongatapu', - 'Turkey Standard Time': 'Europe/Istanbul', - 'US Eastern Standard Time': 'America/Indianapolis', - 'US Mountain Standard Time': 'America/Phoenix', - 'UTC': 'Etc/GMT', - 'UTC+12': 'Etc/GMT-12', - 'UTC-02': 'Etc/GMT+2', - 'UTC-11': 'Etc/GMT+11', - 'Ulaanbaatar Standard Time': 'Asia/Ulaanbaatar', - 'Venezuela Standard Time': 'America/Caracas', - 'Vladivostok Standard Time': 'Asia/Vladivostok', - 'W. Australia Standard Time': 'Australia/Perth', - 'W. Central Africa Standard Time': 'Africa/Lagos', - 'W. Europe Standard Time': 'Europe/Berlin', - 'West Asia Standard Time': 'Asia/Tashkent', - 'West Pacific Standard Time': 'Pacific/Port_Moresby', - 'Yakutsk Standard Time': 'Asia/Yakutsk' + "AUS Central Standard Time": "Australia/Darwin", + "AUS Eastern Standard Time": "Australia/Sydney", + "Afghanistan Standard Time": "Asia/Kabul", + "Alaskan Standard Time": "America/Anchorage", + "Arab Standard Time": "Asia/Riyadh", + "Arabian Standard Time": "Asia/Dubai", + "Arabic Standard Time": "Asia/Baghdad", + "Argentina Standard Time": "America/Buenos_Aires", + "Atlantic Standard Time": "America/Halifax", + "Azerbaijan Standard Time": "Asia/Baku", + "Azores Standard Time": "Atlantic/Azores", + "Bahia Standard Time": "America/Bahia", + "Bangladesh Standard Time": "Asia/Dhaka", + "Canada Central Standard Time": "America/Regina", + "Cape Verde Standard Time": "Atlantic/Cape_Verde", + "Caucasus Standard Time": "Asia/Yerevan", + "Cen. Australia Standard Time": "Australia/Adelaide", + "Central America Standard Time": "America/Guatemala", + "Central Asia Standard Time": "Asia/Almaty", + "Central Brazilian Standard Time": "America/Cuiaba", + "Central Europe Standard Time": "Europe/Budapest", + "Central European Standard Time": "Europe/Warsaw", + "Central Pacific Standard Time": "Pacific/Guadalcanal", + "Central Standard Time": "America/Chicago", + "Central Standard Time (Mexico)": "America/Mexico_City", + "China Standard Time": "Asia/Shanghai", + "Dateline Standard Time": "Etc/GMT+12", + "E. Africa Standard Time": "Africa/Nairobi", + "E. Australia Standard Time": "Australia/Brisbane", + "E. Europe Standard Time": "Asia/Nicosia", + "E. 
South America Standard Time": "America/Sao_Paulo", + "Eastern Standard Time": "America/New_York", + "Egypt Standard Time": "Africa/Cairo", + "Ekaterinburg Standard Time": "Asia/Yekaterinburg", + "FLE Standard Time": "Europe/Kiev", + "Fiji Standard Time": "Pacific/Fiji", + "GMT Standard Time": "Europe/London", + "GTB Standard Time": "Europe/Bucharest", + "Georgian Standard Time": "Asia/Tbilisi", + "Greenland Standard Time": "America/Godthab", + "Greenwich Standard Time": "Atlantic/Reykjavik", + "Hawaiian Standard Time": "Pacific/Honolulu", + "India Standard Time": "Asia/Calcutta", + "Iran Standard Time": "Asia/Tehran", + "Israel Standard Time": "Asia/Jerusalem", + "Jordan Standard Time": "Asia/Amman", + "Kaliningrad Standard Time": "Europe/Kaliningrad", + "Korea Standard Time": "Asia/Seoul", + "Magadan Standard Time": "Asia/Magadan", + "Mauritius Standard Time": "Indian/Mauritius", + "Middle East Standard Time": "Asia/Beirut", + "Montevideo Standard Time": "America/Montevideo", + "Morocco Standard Time": "Africa/Casablanca", + "Mountain Standard Time": "America/Denver", + "Mountain Standard Time (Mexico)": "America/Chihuahua", + "Myanmar Standard Time": "Asia/Rangoon", + "N. Central Asia Standard Time": "Asia/Novosibirsk", + "Namibia Standard Time": "Africa/Windhoek", + "Nepal Standard Time": "Asia/Katmandu", + "New Zealand Standard Time": "Pacific/Auckland", + "Newfoundland Standard Time": "America/St_Johns", + "North Asia East Standard Time": "Asia/Irkutsk", + "North Asia Standard Time": "Asia/Krasnoyarsk", + "Pacific SA Standard Time": "America/Santiago", + "Pacific Standard Time": "America/Los_Angeles", + "Pacific Standard Time (Mexico)": "America/Santa_Isabel", + "Pakistan Standard Time": "Asia/Karachi", + "Paraguay Standard Time": "America/Asuncion", + "Romance Standard Time": "Europe/Paris", + "Russian Standard Time": "Europe/Moscow", + "SA Eastern Standard Time": "America/Cayenne", + "SA Pacific Standard Time": "America/Bogota", + "SA Western Standard Time": "America/La_Paz", + "SE Asia Standard Time": "Asia/Bangkok", + "Samoa Standard Time": "Pacific/Apia", + "Singapore Standard Time": "Asia/Singapore", + "South Africa Standard Time": "Africa/Johannesburg", + "Sri Lanka Standard Time": "Asia/Colombo", + "Syria Standard Time": "Asia/Damascus", + "Taipei Standard Time": "Asia/Taipei", + "Tasmania Standard Time": "Australia/Hobart", + "Tokyo Standard Time": "Asia/Tokyo", + "Tonga Standard Time": "Pacific/Tongatapu", + "Turkey Standard Time": "Europe/Istanbul", + "US Eastern Standard Time": "America/Indianapolis", + "US Mountain Standard Time": "America/Phoenix", + "UTC": "Etc/GMT", + "UTC+12": "Etc/GMT-12", + "UTC-02": "Etc/GMT+2", + "UTC-11": "Etc/GMT+11", + "Ulaanbaatar Standard Time": "Asia/Ulaanbaatar", + "Venezuela Standard Time": "America/Caracas", + "Vladivostok Standard Time": "Asia/Vladivostok", + "W. Australia Standard Time": "Australia/Perth", + "W. Central Africa Standard Time": "Africa/Lagos", + "W. Europe Standard Time": "Europe/Berlin", + "West Asia Standard Time": "Asia/Tashkent", + "West Pacific Standard Time": "Pacific/Port_Moresby", + "Yakutsk Standard Time": "Asia/Yakutsk", } @@ -135,34 +137,32 @@ def convert_powershell_date(date_obj_string): So this converts to: "/Date(1449273876697)/" == datetime.datetime.fromtimestamp(1449273876697/1000.) 
""" - match = re.search(r'^/Date\((\d+)\)/$', date_obj_string) + match = re.search(r"^/Date\((\d+)\)/$", date_obj_string) if not match: - raise ValueError('Invalid date object string: {}'.format(date_obj_string)) - return datetime.fromtimestamp(int(match.group(1)) / 1000.) + raise ValueError(f"Invalid date object string: {date_obj_string}") + return datetime.fromtimestamp(int(match.group(1)) / 1000.0) -class _LogStrMixin(object): +class _LogStrMixin: @property def _log_str(self): """ Returns name or ID, but doesn't refresh raw to get name if we don't have raw data yet.. This is used only for logging purposes. """ - return ( - "[name: {}, id: {}]" - .format(self._raw['Name'] if self._raw else "", self._id) + return "[name: {}, id: {}]".format( + self._raw["Name"] if self._raw else "", self._id ) class SCVirtualMachine(Vm, _LogStrMixin): - state_map = { - 'Running': VmState.RUNNING, - 'PowerOff': VmState.STOPPED, - 'Stopped': VmState.STOPPED, - 'Paused': VmState.SUSPENDED, # 'Paused' is scvmm's version of 'suspended' - 'Missing': VmState.ERROR, - 'Creation Failed': VmState.ERROR, + "Running": VmState.RUNNING, + "PowerOff": VmState.STOPPED, + "Stopped": VmState.STOPPED, + "Paused": VmState.SUSPENDED, # 'Paused' is scvmm's version of 'suspended' + "Missing": VmState.ERROR, + "Creation Failed": VmState.ERROR, } ALLOWED_CHECK_TYPES = ["Standard", "Production", "ProductionOnly"] @@ -175,8 +175,8 @@ def __init__(self, system, raw=None, **kwargs): raw: raw json (as dict) for the VM returned by the API id: uuid of the VM (the SCVMM 'ID' property on the VM) """ - super(SCVirtualMachine, self).__init__(system, raw, **kwargs) - self._id = raw['ID'] if raw else kwargs.get('id') + super().__init__(system, raw, **kwargs) + self._id = raw["ID"] if raw else kwargs.get("id") if not self._id: raise ValueError("missing required kwarg: 'id'") self._run_script = self.system.run_script @@ -184,7 +184,7 @@ def __init__(self, system, raw=None, **kwargs): @property def _identifying_attrs(self): - return {'id': self._id} + return {"id": self._id} def refresh(self, read_from_hyperv=True): """ @@ -196,17 +196,17 @@ def refresh(self, read_from_hyperv=True): Returns: raw VM json """ - script = 'Get-SCVirtualMachine -ID \"{}\" -VMMServer $scvmm_server' + script = 'Get-SCVirtualMachine -ID "{}" -VMMServer $scvmm_server' if read_from_hyperv: - script = '{} | Read-SCVirtualMachine'.format(script) + script = f"{script} | Read-SCVirtualMachine" try: data = self._get_json(script.format(self._id)) except SCVMMSystem.PowerShellScriptError as error: if "Error ID: 801" in str(error): # Error ID 801 is a "not found" error data = None - elif 'Error ID: 1730' in str(error): - self.logger.warning('Refresh called on a VM in a state not valid for refresh') + elif "Error ID: 1730" in str(error): + self.logger.warning("Refresh called on a VM in a state not valid for refresh") return None else: raise @@ -217,7 +217,7 @@ def refresh(self, read_from_hyperv=True): @property def name(self): - return self.raw['Name'] + return self.raw["Name"] @property def host(self): @@ -225,7 +225,7 @@ def host(self): def _get_state(self): self.refresh(read_from_hyperv=False) - return self._api_state_to_vmstate(self.raw['StatusString']) + return self._api_state_to_vmstate(self.raw["StatusString"]) @property def uuid(self): @@ -233,37 +233,38 @@ def uuid(self): @property def vmid(self): - """ VMId is the ID of the VM according to Hyper-V""" + """VMId is the ID of the VM according to Hyper-V""" return self.raw["VMId"] @property def ip(self): 
self.refresh(read_from_hyperv=True) data = self._run_script( - "Get-SCVirtualMachine -ID \"{}\" -VMMServer $scvmm_server |" + 'Get-SCVirtualMachine -ID "{}" -VMMServer $scvmm_server |' "Get-SCVirtualNetworkAdapter | Select IPv4Addresses |" - "ft -HideTableHeaders".format(self._id)) + "ft -HideTableHeaders".format(self._id) + ) table = str.maketrans(dict.fromkeys("{}")) ip = data.translate(table) return ip if ip else None @property def all_ips(self): - """ wrap self.ip to meet abstractproperty """ + """wrap self.ip to meet abstractproperty""" return [self.ip] @property def creation_time(self): self.refresh() - creation_time = convert_powershell_date(self.raw['CreationTime']) + creation_time = convert_powershell_date(self.raw["CreationTime"]) return creation_time.replace( tzinfo=self.system.timezone or tzlocal.get_localzone() ).astimezone(pytz.UTC) def _do_vm(self, action, params=""): cmd = ( - "Get-SCVirtualMachine -ID \"{}\" -VMMServer $scvmm_server | {}-SCVirtualMachine {}" - .format(self._id, action, params).strip() + 'Get-SCVirtualMachine -ID "{}" -VMMServer $scvmm_server ' + "| {}-SCVirtualMachine {}".format(self._id, action, params).strip() ) self.logger.info(cmd) self._run_script(cmd) @@ -295,8 +296,10 @@ def delete(self): self.ensure_state(VmState.STOPPED) self._do_vm("Remove") wait_for( - lambda: not self.exists, delay=5, timeout="3m", - message="vm {} to not exist".format(self._log_str) + lambda: not self.exists, + delay=5, + timeout="3m", + message=f"vm {self._log_str} to not exist", ) return True @@ -306,11 +309,13 @@ def cleanup(self): def rename(self, name): self.logger.info(" Renaming SCVMM VM '%s' to '%s'", self._log_str, name) self.ensure_state(VmState.STOPPED) - self._do_vm("Set", "-Name {}".format(name)) - old_name = self.raw['Name'] + self._do_vm("Set", f"-Name {name}") + old_name = self.raw["Name"] wait_for( - lambda: self.refresh(read_from_hyperv=True) and self.name != old_name, delay=5, - timeout="3m", message="vm {} to change names".format(self._log_str) + lambda: self.refresh(read_from_hyperv=True) and self.name != old_name, + delay=5, + timeout="3m", + message=f"vm {self._log_str} to change names", ) return True @@ -320,9 +325,11 @@ def clone(self, vm_name, vm_host, path, start_vm=True): $vm_new = Get-SCVirtualMachine -ID "{src_vm}" -VMMServer $scvmm_server $vm_host = Get-SCVMHost -VMMServer $scvmm_server -ComputerName "{vm_host}" New-SCVirtualMachine -Name "{vm_name}" -VM $vm_new -VMHost $vm_host -Path "{path}" - """.format(vm_name=vm_name, src_vm=self._id, vm_host=vm_host, path=path) + """.format( + vm_name=vm_name, src_vm=self._id, vm_host=vm_host, path=path + ) if start_vm: - script = "{} -StartVM".format(script) + script = f"{script} -StartVM" self._run_script(script) return SCVirtualMachine(system=self.system, name=vm_name) @@ -335,27 +342,32 @@ def enable_virtual_services(self): Get-VM -Id {h_id} | Enable-VMIntegrationService -Name 'Guest Service Interface' }} Read-SCVirtualMachine -VM $vm """.format( - dom=self.system.domain, user=self.system.user, - password=self.system.password, scvmm_vm_id=self._id, h_id=self.vmid + dom=self.system.domain, + user=self.system.user, + password=self.system.password, + scvmm_vm_id=self._id, + h_id=self.vmid, ) self.system.run_script(script) def create_snapshot(self, check_type="Standard"): - """ Create a snapshot of a VM, set checkpoint type to standard by default. 
""" + """Create a snapshot of a VM, set checkpoint type to standard by default.""" self.set_checkpoint_type(check_type=check_type) self.logger.info("Creating a checkpoint/snapshot of VM '%s'", self.name) script = """ $vm = Get-SCVirtualMachine -ID "{scvmm_vm_id}" New-SCVMCheckpoint -VM $vm - """.format(scvmm_vm_id=self._id) + """.format( + scvmm_vm_id=self._id + ) self.system.run_script(script) def set_checkpoint_type(self, check_type="Standard"): - """ Set the checkpoint type of a VM, check_type must be one of ALLOW_CHECK_TYPES """ + """Set the checkpoint type of a VM, check_type must be one of ALLOW_CHECK_TYPES""" self.logger.info("Setting checkpoint type to %s for VM '%s'", check_type, self.name) if check_type not in self.ALLOWED_CHECK_TYPES: - raise NameError("checkpoint type '{}' not understood".format(check_type)) + raise NameError(f"checkpoint type '{check_type}' not understood") script = """ $vm = Get-SCVirtualMachine -ID "{scvmm_vm_id}" @@ -370,17 +382,14 @@ def set_checkpoint_type(self, check_type="Standard"): password=self.system.password, scvmm_vm_id=self._id, h_id=self.vmid, - check_type=check_type + check_type=check_type, ) self.system.run_script(script) def get_hardware_configuration(self): self.refresh(read_from_hyperv=True) - data = {'mem': self.raw['Memory'], 'cpu': self.raw['CPUCount']} - return { - key: str(val) if isinstance(val, str) else val - for key, val in data.items() - } + data = {"mem": self.raw["Memory"], "cpu": self.raw["CPUCount"]} + return {key: str(val) if isinstance(val, str) else val for key, val in data.items()} def disconnect_dvd_drives(self): number_dvds_disconnected = 0 @@ -389,25 +398,28 @@ def disconnect_dvd_drives(self): $DVDDrives = Get-SCVirtualDVDDrive -VM $VM foreach ($drive in $DVDDrives) {{$drive | Remove-SCVirtualDVDDrivce}} Write-Host "number_dvds_disconnected: " + $DVDDrives.length - """.format(self._id) + """.format( + self._id + ) output = self._run_script(script) output = output.splitlines() num_removed_line = [line for line in output if "number_dvds_disconnected:" in line] if num_removed_line: number_dvds_disconnected = int( - num_removed_line[0].split('number_dvds_disconnected:')[1].replace(" ", "") + num_removed_line[0].split("number_dvds_disconnected:")[1].replace(" ", "") ) return number_dvds_disconnected def mark_as_template(self, library_server, library_share, template_name=None, **kwargs): # Converts an existing VM into a template. VM no longer exists afterwards. 
- name = template_name or self.raw['Name'] + name = template_name or self.raw["Name"] script = """ $VM = Get-SCVirtualMachine -ID \"{id}\" -VMMServer $scvmm_server New-SCVMTemplate -Name \"{name}\" -VM $VM -LibraryServer \"{ls}\" -SharePath \"{lp}\" - """.format(id=self._id, name=name, ls=library_server, lp=library_share) - self.logger.info( - "Creating SCVMM Template '%s' from VM '%s'", name, self._log_str) + """.format( + id=self._id, name=name, ls=library_server, lp=library_share + ) + self.logger.info("Creating SCVMM Template '%s' from VM '%s'", name, self._log_str) self._run_script(script) self.system.update_scvmm_library() return self.system.get_template(name=name) @@ -423,8 +435,8 @@ def __init__(self, system, raw=None, **kwargs): raw: raw json (as dict) for the template returned by the API id: uuid of template (the 'ID' property on the template) """ - super(SCVMTemplate, self).__init__(system, raw, **kwargs) - self._id = raw['ID'] if raw else kwargs.get('id') + super().__init__(system, raw, **kwargs) + self._id = raw["ID"] if raw else kwargs.get("id") if not self._id: raise ValueError("missing required kwarg: 'id'") self._run_script = self.system.run_script @@ -432,11 +444,11 @@ def __init__(self, system, raw=None, **kwargs): @property def _identifying_attrs(self): - return {'id': self._id} + return {"id": self._id} @property def name(self): - return self.raw['Name'] + return self.raw["Name"] @property def uuid(self): @@ -449,7 +461,7 @@ def refresh(self): Returns: dict of raw template json """ - script = 'Get-SCVMTemplate -ID \"{}\" -VMMServer $scvmm_server' + script = 'Get-SCVMTemplate -ID "{}" -VMMServer $scvmm_server' try: data = self._get_json(script.format(self._id)) except SCVMMSystem.PowerShellScriptError as error: @@ -470,16 +482,20 @@ def deploy(self, vm_name, host_group, timeout=900, vm_cpu=None, vm_ram=None, **k $vmc = New-SCVMConfiguration -VMTemplate $tpl -Name "{vm_name}" -VMHostGroup $vm_hg Update-SCVMConfiguration -VMConfiguration $vmc New-SCVirtualMachine -Name "{vm_name}" -VMConfiguration $vmc - """.format(id=self._id, vm_name=vm_name, host_group=host_group) + """.format( + id=self._id, vm_name=vm_name, host_group=host_group + ) if kwargs: self.logger.warn("deploy() ignored kwargs: %s", kwargs) if vm_cpu: - script += " -CPUCount '{vm_cpu}'".format(vm_cpu=vm_cpu) + script += f" -CPUCount '{vm_cpu}'" if vm_ram: - script += " -MemoryMB '{vm_ram}'".format(vm_ram=vm_ram) + script += f" -MemoryMB '{vm_ram}'" self.logger.info( " Deploying SCVMM VM '%s' from template '%s' on host group '%s'", - vm_name, self._log_str, host_group + vm_name, + self._log_str, + host_group, ) self._run_script(script) @@ -493,7 +509,9 @@ def delete(self): script = """ $Template = Get-SCVMTemplate -ID \"{id}\" -VMMServer $scvmm_server Remove-SCVMTemplate -VMTemplate $Template -Force - """.format(id=self._id) + """.format( + id=self._id + ) self.logger.info("Removing SCVMM VM Template '%s'", self._log_str) self._run_script(script) self.system.update_scvmm_library() @@ -510,16 +528,17 @@ class SCVMMSystem(System, VmMixin, TemplateMixin): It still has some drawback, the main one is that pywinrm does not support domains with simple auth mode so I have to do the connection manually in the script which seems to be VERY slow. 
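Setting aside the error-1600 retry loop added below, every call through pre_script/run_script boils down to the following (a condensed sketch; `system` stands in for a connected SCVMMSystem and `script` for a PowerShell payload):

    result = system.api.run_ps(f"{system.pre_script}\n\n{script}")
    if result.status_code != 0:
        raise SCVMMSystem.PowerShellScriptError(
            f"Script returned {result.status_code}!: {result.std_err}"
        )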
""" + _stats_available = { - 'num_vm': lambda self: len(self.list_vms()), - 'num_template': lambda self: len(self.list_templates()), + "num_vm": lambda self: len(self.list_vms()), + "num_template": lambda self: len(self.list_templates()), } can_suspend = True can_pause = False def __init__(self, **kwargs): - super(SCVMMSystem, self).__init__(**kwargs) + super().__init__(**kwargs) self.host = kwargs["hostname"] self.port = kwargs.get("winrm_port", 5985) self.scheme = kwargs.get("winrm_scheme", "http") @@ -529,14 +548,14 @@ def __init__(self, **kwargs): self.domain = kwargs["domain"] self.provisioning = kwargs["provisioning"] self.api = winrm.Session( - '{scheme}://{host}:{port}'.format(scheme=self.scheme, host=self.host, port=self.port), + f"{self.scheme}://{self.host}:{self.port}", auth=(self.user, self.password), - server_cert_validation='validate' if self.winrm_validate_ssl_cert else 'ignore', + server_cert_validation="validate" if self.winrm_validate_ssl_cert else "ignore", ) @property def _identifying_attrs(self): - return {'hostname': self.host} + return {"hostname": self.host} @property def can_suspend(self): @@ -554,11 +573,15 @@ def pre_script(self): we need to create our own authentication object (PSCredential) which will provide the domain. Then it works. Big drawback is speed of this solution. """ - return dedent(""" + return dedent( + """ $secpasswd = ConvertTo-SecureString "{}" -AsPlainText -Force $mycreds = New-Object System.Management.Automation.PSCredential ("{}\\{}", $secpasswd) $scvmm_server = Get-SCVMMServer -Computername localhost -Credential $mycreds - """.format(self.password, self.domain, self.user)) + """.format( + self.password, self.domain, self.user + ) + ) @cached_property def timezone(self): @@ -578,26 +601,27 @@ def run_script(self, script): def _raise_for_result(result): raise self.PowerShellScriptError( - "Script returned {}!: {}" - .format(result.status_code, result.std_err) + f"Script returned {result.status_code}!: {result.std_err}" ) # Add retries for error id 1600 num_tries = 6 sleep_time = 10 for attempt in range(1, num_tries + 1): - self.logger.debug(' Running PowerShell script:\n%s\n', script) - result = self.api.run_ps("{}\n\n{}".format(self.pre_script, script)) + self.logger.debug(" Running PowerShell script:\n%s\n", script) + result = self.api.run_ps(f"{self.pre_script}\n\n{script}") if result.status_code == 0: break - elif hasattr(result, 'std_err') and 'Error ID: 1600' in result.std_err: + elif hasattr(result, "std_err") and "Error ID: 1600" in result.std_err: if attempt == num_tries: self.logger.error("Retried %d times, giving up", num_tries) _raise_for_result(result) self.logger.warning( "Hit scvmm error 1600 running script, waiting %d sec... 
(%d/%d)", - sleep_time, attempt, num_tries + sleep_time, + attempt, + num_tries, ) time.sleep(sleep_time) else: @@ -613,8 +637,7 @@ def get_json(self, script, depth=2): """ Run script and parse output as json """ - result = self.run_script( - "{} | ConvertTo-Json -Compress -Depth {}".format(script, depth)) + result = self.run_script(f"{script} | ConvertTo-Json -Compress -Depth {depth}") if not result: return None try: @@ -627,7 +650,7 @@ def create_vm(self, vm_name): raise NotImplementedError def list_vms(self): - vm_list = self.get_json('Get-SCVirtualMachine -All -VMMServer $scvmm_server') + vm_list = self.get_json("Get-SCVirtualMachine -All -VMMServer $scvmm_server") return [SCVirtualMachine(system=self, raw=vm) for vm in vm_list] def find_vms(self, name): @@ -636,8 +659,7 @@ def find_vms(self, name): Returns a list of SCVirtualMachine objects matching this name. """ - script = ( - 'Get-SCVirtualMachine -Name \"{}\" -VMMServer $scvmm_server') + script = 'Get-SCVirtualMachine -Name "{}" -VMMServer $scvmm_server' data = self.get_json(script.format(name)) # Check if the data returned to us was a list or 1 dict. Always return a list if not data: @@ -655,9 +677,9 @@ def get_vm(self, vm_name): """ matches = self.find_vms(name=vm_name) if not matches: - raise VMInstanceNotFound('vm with name {}'.format(vm_name)) + raise VMInstanceNotFound(f"vm with name {vm_name}") if len(matches) > 1: - raise MultipleItemsError('multiple VMs with name {}'.format(vm_name)) + raise MultipleItemsError(f"multiple VMs with name {vm_name}") return matches[0] def list_templates(self): @@ -670,8 +692,7 @@ def find_templates(self, name): Returns a list of SCVMTemplate objects matching this name. """ - script = ( - 'Get-SCVMTemplate -Name \"{}\" -VMMServer $scvmm_server') + script = 'Get-SCVMTemplate -Name "{}" -VMMServer $scvmm_server' data = self.get_json(script.format(name)) # Check if the data returned to us was a list or 1 dict. 
Always return a list if not data: @@ -689,9 +710,9 @@ def get_template(self, name): """ matches = self.find_templates(name=name) if not matches: - raise ImageNotFoundError('template with name {}'.format(name)) + raise ImageNotFoundError(f"template with name {name}") if len(matches) > 1: - raise MultipleItemsError('multiple templates with name {}'.format(name)) + raise MultipleItemsError(f"multiple templates with name {name}") return matches[0] def create_template(self, **kwargs): @@ -701,29 +722,29 @@ def _get_names(self, item_type): """ Return names for an arbitrary item type """ - data = self.get_json('Get-{} -VMMServer $scvmm_server'.format(item_type)) + data = self.get_json(f"Get-{item_type} -VMMServer $scvmm_server") if data: - return [item['Name'] for item in data] if isinstance(data, list) else [data["Name"]] + return [item["Name"] for item in data] if isinstance(data, list) else [data["Name"]] else: return None def list_clusters(self, **kwargs): """List all clusters' names.""" - return self._get_names('SCVMHostCluster') + return self._get_names("SCVMHostCluster") def list_networks(self): """List all networks' names.""" - return self._get_names('SCLogicalNetwork') + return self._get_names("SCLogicalNetwork") def list_host(self, **kwargs): - return self._get_names('SCVMHost') + return self._get_names("SCVMHost") def list_vhds(self, **kwargs): - """ List all VHD names.""" + """List all VHD names.""" return self._get_names("SCVirtualHardDisk") def info(self): - return "SCVMMSystem host={}".format(self.host) + return f"SCVMMSystem host={self.host}" def disconnect(self): pass @@ -734,29 +755,35 @@ def update_scvmm_library(self, path="VHDs"): script = """ $lib = Get-SCLibraryShare Read-SCLibraryShare -LibraryShare $lib[0] -Path {path} -RunAsynchronously - """.format(path=path) + """.format( + path=path + ) self.run_script(script) def unzip_archive(self, path, dest): - """ Unzips an archive file (Expand-Archive doesn't work for PowerShell < 5)""" + """Unzips an archive file (Expand-Archive doesn't work for PowerShell < 5)""" self.logger.info(f"Unzipping {path} into {dest}") script = """ $path = "{path}" $dest = "{dest}" Add-Type -assembly "system.io.compression.filesystem" [io.compression.zipfile]::ExtractToDirectory($path, $dest) - """.format(path=path, dest=dest) + """.format( + path=path, dest=dest + ) self.run_script(script) def download_file(self, url, name, dest="L:\\Library\\VHDs\\", unzip=False): - """ Downloads a file given a URL into the SCVMM library (or any dest) """ - self.logger.info("Downloading file {} from url into: {}".format(name, dest)) + """Downloads a file given a URL into the SCVMM library (or any dest)""" + self.logger.info(f"Downloading file {name} from url into: {dest}") script = """ $url = "{url}" $output = "{dest}{name}" $wc = New-Object System.Net.WebClient $wc.DownloadFile($url, $output) - """.format(url=url, name=name, dest=dest) + """.format( + url=url, name=name, dest=dest + ) self.run_script(script) if unzip: self.unzip_archive(f"{dest}{name}", dest) @@ -764,30 +791,36 @@ def download_file(self, url, name, dest="L:\\Library\\VHDs\\", unzip=False): self.update_scvmm_library(dest) def delete_file(self, name, dest="L:\\Library\\VHDs\\"): - """ Deletes a file from the SCVMM library """ - self.logger.info("Deleting file {} from: {}".format(name, dest)) + """Deletes a file from the SCVMM library""" + self.logger.info(f"Deleting file {name} from: {dest}") script = """ $fname = "{dest}{name}" Remove-Item -Path $fname - """.format(name=name, dest=dest) + 
""".format( + name=name, dest=dest + ) self.run_script(script) self.update_scvmm_library(dest) def delete_app_package(self, name): - self.logger.info("Deleting application package: {}".format(name)) + self.logger.info(f"Deleting application package: {name}") script = """ $app_package = Get-SCApplicationPackage -Name "{}" Remove-SCApplicationPackage -ApplicationPackage $app_package - """.format(name) + """.format( + name + ) self.run_script(script) def delete_vhd(self, name): - """ Deletes a vhd or vhdx file """ - self.logger.info("Removing the vhd {} from the library".format(name)) + """Deletes a vhd or vhdx file""" + self.logger.info(f"Removing the vhd {name} from the library") script = """ $vhd = Get-SCVirtualHardDisk -Name "{}" Remove-SCVirtualHardDisk -VirtualHardDisk $vhd - """.format(name) + """.format( + name + ) self.run_script(script) class PowerShellScriptError(Exception): diff --git a/wrapanapi/systems/vcloud.py b/wrapanapi/systems/vcloud.py index a92c9300..3a83a6f1 100644 --- a/wrapanapi/systems/vcloud.py +++ b/wrapanapi/systems/vcloud.py @@ -1,9 +1,8 @@ -# coding: utf-8 - -from pyvcloud.vcd.client import Client, BasicLoginCredentials +from pyvcloud.vcd.client import BasicLoginCredentials +from pyvcloud.vcd.client import Client from pyvcloud.vcd.org import Org -from pyvcloud.vcd.vdc import VDC from pyvcloud.vcd.vapp import VApp +from pyvcloud.vcd.vdc import VDC from wrapanapi.systems.base import System @@ -11,10 +10,9 @@ class VmwareCloudSystem(System): """Client to VMware vCloud API""" - def __init__(self, hostname, username, organization, password, api_port, - api_version, **kwargs): - super(VmwareCloudSystem, self).__init__(**kwargs) - self.endpoint = 'https://{}:{}'.format(hostname, api_port) + def __init__(self, hostname, username, organization, password, api_port, api_version, **kwargs): + super().__init__(**kwargs) + self.endpoint = f"https://{hostname}:{api_port}" self.username = username self.organization = organization self.password = password @@ -22,8 +20,9 @@ def __init__(self, hostname, username, organization, password, api_port, self._client = None def info(self): - return 'VmwareCloudSystem endpoint={}, api_version={}'.format( - self.endpoint, self.api_version) + return "VmwareCloudSystem endpoint={}, api_version={}".format( + self.endpoint, self.api_version + ) @property def client(self): @@ -53,24 +52,20 @@ def count_vcloud(self, client): org_resource = client.get_org() org = Org(client, resource=org_resource) - stats = { - 'num_availability_zone': 0, - 'num_orchestration_stack': 0, - 'num_vm': 0 - } + stats = {"num_availability_zone": 0, "num_orchestration_stack": 0, "num_vm": 0} for vdc_info in org.list_vdcs(): - stats['num_availability_zone'] += 1 - vdc = VDC(client, resource=org.get_vdc(vdc_info['name'])) + stats["num_availability_zone"] += 1 + vdc = VDC(client, resource=org.get_vdc(vdc_info["name"])) for vapp_info in vdc.list_resources(): try: - vapp_resource = vdc.get_vapp(vapp_info.get('name')) + vapp_resource = vdc.get_vapp(vapp_info.get("name")) except Exception: continue # not a vapp (probably vapp template or something) vapp = VApp(client, resource=vapp_resource) - stats['num_orchestration_stack'] += 1 - stats['num_vm'] += len(vapp.get_all_vms()) + stats["num_orchestration_stack"] += 1 + stats["num_vm"] += len(vapp.get_all_vms()) return stats diff --git a/wrapanapi/systems/virtualcenter.py b/wrapanapi/systems/virtualcenter.py index ca90171b..97b044d1 100644 --- a/wrapanapi/systems/virtualcenter.py +++ b/wrapanapi/systems/virtualcenter.py @@ -1,9 +1,7 
@@ -# coding: utf-8 """Backend management system classes Used to communicate with providers without using CFME facilities """ - import atexit import operator import re @@ -11,91 +9,97 @@ import threading import time from datetime import datetime -from distutils.version import LooseVersion from functools import partial import pytz from cached_property import threaded_cached_property -from pyVim.connect import Disconnect, SmartConnect -from pyVmomi import vim, vmodl -from wait_for import TimedOutError, wait_for - -from wrapanapi.entities import (Template, TemplateMixin, Vm, VmMixin, - VmState) +from distutils.version import LooseVersion +from pyVim.connect import Disconnect +from pyVim.connect import SmartConnect +from pyVmomi import vim +from pyVmomi import vmodl +from wait_for import TimedOutError +from wait_for import wait_for + +from wrapanapi.entities import Template +from wrapanapi.entities import TemplateMixin +from wrapanapi.entities import Vm +from wrapanapi.entities import VmMixin +from wrapanapi.entities import VmState from wrapanapi.entities.base import Entity -from wrapanapi.exceptions import (DatastoreNotFoundError, HostNotRemoved, NotFoundError, - VMCreationDateError, VMInstanceNotCloned, - VMInstanceNotFound, VMInstanceNotStopped, - VMInstanceNotSuspended, VMNotFoundViaIP) +from wrapanapi.exceptions import DatastoreNotFoundError +from wrapanapi.exceptions import HostNotRemoved +from wrapanapi.exceptions import NotFoundError +from wrapanapi.exceptions import VMCreationDateError +from wrapanapi.exceptions import VMInstanceNotCloned +from wrapanapi.exceptions import VMInstanceNotFound +from wrapanapi.exceptions import VMInstanceNotStopped +from wrapanapi.exceptions import VMInstanceNotSuspended +from wrapanapi.exceptions import VMNotFoundViaIP from wrapanapi.systems.base import System SELECTION_SPECS = [ - 'resource_pool_traversal_spec', - 'resource_pool_vm_traversal_spec', - 'folder_traversal_spec', - 'datacenter_host_traversal_spec', - 'datacenter_vm_traversal_spec', - 'compute_resource_rp_traversal_spec', - 'compute_resource_host_traversal_spec', - 'host_vm_traversal_spec', - 'datacenter_datastore_traversal_spec' + "resource_pool_traversal_spec", + "resource_pool_vm_traversal_spec", + "folder_traversal_spec", + "datacenter_host_traversal_spec", + "datacenter_vm_traversal_spec", + "compute_resource_rp_traversal_spec", + "compute_resource_host_traversal_spec", + "host_vm_traversal_spec", + "datacenter_datastore_traversal_spec", ] TRAVERSAL_SPECS = [ { - 'name': 'resource_pool_traversal_spec', - 'type': vim.ResourcePool, - 'path': 'resourcePool', - 'select_indices': [0, 1] + "name": "resource_pool_traversal_spec", + "type": vim.ResourcePool, + "path": "resourcePool", + "select_indices": [0, 1], }, { - 'name': 'resource_pool_vm_traversal_spec', - 'type': vim.ResourcePool, - 'path': 'vm', - 'select_indices': [] + "name": "resource_pool_vm_traversal_spec", + "type": vim.ResourcePool, + "path": "vm", + "select_indices": [], }, { - 'name': 'compute_resource_rp_traversal_spec', - 'type': vim.ComputeResource, - 'path': 'resourcePool', - 'select_indices': [0, 1] + "name": "compute_resource_rp_traversal_spec", + "type": vim.ComputeResource, + "path": "resourcePool", + "select_indices": [0, 1], }, { - 'name': 'compute_resource_host_traversal_spec', - 'type': vim.ComputeResource, - 'path': 'host', - 'select_indices': [] + "name": "compute_resource_host_traversal_spec", + "type": vim.ComputeResource, + "path": "host", + "select_indices": [], }, { - 'name': 'datacenter_host_traversal_spec', 
- 'type': vim.Datacenter, - 'path': 'hostFolder', - 'select_indices': [2] + "name": "datacenter_host_traversal_spec", + "type": vim.Datacenter, + "path": "hostFolder", + "select_indices": [2], }, { - 'name': 'datacenter_datastore_traversal_spec', - 'type': vim.Datacenter, - 'path': 'datastoreFolder', - 'select_indices': [2] + "name": "datacenter_datastore_traversal_spec", + "type": vim.Datacenter, + "path": "datastoreFolder", + "select_indices": [2], }, { - 'name': 'datacenter_vm_traversal_spec', - 'type': vim.Datacenter, - 'path': 'vmFolder', - 'select_indices': [2] + "name": "datacenter_vm_traversal_spec", + "type": vim.Datacenter, + "path": "vmFolder", + "select_indices": [2], }, + {"name": "host_vm_traversal_spec", "type": vim.HostSystem, "path": "vm", "select_indices": [2]}, { - 'name': 'host_vm_traversal_spec', - 'type': vim.HostSystem, - 'path': 'vm', - 'select_indices': [2] + "name": "folder_traversal_spec", + "type": vim.Folder, + "path": "childEntity", + "select_indices": [2, 3, 4, 5, 6, 7, 1, 8], }, - { - 'name': 'folder_traversal_spec', - 'type': vim.Folder, - 'path': 'childEntity', - 'select_indices': [2, 3, 4, 5, 6, 7, 1, 8] - } ] @@ -104,16 +108,15 @@ def get_task_error_message(task): function will figure out the error message. """ message = "faultCause='{}', faultMessage='{}', localizedMessage='{}'".format( - task.info.error.faultCause if hasattr(task.info.error, 'faultCause') else "", - task.info.error.faultMessage if hasattr(task.info.error, 'faultMessage') else "", - task.info.error.localizedMessage if hasattr(task.info.error, 'localizedMessage') else "" + task.info.error.faultCause if hasattr(task.info.error, "faultCause") else "", + task.info.error.faultMessage if hasattr(task.info.error, "faultMessage") else "", + task.info.error.localizedMessage if hasattr(task.info.error, "localizedMessage") else "", ) return message def progress_log_callback(logger, source, destination, progress): - logger.info("Provisioning progress {}->{}: {}".format( - source, destination, str(progress))) + logger.info(f"Provisioning progress {source}->{destination}: {str(progress)}") class VMWareVMOrTemplate(Entity): @@ -125,6 +128,7 @@ class VMWareVMOrTemplate(Entity): A template will have 'config.template'==True """ + def __init__(self, system, raw=None, **kwargs): """ Construct a VMWareVirtualMachine instance @@ -134,14 +138,14 @@ def __init__(self, system, raw=None, **kwargs): raw: pyVmomi.vim.VirtualMachine object name: name of VM """ - super(VMWareVMOrTemplate, self).__init__(system, raw, **kwargs) - self._name = raw.name if raw else kwargs.get('name') + super().__init__(system, raw, **kwargs) + self._name = raw.name if raw else kwargs.get("name") if not self._name: raise ValueError("missing required kwarg 'name'") @property def _identifying_attrs(self): - return {'name': self._name} + return {"name": self._name} @property def name(self): @@ -167,14 +171,14 @@ def host(self): @staticmethod def _get_loc_of_vm(source_template, progress_callback): - """ Get the location where the inventory object will be stored""" + """Get the location where the inventory object will be stored""" folder = getattr(source_template.parent.parent, "vmParent", None) or source_template.parent - progress_callback("Picked folder `{}`".format(folder.name)) + progress_callback(f"Picked folder `{folder.name}`") return folder @staticmethod def _set_vm_clone_spec(mark_template, power_on, vm_reloc_spec, cpu, ram): - """Set properties for Virtual Machine Cloning Operation specification """ + """Set properties for Virtual 
Machine Cloning Operation specification""" vm_clone_spec = vim.VirtualMachineCloneSpec() @@ -197,7 +201,7 @@ def delete(self): task = self.raw.Destroy_Task() try: - wait_for(lambda: self.system.get_task_status(task) == 'success', delay=3, timeout="4m") + wait_for(lambda: self.system.get_task_status(task) == "success", delay=3, timeout="4m") except TimedOutError: self.logger.warn("Hit TimedOutError waiting for VM '%s' delete task", self.name) if self.exists: @@ -230,14 +234,16 @@ def rename(self, new_name): def get_hardware_configuration(self): self.refresh() return { - 'ram': self.raw.config.hardware.memoryMB, - 'cpu': self.raw.config.hardware.numCPU, + "ram": self.raw.config.hardware.memoryMB, + "cpu": self.raw.config.hardware.numCPU, } def get_datastore_path(self, vm_config_datastore): - datastore_url = [str(datastore.url) - for datastore in self.raw.config.datastoreUrl - if datastore.name in vm_config_datastore] + datastore_url = [ + str(datastore.url) + for datastore in self.raw.config.datastoreUrl + if datastore.name in vm_config_datastore + ] return datastore_url.pop() def get_config_files_path(self): @@ -246,44 +252,49 @@ def get_config_files_path(self): return str(vmfilespath) def pick_datastore(self, allowed_datastores): - """ Pick a datastore based on free space. - Args: - allowed_datastores: A list of allowed datastore names that can be deployed on - Returns: - pyVmomi.vim.Datastore: The managed object of the datastore. + """Pick a datastore based on free space. + Args: + allowed_datastores: A list of allowed datastore names that can be deployed on + Returns: + pyVmomi.vim.Datastore: The managed object of the datastore. """ possible_datastores = [ - ds for ds in self.system.get_obj_list(vim.Datastore) - if ds.name in allowed_datastores and ds.summary.accessible and - ds.summary.multipleHostAccess and ds.overallStatus != "red"] + ds + for ds in self.system.get_obj_list(vim.Datastore) + if ds.name in allowed_datastores + and ds.summary.accessible + and ds.summary.multipleHostAccess + and ds.overallStatus != "red" + ] if not possible_datastores: - raise DatastoreNotFoundError(item_type='datastores') + raise DatastoreNotFoundError(item_type="datastores") possible_datastores.sort( - key=lambda ds: float(ds.summary.freeSpace) / float(ds.summary.capacity), - reverse=True) + key=lambda ds: float(ds.summary.freeSpace) / float(ds.summary.capacity), reverse=True + ) return possible_datastores[0] def pick_datastore_cluster(self): - """ Pick a datastore cluster based on free space. + """Pick a datastore cluster based on free space. Returns: pyVmomi.vim.StoragePod: The managed object of the datastore cluster. """ # avoid datastore clusters with no datastores in them and that are in a 'red' status possible_datastore_clusters = [ - dsc for dsc in self.system.get_obj_list(vim.StoragePod) + dsc + for dsc in self.system.get_obj_list(vim.StoragePod) if dsc.overallStatus != "red" and bool(dsc.childEntity) ] if not possible_datastore_clusters: - raise DatastoreNotFoundError(item_type='datastore clusters') + raise DatastoreNotFoundError(item_type="datastore clusters") # choose the datastore cluster with the most freespace possible_datastore_clusters.sort( - key=lambda dsc: float(dsc.summary.freeSpace) / float(dsc.summary.capacity), - reverse=True) + key=lambda dsc: float(dsc.summary.freeSpace) / float(dsc.summary.capacity), reverse=True + ) return possible_datastore_clusters[0] def _get_resource_pool(self, resource_pool_name=None): - """ Returns a resource pool managed object for a specified name. 
+ """Returns a resource pool managed object for a specified name. Args: resource_pool_name (string): The name of the resource pool. If None, first one will be @@ -299,7 +310,7 @@ def _get_resource_pool(self, resource_pool_name=None): return self.system.get_obj_list(vim.ResourcePool)[0] def _get_cluster_compute_resource(self, resource_name=None): - """ Returns a Compute Cluster Resource managed object. If a name is specified, + """Returns a Compute Cluster Resource managed object. If a name is specified, a vim.ClusterComputeResource object is returned for the specific resource. If no name is specified, the method checks if there are is a default resource specified and returns the object of the resource. Finally, if there is no name or defaults specified, it queries @@ -313,14 +324,16 @@ def _get_cluster_compute_resource(self, resource_name=None): if resource_name is not None: return self.system.get_obj(vim.ClusterComputeResource, resource_name) elif self.system.default_cluster_compute_resource is not None: - return self.system.get_obj(vim.ClusterComputeResource, - self.system.default_cluster_compute_resource) + return self.system.get_obj( + vim.ClusterComputeResource, self.system.default_cluster_compute_resource + ) else: return self.system.get_obj_list(vim.ClusterComputeResource)[0] - def _set_vm_relocate_spec(self, resource_pool, host, sparse, progress_callback, - deploy_on_ds_cluster): - """Set properties for Virtual Machine Relocate Operation specification """ + def _set_vm_relocate_spec( + self, resource_pool, host, sparse, progress_callback, deploy_on_ds_cluster + ): + """Set properties for Virtual Machine Relocate Operation specification""" vm_reloc_spec = vim.VirtualMachineRelocateSpec() @@ -334,11 +347,12 @@ def _set_vm_relocate_spec(self, resource_pool, host, sparse, progress_callback, vm_reloc_spec.pool = self._get_cluster_compute_resource(resource_pool).resourcePool else: vm_reloc_spec.pool = self._get_resource_pool(resource_pool) - progress_callback("Picked resource pool `{}`".format(vm_reloc_spec.pool.name)) + progress_callback(f"Picked resource pool `{vm_reloc_spec.pool.name}`") # Target Host for the VM, this could be none - vm_reloc_spec.host = (host if isinstance(host, vim.HostSystem) - else self.system.get_obj(vim.HostSystem, host)) + vm_reloc_spec.host = ( + host if isinstance(host, vim.HostSystem) else self.system.get_obj(vim.HostSystem, host) + ) if sparse: vm_reloc_spec.transform = vim.VirtualMachineRelocateTransformation().sparse progress_callback("Transformation has been set to sparse") @@ -348,49 +362,92 @@ def _set_vm_relocate_spec(self, resource_pool, host, sparse, progress_callback, return vm_reloc_spec - def _clone_on_datastore(self, destination, resource_pool, datastore, power_on, sparse, - mark_template, progress_callback, cpu, ram, relocate, host, - source_template, deploy_on_ds_cluster): + def _clone_on_datastore( + self, + destination, + resource_pool, + datastore, + power_on, + sparse, + mark_template, + progress_callback, + cpu, + ram, + relocate, + host, + source_template, + deploy_on_ds_cluster, + ): """Set all required parameters for a clone or relocate on a datastore""" - vm_reloc_spec = self._set_vm_relocate_spec(resource_pool=resource_pool, host=host, - sparse=sparse, - progress_callback=progress_callback, - deploy_on_ds_cluster=deploy_on_ds_cluster) + vm_reloc_spec = self._set_vm_relocate_spec( + resource_pool=resource_pool, + host=host, + sparse=sparse, + progress_callback=progress_callback, + deploy_on_ds_cluster=deploy_on_ds_cluster, + ) # Set 
the datastore property vm_reloc_spec.datastore = datastore - vm_clone_spec = self._set_vm_clone_spec(power_on=power_on, mark_template=mark_template, - cpu=cpu, ram=ram, vm_reloc_spec=vm_reloc_spec) + vm_clone_spec = self._set_vm_clone_spec( + power_on=power_on, + mark_template=mark_template, + cpu=cpu, + ram=ram, + vm_reloc_spec=vm_reloc_spec, + ) # Get the location of the new VM - folder = self._get_loc_of_vm(source_template=source_template, - progress_callback=progress_callback) + folder = self._get_loc_of_vm( + source_template=source_template, progress_callback=progress_callback + ) if relocate: action = source_template.RelocateVM_Task else: action = source_template.CloneVM_Task - action_args = dict(spec=vm_reloc_spec) if relocate else dict(folder=folder, - name=destination, - spec=vm_clone_spec) + action_args = ( + dict(spec=vm_reloc_spec) + if relocate + else dict(folder=folder, name=destination, spec=vm_clone_spec) + ) return action(**action_args) - def _clone_on_datastore_cluster(self, destination, resource_pool, datastore, power_on, sparse, - mark_template, progress_callback, cpu, ram, host, - source_template, deploy_on_ds_cluster): + def _clone_on_datastore_cluster( + self, + destination, + resource_pool, + datastore, + power_on, + sparse, + mark_template, + progress_callback, + cpu, + ram, + host, + source_template, + deploy_on_ds_cluster, + ): """Set all required parameters for a clone or relocate on a datastore cluster""" - vm_reloc_spec = self._set_vm_relocate_spec(resource_pool=resource_pool, host=host, - sparse=sparse, - progress_callback=progress_callback, - deploy_on_ds_cluster=deploy_on_ds_cluster) + vm_reloc_spec = self._set_vm_relocate_spec( + resource_pool=resource_pool, + host=host, + sparse=sparse, + progress_callback=progress_callback, + deploy_on_ds_cluster=deploy_on_ds_cluster, + ) - vm_clone_spec = self._set_vm_clone_spec(power_on=power_on, mark_template=mark_template, - cpu=cpu, ram=ram, - vm_reloc_spec=vm_reloc_spec) + vm_clone_spec = self._set_vm_clone_spec( + power_on=power_on, + mark_template=mark_template, + cpu=cpu, + ram=ram, + vm_reloc_spec=vm_reloc_spec, + ) # Create the StoragePlaceSpec object that will be passed to the RecommendDatastores method storage_spec = vim.StoragePlacementSpec() @@ -399,10 +456,11 @@ def _clone_on_datastore_cluster(self, destination, resource_pool, datastore, pow # Specification for moving or copying a VM to a different storage pod pod_spec = vim.StorageDrsPodSelectionSpec(storagePod=datastore) - storage_spec.type = 'clone' + storage_spec.type = "clone" storage_spec.cloneName = destination - storage_spec.folder = self._get_loc_of_vm(source_template=source_template, - progress_callback=progress_callback) + storage_spec.folder = self._get_loc_of_vm( + source_template=source_template, progress_callback=progress_callback + ) storage_spec.podSelectionSpec = pod_spec storage_spec.vm = source_template storage_spec.cloneSpec = vm_clone_spec @@ -412,20 +470,35 @@ def _clone_on_datastore_cluster(self, destination, resource_pool, datastore, pow # For SDRS-enabled pods, this API is intended to replace RelocateVM_Task and CloneVM_Task # SDRS is required for managing aggregated resources of a datastore cluster result = self.system.content.storageResourceManager.RecommendDatastores( - storageSpec=storage_spec) + storageSpec=storage_spec + ) if result: key = result.recommendations[0].key else: - raise ValueError( - "RecommendDatastore task failed to provide host for {}".format(destination)) + raise ValueError(f"RecommendDatastore task failed 
to provide host for {destination}") return self.system.content.storageResourceManager.ApplyStorageDrsRecommendation_Task( - key=key) + key=key + ) - def _clone(self, destination, resource_pool=None, datastore=None, power_on=True, sparse=False, - mark_template=False, provision_timeout=1800, progress_callback=None, - allowed_datastores=None, cpu=None, ram=None, relocate=False, host=None, **kwargs): + def _clone( + self, + destination, + resource_pool=None, + datastore=None, + power_on=True, + sparse=False, + mark_template=False, + provision_timeout=1800, + progress_callback=None, + allowed_datastores=None, + cpu=None, + ram=None, + relocate=False, + host=None, + **kwargs, + ): """ Clone this template to a VM When relocate is True, relocated (migrated) with VMRelocateSpec instead of being cloned @@ -463,11 +536,10 @@ def _clone(self, destination, resource_pool=None, datastore=None, power_on=True, except VMInstanceNotFound: vm = None if vm and not relocate: - raise Exception("VM/template of the name {} already present!".format(destination)) + raise Exception(f"VM/template of the name {destination} already present!") if progress_callback is None: - progress_callback = partial( - progress_log_callback, self.logger, self.name, destination) + progress_callback = partial(progress_log_callback, self.logger, self.name, destination) source_template = self.raw @@ -491,29 +563,39 @@ def _clone(self, destination, resource_pool=None, datastore=None, power_on=True, else: picked_datastore = datastores - progress_callback("Picked datastore `{}`".format(picked_datastore.name)) - - task_args = dict(destination=destination, resource_pool=resource_pool, - datastore=picked_datastore, power_on=power_on, sparse=sparse, - mark_template=mark_template, progress_callback=progress_callback, cpu=cpu, - ram=ram, relocate=relocate, host=host, source_template=source_template, - deploy_on_ds_cluster=False) + progress_callback(f"Picked datastore `{picked_datastore.name}`") + + task_args = dict( + destination=destination, + resource_pool=resource_pool, + datastore=picked_datastore, + power_on=power_on, + sparse=sparse, + mark_template=mark_template, + progress_callback=progress_callback, + cpu=cpu, + ram=ram, + relocate=relocate, + host=host, + source_template=source_template, + deploy_on_ds_cluster=False, + ) if isinstance(picked_datastore, vim.Datastore): task = self._clone_on_datastore(**task_args) elif isinstance(picked_datastore, vim.StoragePod): - task_args['deploy_on_ds_cluster'] = True - task_args.pop('relocate') + task_args["deploy_on_ds_cluster"] = True + task_args.pop("relocate") task = self._clone_on_datastore_cluster(**task_args) else: - raise NotImplementedError("{} not supported for datastore".format(picked_datastore)) + raise NotImplementedError(f"{picked_datastore} not supported for datastore") def _check(store=[task]): try: - if hasattr(store[0].info, 'progress') and store[0].info.progress is not None: - progress_callback("{}/{}%".format(store[0].info.state, store[0].info.progress)) + if hasattr(store[0].info, "progress") and store[0].info.progress is not None: + progress_callback(f"{store[0].info.state}/{store[0].info.progress}%") else: - progress_callback("{}".format(store[0].info.state)) + progress_callback(f"{store[0].info.state}") except AttributeError: pass if store[0].info.state not in {"queued", "running"}: @@ -523,10 +605,9 @@ def _check(store=[task]): wait_for(_check, num_sec=provision_timeout, delay=4) - if task.info.state != 'success': + if task.info.state != "success": self.logger.error( - "Clone 
VM from VM/template '%s' failed: %s", - self.name, get_task_error_message(task) + "Clone VM from VM/template '%s' failed: %s", self.name, get_task_error_message(task) ) raise VMInstanceNotCloned(destination) if mark_template: @@ -557,7 +638,7 @@ def add_disk(self, capacity_in_kb, provision_type=None, unit=None): (bool, task_result): Tuple containing boolean True if task ended in success, and the contents of task.result or task.error depending on state """ - provision_type = provision_type if provision_type in ['thick', 'thin'] else 'thin' + provision_type = provision_type if provision_type in ["thick", "thin"] else "thin" self.refresh() # if passed unit matches existing device unit, match these values too @@ -565,8 +646,10 @@ def add_disk(self, capacity_in_kb, provision_type=None, unit=None): controller_key = None unit_number = None virtual_disk_devices = [ - device for device - in self.raw.config.hardware.device if isinstance(device, vim.vm.device.VirtualDisk)] + device + for device in self.raw.config.hardware.device + if isinstance(device, vim.vm.device.VirtualDisk) + ] for dev in virtual_disk_devices: if unit == int(dev.unitNumber): # user specified unit matching existing disk, match key too @@ -577,12 +660,12 @@ def add_disk(self, capacity_in_kb, provision_type=None, unit=None): controller_key = dev.controllerKey if not (controller_key or unit_number): - raise ValueError('Could not identify VirtualDisk device on given vm') + raise ValueError("Could not identify VirtualDisk device on given vm") # create disk backing specification backing_spec = vim.vm.device.VirtualDisk.FlatVer2BackingInfo() - backing_spec.diskMode = 'persistent' - backing_spec.thinProvisioned = (provision_type == 'thin') + backing_spec.diskMode = "persistent" + backing_spec.thinProvisioned = provision_type == "thin" # create disk specification, attaching backing disk_spec = vim.vm.device.VirtualDisk() @@ -595,7 +678,7 @@ def add_disk(self, capacity_in_kb, provision_type=None, unit=None): # create device specification, attaching disk device_spec = vim.vm.device.VirtualDeviceSpec() - device_spec.fileOperation = 'create' + device_spec.fileOperation = "create" device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add device_spec.device = disk_spec @@ -607,13 +690,13 @@ def add_disk(self, capacity_in_kb, provision_type=None, unit=None): task = self.raw.ReconfigVM_Task(spec=vm_spec) try: - wait_for(lambda: task.info.state not in ['running', 'queued']) + wait_for(lambda: task.info.state not in ["running", "queued"]) except TimedOutError: - self.logger.exception('Task did not go to success state: %s', task) + self.logger.exception("Task did not go to success state: %s", task) finally: - if task.info.state == 'success': + if task.info.state == "success": result = (True, task.info.result) - elif task.info.state == 'error': + elif task.info.state == "error": result = (False, task.info.error) else: # shouldn't happen result = (None, None) @@ -622,9 +705,9 @@ def add_disk(self, capacity_in_kb, provision_type=None, unit=None): class VMWareVirtualMachine(VMWareVMOrTemplate, Vm): state_map = { - 'poweredOn': VmState.RUNNING, - 'poweredOff': VmState.STOPPED, - 'suspended': VmState.SUSPENDED, + "poweredOn": VmState.RUNNING, + "poweredOff": VmState.STOPPED, + "suspended": VmState.SUSPENDED, } def refresh(self): @@ -637,11 +720,11 @@ def _get_state(self): @property def ip(self): - ipv4_re = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}' + ipv4_re = r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}" self.refresh() try: ip_address = 
self.raw.summary.guest.ipAddress - if not re.match(ipv4_re, ip_address) or ip_address == '127.0.0.1': + if not re.match(ipv4_re, ip_address) or ip_address == "127.0.0.1": ip_address = None return ip_address except (AttributeError, TypeError): @@ -669,8 +752,10 @@ def creation_time(self): filter_spec = vim.event.EventFilterSpec( entity=vim.event.EventFilterSpec.ByEntity( - entity=vm, recursion=vim.event.EventFilterSpec.RecursionOption.self), - eventTypeId=['VmDeployedEvent', 'VmCreatedEvent']) + entity=vm, recursion=vim.event.EventFilterSpec.RecursionOption.self + ), + eventTypeId=["VmDeployedEvent", "VmCreatedEvent"], + ) collector = self.system.content.eventManager.CreateCollectorForEvents(filter=filter_spec) collector.SetCollectorPageSize(1000) # max allowed value events = collector.latestPage @@ -682,7 +767,7 @@ def creation_time(self): # no events found for VM, fallback to last boot time creation_time = vm.runtime.bootTime if not creation_time: - raise VMCreationDateError('Could not find a creation date for {}'.format(self.name)) + raise VMCreationDateError(f"Could not find a creation date for {self.name}") # localize and make tz-naive return creation_time.astimezone(pytz.UTC) @@ -772,7 +857,7 @@ def suspend(self): def delete(self): self.ensure_state(VmState.STOPPED) - return super(VMWareVirtualMachine, self).delete() + return super().delete() def mark_as_template(self, template_name=None, **kwargs): self.ensure_state(VmState.STOPPED) @@ -784,7 +869,7 @@ def mark_as_template(self, template_name=None, **kwargs): return template def clone(self, vm_name, **kwargs): - kwargs['destination'] = vm_name + kwargs["destination"] = vm_name self.ensure_state(VmState.STOPPED) return self._clone(**kwargs) @@ -827,21 +912,22 @@ class VMWareSystem(System, VmMixin, TemplateMixin): https://developercenter.vmware.com/web/dp/doc/preview?id=155 """ + _api = None _stats_available = { - 'num_vm': lambda self: len(self.list_vms()), - 'num_host': lambda self: len(self.list_host()), - 'num_cluster': lambda self: len(self.list_cluster()), - 'num_template': lambda self: len(self.list_templates()), - 'num_datastore': lambda self: len(self.list_datastore()), + "num_vm": lambda self: len(self.list_vms()), + "num_host": lambda self: len(self.list_host()), + "num_cluster": lambda self: len(self.list_cluster()), + "num_template": lambda self: len(self.list_templates()), + "num_datastore": lambda self: len(self.list_datastore()), } can_suspend = True can_pause = False def __init__(self, hostname, username, password, **kwargs): - super(VMWareSystem, self).__init__(**kwargs) + super().__init__(**kwargs) self.hostname = hostname self.username = username self.password = password @@ -852,7 +938,7 @@ def __init__(self, hostname, username, password, **kwargs): @property def _identifying_attrs(self): - return {'hostname': self.hostname} + return {"hostname": self.hostname} @property def can_suspend(self): @@ -867,11 +953,10 @@ def _start_keepalive(self): Send a 'current time' request to vCenter every 10 min as a connection keep-alive """ + def _keepalive(): while True: - self.logger.debug( - "vCenter keep-alive: %s", self.service_instance.CurrentTime() - ) + self.logger.debug("vCenter keep-alive: %s", self.service_instance.CurrentTime()) time.sleep(600) t = threading.Thread(target=_keepalive) @@ -889,10 +974,7 @@ def _create_service_instance(self): context = ssl._create_unverified_context() context.verify_mode = ssl.CERT_NONE si = SmartConnect( - host=self.hostname, - user=self.username, - pwd=self.password, - 
sslContext=context + host=self.hostname, user=self.username, pwd=self.password, sslContext=context ) except Exception: self.logger.error("Failed to connect to vCenter") @@ -901,10 +983,7 @@ def _create_service_instance(self): # Disconnect at teardown atexit.register(Disconnect, si) - self.logger.info( - "Connected to vCenter host %s as user %s", - self.hostname, self.username - ) + self.logger.info("Connected to vCenter host %s as user %s", self.hostname, self.username) self._start_keepalive() return si @@ -951,7 +1030,8 @@ def get_obj(self, vimtype, name, folder=None): def _search_folders_for_vm(self, name): # First get all VM folders container = self.content.viewManager.CreateContainerView( - self.content.rootFolder, [vim.Folder], True) + self.content.rootFolder, [vim.Folder], True + ) folders = container.view container.Destroy() @@ -988,17 +1068,18 @@ def destroy_folder(self, folder_name): def _build_filter_spec(self, begin_entity, property_spec): """Build a search spec for full inventory traversal, adapted from psphere""" # Create selection specs - selection_specs = [vmodl.query.PropertyCollector.SelectionSpec(name=ss) - for ss in SELECTION_SPECS] + selection_specs = [ + vmodl.query.PropertyCollector.SelectionSpec(name=ss) for ss in SELECTION_SPECS + ] # Create traversal specs traversal_specs = [] for spec_values in TRAVERSAL_SPECS: spec = vmodl.query.PropertyCollector.TraversalSpec() - spec.name = spec_values['name'] - spec.type = spec_values['type'] - spec.path = spec_values['path'] - if spec_values.get('select_indices'): - spec.selectSet = [selection_specs[i] for i in spec_values['select_indices']] + spec.name = spec_values["name"] + spec.type = spec_values["type"] + spec.path = spec_values["path"] + if spec_values.get("select_indices"): + spec.selectSet = [selection_specs[i] for i in spec_values["select_indices"]] traversal_specs.append(spec) # Create an object spec obj_spec = vmodl.query.PropertyCollector.ObjectSpec() @@ -1029,12 +1110,13 @@ def get_updated_obj(self, obj): try: filter_ = property_collector.CreateFilter(filter_spec, True) except vmodl.fault.ManagedObjectNotFound: - self.logger.warning('ManagedObjectNotFound when creating filter from spec {}' - .format(filter_spec)) + self.logger.warning( + f"ManagedObjectNotFound when creating filter from spec {filter_spec}" + ) return update = property_collector.WaitForUpdates(None) if not update or not update.filterSet or not update.filterSet[0]: - self.logger.warning('No object found when updating %s', str(obj)) + self.logger.warning("No object found when updating %s", str(obj)) return if filter_: filter_.Destroy() @@ -1055,10 +1137,9 @@ def _get_vm_or_template(self, name, force=False): VMWareVirtualMachine object, VMWareTemplate object, or None """ if not name: - raise ValueError('Invalid name: {}'.format(name)) + raise ValueError(f"Invalid name: {name}") if name not in self._vm_obj_cache or force: - self.logger.debug( - "Searching all vm folders for vm/template '%s'", name) + self.logger.debug("Searching all vm folders for vm/template '%s'", name) vm_obj = self._search_folders_for_vm(name) if not vm_obj: raise VMInstanceNotFound(name) @@ -1091,7 +1172,7 @@ def get_vm(self, name, force=False): if not vm: raise VMInstanceNotFound(name) if isinstance(vm, VMWareTemplate): - raise Exception("Looking for VM but found template of name '{}'".format(name)) + raise Exception(f"Looking for VM but found template of name '{name}'") return vm def _list_vms_or_templates(self, template=False, inaccessible=False): @@ -1108,8 +1189,10 @@ def 
_list_vms_or_templates(self, template=False, inaccessible=False): property_spec = vmodl.query.PropertyCollector.PropertySpec() property_spec.all = False property_spec.pathSet = [ - 'name', 'config.template', - 'config.uuid', 'runtime.connectionState' + "name", + "config.template", + "config.uuid", + "runtime.connectionState", ] property_spec.type = vim.VirtualMachine pfs = self._build_filter_spec(self.content.rootFolder, property_spec) @@ -1126,15 +1209,15 @@ def _list_vms_or_templates(self, template=False, inaccessible=False): # object already "knows" the answer in its cached object # content. So we just pull the value straight out of the cache. vm_props = {p.name: p.val for p in object_content.propSet} - if vm_props.get('config.template') == get_template: - if (vm_props.get('runtime.connectionState') == "inaccessible" and - inaccessible) or vm_props.get( - 'runtime.connectionState') != "inaccessible": - obj_list.append(vm_props['name']) + if vm_props.get("config.template") == get_template: + if ( + vm_props.get("runtime.connectionState") == "inaccessible" and inaccessible + ) or vm_props.get("runtime.connectionState") != "inaccessible": + obj_list.append(vm_props["name"]) return obj_list def get_vm_from_ip(self, ip): - """ Gets the name of a vm from its IP. + """Gets the name of a vm from its IP. Args: ip: The ip address of the vm. @@ -1156,19 +1239,20 @@ def get_vm_from_ip(self, ip): except Exception: pass if boot_times: - newest_boot_time = sorted(list(boot_times.items()), key=operator.itemgetter(1), - reverse=True)[0] + newest_boot_time = sorted( + list(boot_times.items()), key=operator.itemgetter(1), reverse=True + )[0] newest_vm = newest_boot_time[0] return VMWareVirtualMachine(system=self, name=newest_vm.name, raw=newest_vm) else: - raise VMNotFoundViaIP('The requested IP is not known as a VM') + raise VMNotFoundViaIP("The requested IP is not known as a VM") def is_host_connected(self, host_name): host = self.get_obj(vim.HostSystem, name=host_name) return host.summary.runtime.connectionState == "connected" def create_vm(self, vm_name): - raise NotImplementedError('This function has not yet been implemented.') + raise NotImplementedError("This function has not yet been implemented.") def list_vms(self, inaccessible=False): return [ @@ -1194,9 +1278,9 @@ def create_template(self, *args, **kwargs): def get_template(self, name, force=False): vm = self._get_vm_or_template(name, force) if not vm: - raise NotFoundError("template: {}".format(name)) + raise NotFoundError(f"template: {name}") if isinstance(vm, VMWareVirtualMachine): - raise Exception("Looking for template but found VM of name '{}'".format(name)) + raise Exception(f"Looking for template but found VM of name '{name}'") return vm def list_host(self): @@ -1228,7 +1312,7 @@ def list_networks(self): def info(self): # NOTE: Can't find these two methods in either psphere or suds # return '{} {}'.format(self.api.get_server_type(), self.api.get_api_version()) - return '{} {}'.format(self.content.about.apiType, self.content.about.apiVersion) + return f"{self.content.about.apiType} {self.content.about.apiVersion}" def disconnect(self): pass @@ -1244,7 +1328,7 @@ def _task_wait(self, task): string: pyVmomi.vim.TaskInfo.state value if the task is not queued/running/None """ task = self.get_updated_obj(task) - if task.info.state not in ['queued', 'running', None]: + if task.info.state not in ["queued", "running", None]: return task.info.state def get_task_status(self, task): @@ -1263,14 +1347,13 @@ def remove_host_from_cluster(self, 
host_name): task = host.DisconnectHost_Task() status, _ = wait_for(self._task_wait, [task]) - if status != 'success': - raise HostNotRemoved("Host {} not removed: {}".format( - host_name, get_task_error_message(task))) + if status != "success": + raise HostNotRemoved(f"Host {host_name} not removed: {get_task_error_message(task)}") task = host.Destroy_Task() status, _ = wait_for(self._task_wait, [task], fail_condition=None) - return status == 'success' + return status == "success" def usage_and_quota(self): installed_ram = 0 @@ -1283,28 +1366,28 @@ def usage_and_quota(self): property_spec = vmodl.query.PropertyCollector.PropertySpec() property_spec.all = False - property_spec.pathSet = ['name', 'config.template'] + property_spec.pathSet = ["name", "config.template"] property_spec.type = vim.VirtualMachine pfs = self._build_filter_spec(self.content.rootFolder, property_spec) object_contents = self.content.propertyCollector.RetrieveProperties(specSet=[pfs]) for vm in object_contents: vm_props = {p.name: p.val for p in vm.propSet} - if vm_props.get('config.template'): + if vm_props.get("config.template"): continue - if vm.obj.summary.runtime.powerState.lower() != 'poweredon': + if vm.obj.summary.runtime.powerState.lower() != "poweredon": continue used_ram += vm.obj.summary.config.memorySizeMB used_cpu += vm.obj.summary.config.numCpu return { # RAM - 'ram_used': used_ram, - 'ram_total': installed_ram, - 'ram_limit': None, + "ram_used": used_ram, + "ram_total": installed_ram, + "ram_limit": None, # CPU - 'cpu_used': used_cpu, - 'cpu_total': installed_cpu, - 'cpu_limit': None, + "cpu_used": used_cpu, + "cpu_total": installed_cpu, + "cpu_limit": None, } def get_network(self, network_name): @@ -1333,7 +1416,6 @@ def get_datastore(self, name): elif name in self.list_datastore_cluster(): datastore = self.get_obj(vimtype=vim.StoragePod, name=name) else: - raise ValueError("{ds} was not found as a datastore on {p}".format( - ds=name, p=self.hostname)) + raise ValueError(f"{name} was not found as a datastore on {self.hostname}") return datastore diff --git a/wrapanapi/utils/__init__.py b/wrapanapi/utils/__init__.py index 50b8eef5..b7b96d89 100644 --- a/wrapanapi/utils/__init__.py +++ b/wrapanapi/utils/__init__.py @@ -1,7 +1,6 @@ - +from .json_utils import eval_strings +from .json_utils import json_load_byteified +from .json_utils import json_loads_byteified from .logger_mixin import LoggerMixin -from .json_utils import ( - json_load_byteified, json_loads_byteified, eval_strings -) -__all__ = ['LoggerMixin', 'json_load_byteified', 'json_loads_byteified', 'eval_strings'] +__all__ = ["LoggerMixin", "json_load_byteified", "json_loads_byteified", "eval_strings"] diff --git a/wrapanapi/utils/json_utils.py b/wrapanapi/utils/json_utils.py index c861c874..e7444c12 100644 --- a/wrapanapi/utils/json_utils.py +++ b/wrapanapi/utils/json_utils.py @@ -1,28 +1,21 @@ import json - from ast import literal_eval import dateparser def json_load_byteified(file_handle): - return _byteify( - json.load(file_handle, object_hook=_byteify), - ignore_dicts=True - ) + return _byteify(json.load(file_handle, object_hook=_byteify), ignore_dicts=True) def json_loads_byteified(json_text): - return _byteify( - json.loads(json_text, object_hook=_byteify), - ignore_dicts=True - ) + return _byteify(json.loads(json_text, object_hook=_byteify), ignore_dicts=True) def _byteify(data, ignore_dicts=False): # if this is a unicode string, return its string representation if isinstance(data, str): - return data.encode('utf-8') + return 
data.encode("utf-8") # if this is a list of values, return list of byteified values if isinstance(data, list): return [_byteify(item, ignore_dicts=True) for item in data] @@ -43,7 +36,7 @@ def _try_parse_datetime(time_string): if out: return out else: - raise Exception('Could not parse datetime from string: {}'.format(time_string)) + raise Exception(f"Could not parse datetime from string: {time_string}") def _eval(text_value): @@ -51,7 +44,7 @@ def _eval(text_value): evaluators = ( literal_eval, _try_parse_datetime, - lambda val: {'true': True, 'false': False}[val] + lambda val: {"true": True, "false": False}[val], ) for eval_ in evaluators: try: @@ -63,16 +56,16 @@ def _eval(text_value): def eval_strings(content): """Recursively trying to eval any string inside json content. - Examples: - * 'true' -> True - * '2016-04-14 22:09:48' -> datetime.datetime(2016, 4, 14, 22, 9, 48) - Args: - * content: list or tuple or any iterable array - representing the json content. + Examples: + * 'true' -> True + * '2016-04-14 22:09:48' -> datetime.datetime(2016, 4, 14, 22, 9, 48) + Args: + * content: list or tuple or any iterable array + representing the json content. """ - for i in (content if isinstance(content, dict) else list(range(len(content)))): + for i in content if isinstance(content, dict) else list(range(len(content))): if isinstance(content[i], str): content[i] = _eval(content[i]) - elif hasattr(content[i], '__iter__'): + elif hasattr(content[i], "__iter__"): content[i] = eval_strings(content[i]) return content diff --git a/wrapanapi/utils/logger_mixin.py b/wrapanapi/utils/logger_mixin.py index 474a1987..dfd00a43 100644 --- a/wrapanapi/utils/logger_mixin.py +++ b/wrapanapi/utils/logger_mixin.py @@ -5,7 +5,7 @@ import logging -class LoggerMixin(object): +class LoggerMixin: @property def logger(self): """ @@ -17,10 +17,7 @@ def logger(self): """ if not hasattr(self, "_logger"): self._logger = logging.getLogger( - "{}.{}".format( - self.__class__.__module__, - self.__class__.__name__ - ) + f"{self.__class__.__module__}.{self.__class__.__name__}" ) return self._logger @@ -37,17 +34,11 @@ def logger(self, value): else: # Basic check to make sure 'value' is some kind of logger # (not necessarily a logging.Logger) - expected_attrs = [ - 'info', 'warning', 'critical', 'error', - 'trace', 'debug', 'exception' - ] + expected_attrs = ["info", "warning", "critical", "error", "trace", "debug", "exception"] callable_attrs_present = ( hasattr(value, a) and callable(value.a) for a in expected_attrs ) if not all(callable_attrs_present): - raise ValueError( - "missing one of expected logger methods: {}" - .format(expected_attrs) - ) + raise ValueError(f"missing one of expected logger methods: {expected_attrs}") self._logger = value diff --git a/wrapanapi/utils/random.py b/wrapanapi/utils/random.py index 92a2e93b..603c7c05 100644 --- a/wrapanapi/utils/random.py +++ b/wrapanapi/utils/random.py @@ -1,5 +1,5 @@ import fauxfactory -def random_name(prefix='integration-tests', length=5): - return '{}-{}'.format(prefix, fauxfactory.gen_alphanumeric(length=length).lower()) +def random_name(prefix="integration-tests", length=5): + return f"{prefix}-{fauxfactory.gen_alphanumeric(length=length).lower()}"
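For reference, the eval_strings() chain in json_utils.py should behave as follows (assumed behavior, matching the docstring's examples: literal_eval is tried first, then dateparser, then the true/false map):

    content = {"count": "3", "when": "2016-04-14 22:09:48", "enabled": "false"}
    eval_strings(content)
    # -> {"count": 3,
    #     "when": datetime.datetime(2016, 4, 14, 22, 9, 48),
    #     "enabled": False}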