From d7fe4ef43b40838d41fd9e0bf287162c2110881d Mon Sep 17 00:00:00 2001 From: Leonid Fedotov <3584432+iLeonidze@users.noreply.github.com> Date: Mon, 13 Sep 2021 11:11:11 +0300 Subject: [PATCH] Initial commit --- .dockerignore | 11 + .github/workflows/build.yaml | 33 + .github/workflows/publish.yaml | 17 + .github/workflows/unit-tests.yaml | 25 + .gitignore | 82 + CONTRIBUTING.md | 0 Dockerfile | 43 + build.sh | 19 + documentation/Installation.md | 4914 +++++++++++++++++ documentation/Kubecheck.md | 538 ++ documentation/Logging.md | 82 + documentation/Maintenance.md | 1028 ++++ documentation/Troubleshooting.md | 342 ++ documentation/images/all-in-one.png | Bin 0 -> 20803 bytes documentation/images/full-ha.png | Bin 0 -> 60272 bytes documentation/images/mini-ha.png | Bin 0 -> 44766 bytes examples/cluster.yaml/allinone-cluster.yaml | 11 + examples/cluster.yaml/full-cluster.yaml | 494 ++ examples/cluster.yaml/miniha-cluster.yaml | 22 + examples/cluster.yaml/minimal-cluster.yaml | 35 + examples/cluster.yaml/typical-cluster.yaml | 104 + examples/most_restricted_psp.yaml | 54 + examples/procedure.yaml/full-backup.yaml | 18 + examples/procedure.yaml/full-reboot.yaml | 6 + examples/procedure.yaml/full-restore.yaml | 23 + examples/procedure.yaml/minimal-backup.yaml | 5 + examples/procedure.yaml/minimal-restore.yaml | 1 + kubetool/__init__.py | 0 kubetool/__main__.py | 189 + kubetool/apparmor.py | 114 + kubetool/apt.py | 83 + kubetool/audit.py | 37 + kubetool/core/__init__.py | 0 kubetool/core/cluster.py | 285 + kubetool/core/connections.py | 76 + kubetool/core/defaults.py | 455 ++ kubetool/core/environment.py | 18 + kubetool/core/executor.py | 251 + kubetool/core/flow.py | 286 + kubetool/core/group.py | 620 +++ kubetool/core/log.py | 308 ++ kubetool/core/utils.py | 303 + kubetool/core/yaml_merger.py | 37 + kubetool/coredns.py | 164 + kubetool/cri/__init__.py | 56 + kubetool/cri/containerd.py | 90 + kubetool/cri/docker.py | 68 + kubetool/demo.py | 298 + kubetool/etcd.py | 34 + kubetool/haproxy.py | 140 + kubetool/jinja.py | 23 + kubetool/k8s_certs.py | 101 + kubetool/keepalived.py | 257 + kubetool/kubernetes.py | 1070 ++++ kubetool/kubernetes_accounts.py | 81 + kubetool/packages.py | 170 + kubetool/plugins/__init__.py | 722 +++ kubetool/plugins/calico.py | 18 + kubetool/plugins/haproxy_ingress.py | 16 + kubetool/plugins/nginx_ingress.py | 139 + kubetool/procedures/__init__.py | 0 kubetool/procedures/add_node.py | 156 + kubetool/procedures/backup.py | 485 ++ kubetool/procedures/cert_renew.py | 89 + kubetool/procedures/check_iaas.py | 533 ++ kubetool/procedures/check_paas.py | 659 +++ kubetool/procedures/do.py | 92 + kubetool/procedures/install.py | 556 ++ kubetool/procedures/manage_psp.py | 66 + kubetool/procedures/migrate_cri.py | 277 + kubetool/procedures/reboot.py | 85 + kubetool/procedures/remove_node.py | 165 + kubetool/procedures/restore.py | 333 ++ kubetool/procedures/upgrade.py | 221 + kubetool/psp.py | 459 ++ kubetool/resources/__init__.py | 0 kubetool/resources/configurations/__init__.py | 0 .../resources/configurations/defaults.yaml | 538 ++ .../resources/configurations/globals.yaml | 468 ++ kubetool/resources/drop_ins/__init__.py | 0 kubetool/resources/drop_ins/haproxy.conf | 2 + kubetool/resources/drop_ins/keepalived.conf | 2 + kubetool/resources/psp/__init__.py | 0 kubetool/resources/psp/anyuid.yaml | 61 + kubetool/resources/psp/default.yaml | 70 + kubetool/resources/psp/host-network.yaml | 65 + kubetool/resources/psp/privileged.yaml | 58 + kubetool/resources/reports/__init__.py | 0 
kubetool/resources/reports/check_report.css | 73 + kubetool/resources/scripts/__init__.py | 0 kubetool/resources/scripts/check_haproxy.sh | 12 + kubetool/resources/scripts/etcdctl.sh | 78 + kubetool/selinux.py | 213 + kubetool/sysctl.py | 71 + kubetool/system.py | 684 +++ kubetool/templates/__init__.py | 0 kubetool/templates/haproxy.cfg.j2 | 69 + kubetool/templates/keepalived.conf.j2 | 35 + kubetool/templates/kubelet.service.j2 | 18 + kubetool/templates/plugins/__init__.py | 0 .../templates/plugins/calico-ippool.yaml.j2 | 12 + .../templates/plugins/calico-v3.16.yaml.j2 | 4001 ++++++++++++++ .../templates/plugins/calico-v3.17.yaml.j2 | 3962 +++++++++++++ .../templates/plugins/calico-v3.19.yaml.j2 | 4005 ++++++++++++++ kubetool/templates/plugins/calicoctl.cfg.j2 | 6 + .../plugins/dashboard-ingress.yaml.j2 | 4 + .../templates/plugins/dashboard-v2.0.yaml.j2 | 304 + .../templates/plugins/dashboard-v2.1.yaml.j2 | 304 + .../templates/plugins/dashboard-v2.3.yaml.j2 | 304 + kubetool/templates/plugins/flannel.yaml.j2 | 605 ++ .../haproxy-ingress-controller.yaml.j2 | 225 + kubetool/templates/plugins/iperf3.yaml.j2 | 113 + .../plugins/local-path-provisioner.yaml.j2 | 168 + .../nginx-ingress-controller-v0.34.yaml.j2 | 365 ++ .../nginx-ingress-controller-v0.35.yaml.j2 | 364 ++ .../nginx-ingress-controller-v0.43.yaml.j2 | 364 ++ .../nginx-ingress-controller-v0.48.yaml.j2 | 354 ++ .../plugins/sock-shop-ingress.yaml.j2 | 15 + kubetool/templates/plugins/sock-shop.yaml.j2 | 825 +++ kubetool/testsuite.py | 292 + kubetool/thirdparties.py | 186 + kubetool/yum.py | 83 + kubetools | 11 + kubetools.py | 4 + main.spec | 67 + pyproject.toml | 3 + requirements.txt | 9 + setup.cfg | 27 + test/__init__.py | 4 + test/unit/__init__.py | 0 test/unit/core/__init__.py | 4 + test/unit/core/test_flow.py | 121 + test/unit/docker/__init__.py | 9 + test/unit/k8s_cert_test.py | 38 + test/unit/plugins/__init__.py | 0 test/unit/plugins/test_template.py | 108 + .../test_templates/test_template1.yaml | 0 .../test_templates/test_template2.yaml | 0 .../test_templates/test_template3.yaml | 0 test/unit/test_coredns.py | 106 + test/unit/test_defaults.py | 123 + test/unit/test_demo.py | 103 + test/unit/test_group.py | 95 + test/unit/test_haproxy.py | 126 + test/unit/test_inventory.py | 75 + test/unit/test_keepalived.py | 259 + test/unit/test_upgrade.py | 114 + test/unit/test_workaround.py | 61 + 148 files changed, 39437 insertions(+) create mode 100644 .dockerignore create mode 100644 .github/workflows/build.yaml create mode 100644 .github/workflows/publish.yaml create mode 100644 .github/workflows/unit-tests.yaml create mode 100644 .gitignore create mode 100644 CONTRIBUTING.md create mode 100644 Dockerfile create mode 100755 build.sh create mode 100644 documentation/Installation.md create mode 100644 documentation/Kubecheck.md create mode 100644 documentation/Logging.md create mode 100644 documentation/Maintenance.md create mode 100644 documentation/Troubleshooting.md create mode 100644 documentation/images/all-in-one.png create mode 100644 documentation/images/full-ha.png create mode 100644 documentation/images/mini-ha.png create mode 100644 examples/cluster.yaml/allinone-cluster.yaml create mode 100644 examples/cluster.yaml/full-cluster.yaml create mode 100644 examples/cluster.yaml/miniha-cluster.yaml create mode 100644 examples/cluster.yaml/minimal-cluster.yaml create mode 100644 examples/cluster.yaml/typical-cluster.yaml create mode 100644 examples/most_restricted_psp.yaml create mode 100644 examples/procedure.yaml/full-backup.yaml 
create mode 100644 examples/procedure.yaml/full-reboot.yaml create mode 100644 examples/procedure.yaml/full-restore.yaml create mode 100644 examples/procedure.yaml/minimal-backup.yaml create mode 100644 examples/procedure.yaml/minimal-restore.yaml create mode 100644 kubetool/__init__.py create mode 100755 kubetool/__main__.py create mode 100644 kubetool/apparmor.py create mode 100644 kubetool/apt.py create mode 100644 kubetool/audit.py create mode 100644 kubetool/core/__init__.py create mode 100755 kubetool/core/cluster.py create mode 100644 kubetool/core/connections.py create mode 100755 kubetool/core/defaults.py create mode 100644 kubetool/core/environment.py create mode 100644 kubetool/core/executor.py create mode 100755 kubetool/core/flow.py create mode 100755 kubetool/core/group.py create mode 100644 kubetool/core/log.py create mode 100755 kubetool/core/utils.py create mode 100644 kubetool/core/yaml_merger.py create mode 100644 kubetool/coredns.py create mode 100644 kubetool/cri/__init__.py create mode 100755 kubetool/cri/containerd.py create mode 100755 kubetool/cri/docker.py create mode 100644 kubetool/demo.py create mode 100644 kubetool/etcd.py create mode 100644 kubetool/haproxy.py create mode 100644 kubetool/jinja.py create mode 100644 kubetool/k8s_certs.py create mode 100644 kubetool/keepalived.py create mode 100644 kubetool/kubernetes.py create mode 100644 kubetool/kubernetes_accounts.py create mode 100644 kubetool/packages.py create mode 100755 kubetool/plugins/__init__.py create mode 100755 kubetool/plugins/calico.py create mode 100755 kubetool/plugins/haproxy_ingress.py create mode 100644 kubetool/plugins/nginx_ingress.py create mode 100644 kubetool/procedures/__init__.py create mode 100755 kubetool/procedures/add_node.py create mode 100755 kubetool/procedures/backup.py create mode 100755 kubetool/procedures/cert_renew.py create mode 100755 kubetool/procedures/check_iaas.py create mode 100755 kubetool/procedures/check_paas.py create mode 100755 kubetool/procedures/do.py create mode 100755 kubetool/procedures/install.py create mode 100755 kubetool/procedures/manage_psp.py create mode 100755 kubetool/procedures/migrate_cri.py create mode 100755 kubetool/procedures/reboot.py create mode 100755 kubetool/procedures/remove_node.py create mode 100755 kubetool/procedures/restore.py create mode 100755 kubetool/procedures/upgrade.py create mode 100644 kubetool/psp.py create mode 100644 kubetool/resources/__init__.py create mode 100644 kubetool/resources/configurations/__init__.py create mode 100644 kubetool/resources/configurations/defaults.yaml create mode 100644 kubetool/resources/configurations/globals.yaml create mode 100644 kubetool/resources/drop_ins/__init__.py create mode 100644 kubetool/resources/drop_ins/haproxy.conf create mode 100644 kubetool/resources/drop_ins/keepalived.conf create mode 100644 kubetool/resources/psp/__init__.py create mode 100644 kubetool/resources/psp/anyuid.yaml create mode 100644 kubetool/resources/psp/default.yaml create mode 100644 kubetool/resources/psp/host-network.yaml create mode 100644 kubetool/resources/psp/privileged.yaml create mode 100644 kubetool/resources/reports/__init__.py create mode 100644 kubetool/resources/reports/check_report.css create mode 100644 kubetool/resources/scripts/__init__.py create mode 100755 kubetool/resources/scripts/check_haproxy.sh create mode 100755 kubetool/resources/scripts/etcdctl.sh create mode 100644 kubetool/selinux.py create mode 100644 kubetool/sysctl.py create mode 100644 kubetool/system.py create mode 
100644 kubetool/templates/__init__.py create mode 100644 kubetool/templates/haproxy.cfg.j2 create mode 100644 kubetool/templates/keepalived.conf.j2 create mode 100644 kubetool/templates/kubelet.service.j2 create mode 100644 kubetool/templates/plugins/__init__.py create mode 100644 kubetool/templates/plugins/calico-ippool.yaml.j2 create mode 100644 kubetool/templates/plugins/calico-v3.16.yaml.j2 create mode 100644 kubetool/templates/plugins/calico-v3.17.yaml.j2 create mode 100644 kubetool/templates/plugins/calico-v3.19.yaml.j2 create mode 100644 kubetool/templates/plugins/calicoctl.cfg.j2 create mode 100644 kubetool/templates/plugins/dashboard-ingress.yaml.j2 create mode 100644 kubetool/templates/plugins/dashboard-v2.0.yaml.j2 create mode 100644 kubetool/templates/plugins/dashboard-v2.1.yaml.j2 create mode 100644 kubetool/templates/plugins/dashboard-v2.3.yaml.j2 create mode 100644 kubetool/templates/plugins/flannel.yaml.j2 create mode 100644 kubetool/templates/plugins/haproxy-ingress-controller.yaml.j2 create mode 100644 kubetool/templates/plugins/iperf3.yaml.j2 create mode 100644 kubetool/templates/plugins/local-path-provisioner.yaml.j2 create mode 100644 kubetool/templates/plugins/nginx-ingress-controller-v0.34.yaml.j2 create mode 100644 kubetool/templates/plugins/nginx-ingress-controller-v0.35.yaml.j2 create mode 100644 kubetool/templates/plugins/nginx-ingress-controller-v0.43.yaml.j2 create mode 100644 kubetool/templates/plugins/nginx-ingress-controller-v0.48.yaml.j2 create mode 100644 kubetool/templates/plugins/sock-shop-ingress.yaml.j2 create mode 100644 kubetool/templates/plugins/sock-shop.yaml.j2 create mode 100644 kubetool/testsuite.py create mode 100644 kubetool/thirdparties.py create mode 100644 kubetool/yum.py create mode 100755 kubetools create mode 100644 kubetools.py create mode 100644 main.spec create mode 100644 pyproject.toml create mode 100644 requirements.txt create mode 100644 setup.cfg create mode 100755 test/__init__.py create mode 100644 test/unit/__init__.py create mode 100755 test/unit/core/__init__.py create mode 100755 test/unit/core/test_flow.py create mode 100755 test/unit/docker/__init__.py create mode 100644 test/unit/k8s_cert_test.py create mode 100644 test/unit/plugins/__init__.py create mode 100644 test/unit/plugins/test_template.py create mode 100644 test/unit/plugins/test_templates/test_template1.yaml create mode 100644 test/unit/plugins/test_templates/test_template2.yaml create mode 100644 test/unit/plugins/test_templates/test_template3.yaml create mode 100755 test/unit/test_coredns.py create mode 100755 test/unit/test_defaults.py create mode 100755 test/unit/test_demo.py create mode 100755 test/unit/test_group.py create mode 100755 test/unit/test_haproxy.py create mode 100755 test/unit/test_inventory.py create mode 100755 test/unit/test_keepalived.py create mode 100755 test/unit/test_upgrade.py create mode 100755 test/unit/test_workaround.py diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..de99b60c2 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,11 @@ +documentation +dump +examples +ansible-inventory.ini +cluster.yaml +build.sh +.git +Dockerfile +.DS_Store +.gitignore +*.md diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml new file mode 100644 index 000000000..3767b0199 --- /dev/null +++ b/.github/workflows/build.yaml @@ -0,0 +1,33 @@ +name: Build Artifacts +on: + push: + branches: + - '**' +jobs: + build-image: + runs-on: ubuntu-latest + steps: + - name: Checkout Repo + uses: actions/checkout@v2 + - 
name: Build Docker Image + run: docker build -t kubetool --no-cache . + build-binary: + runs-on: ubuntu-latest + steps: + - name: Checkout Repo + uses: actions/checkout@v2 + - name: Build Binary via Docker + run: docker build -t kubetool --build-arg BUILD_TYPE=binary --no-cache . + build-package: + runs-on: ubuntu-latest + steps: + - name: Checkout Repo + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2.2.2 + - name: Install Dependencies + run: python3 -m pip install --upgrade pip + - name: Prepare Setuptools + run: python3 -m pip install wheel setuptools build + - name: Build Kubetool Package + run: python3 -m build -n diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml new file mode 100644 index 000000000..9d0548638 --- /dev/null +++ b/.github/workflows/publish.yaml @@ -0,0 +1,17 @@ +name: Publish Artifacts +on: + release: + types: [created] + push: + branches: + - 'main' +env: + TAG_NAME: ${{ github.event.release.tag_name || (github.ref == 'refs/heads/main' && 'main') }} +jobs: + publish-docker: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - run: docker build . -t ghcr.io/netcracker-technology/kubetool:${{ env.TAG_NAME }} + - run: echo ${{secrets.GITHUB_TOKEN}} | docker login https://ghcr.io -u ${GITHUB_ACTOR} --password-stdin + - run: docker push ghcr.io/netcracker-technology/kubetool:${{ env.TAG_NAME }} diff --git a/.github/workflows/unit-tests.yaml b/.github/workflows/unit-tests.yaml new file mode 100644 index 000000000..a3ced91df --- /dev/null +++ b/.github/workflows/unit-tests.yaml @@ -0,0 +1,25 @@ +name: Unit Tests +on: + push: + branches: + - '**' +jobs: + unit-tests: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - run: docker build -t kubetools --build-arg BUILD_TYPE=test --no-cache . + - run: docker run --entrypoint=python3 kubetools -m unittest discover -s /opt/kubetools/test/unit -t /opt/kubetools/test/unit + coverage: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - run: docker build -t kubetools --build-arg BUILD_TYPE=test --no-cache . + - run: docker run --entrypoint=/bin/bash kubetools -c "python3 /usr/local/bin/coverage run -m unittest discover -s /opt/kubetools/test/unit -t /opt/kubetools/test/unit; python3 /usr/local/bin/coverage report -m" > /tmp/report.txt + - run: cat /tmp/report.txt; PERCENTAGE=$(cat /tmp/report.txt | tail -1 | awk '{print $4}' | tr -dc '0-9') + linter: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - run: docker build -t kubetools --build-arg BUILD_TYPE=test --no-cache . 
+ - run: docker run --entrypoint=/bin/bash kubetools -c "/usr/local/bin/pylint kubetool test --disable fixme || exit 0" diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..d463ab1d1 --- /dev/null +++ b/.gitignore @@ -0,0 +1,82 @@ +# Project-specific excludes +config +/local_chart_folder +!examples/* +/dump +ansible-inventory.ini +admin.conf +account-tokens.yaml +ca.csr +ca.pem +ca-key.pem +kubernetes.csr +kubernetes.pem +kubernetes-key.pem +/cluster.yaml +/additional.yaml +/procedure.yaml +venv/ +build/ +dist/ +kubecheck +kubecheck.zip +report.* +*.tar +*.tar.gz +*.log + +# System & software trash +*~ +.idea/ +*.iml +*.zip +*.komodoproject +.loadpath +.project +*.pyc +.pydevproject +*.pyo +*.config_oc.yaml +*.redcar* +.*.swp +.sass-cache +.rvmrc +.DS_Store +.vagrant +.tags* +*.retry +.vscode/ +.cache +.tox/ +.coverage +*.egg-info +.eggs +/Roadmap* +Thumbs.db +ehthumbs.db +ehthumbs_vista.db +*.stackdump +[Dd]esktop.ini +$RECYCLE.BIN/ +*.cab +*.msi +*.msix +*.msm +*.msp +*.lnk +.AppleDouble +.LSOverride +Icon +._* +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..e69de29bb diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 000000000..07922eff8 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,43 @@ +FROM python:3.8.12-slim-buster +# Warning! Python and Debian versions should be strict to avoid sudden components upgrade, +# including unreasonable upgrade of GLIBC version. If the GLIBC version suddenly goes up, a large number of consumers +# will suddenly be unable to use the compiled binary version on older systems. + +ARG BUILD_TYPE + +USER root + +ENV ANSIBLE_HOST_KEY_CHECKING False + +COPY . 
/opt/kubetools/ +WORKDIR /opt/kubetools/ + +# The following dependecies required for cryptography package build (see https://github.com/pyca/cryptography/blob/main/docs/installation.rst) +# - build-essential +# - libssl-dev +# - libffi-dev +# - python3-dev +# - cargo +# Finally they should be removed to avoid big size of docker image + +RUN apt update && \ + apt install -y build-essential libssl-dev libffi-dev python3-dev cargo zlib1g-dev && \ + if [ "$BUILD_TYPE" = "binary" ]; then apt install -y upx-ucl binutils; fi && \ + pip3 install --upgrade pip && \ + pip3 install -r /opt/kubetools/requirements.txt && \ + if [ "$BUILD_TYPE" = "test" ]; then pip3 install pytest==5.4.3 pylint coverage; fi && \ + if [ "$BUILD_TYPE" = "binary" ]; then pip3 install pyinstaller; fi && \ + if [ "$BUILD_TYPE" = "binary" ]; then pyinstaller main.spec --noconfirm && exit 0; fi && \ + apt install -y openssl curl && \ + curl -k https://get.helm.sh/helm-v3.4.1-linux-amd64.tar.gz -o helm-v3.4.1.tar.gz && \ + tar -zxvf helm-v3.4.1.tar.gz && \ + mv linux-amd64/helm /usr/local/bin/helm && \ + rm -rf helm-v3.4.1.tar.gz && \ + rm -rf linux-amd64 && \ + apt remove -y build-essential libssl-dev libffi-dev python3-dev cargo && \ + apt autoremove -y && \ + apt clean -y && \ + rm -f /etc/apt/sources.list && \ + rm -rf /var/lib/apt/lists/* + +ENTRYPOINT ["/opt/kubetools/kubetools"] diff --git a/build.sh b/build.sh new file mode 100755 index 000000000..18100a8d8 --- /dev/null +++ b/build.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +cd "$(dirname "$0")" || exit 1 + +NAME=${NAME:-kubetools} + +if [[ -n "${LOCATION}" ]]; then + sed -i "s|non-release version|version ${LOCATION} build $(date +"%D %T")|g" "kubetool/__main__.py" +fi + +rm -rf build.sh documentation examples CONTRIBUTING.md .git + +docker build -t "${NAME}" --no-cache . + +for id in $DOCKER_NAMES; do + docker tag "${NAME}" "$id" +done + +chmod +x kubetools diff --git a/documentation/Installation.md b/documentation/Installation.md new file mode 100644 index 000000000..46c6dab16 --- /dev/null +++ b/documentation/Installation.md @@ -0,0 +1,4914 @@ +This section provides information about the inventory, features, and steps for installing a Kubernetes solution on the environment. 
+ +- [Prerequisites](#prerequisites) + - [Prerequisites for Deployment Node](#prerequisites-for-deployment-node) + - [Prerequisites for Cluster Nodes](#prerequisites-for-cluster-nodes) + - [Minimal Hardware Requirements](#minimal-hardware-requirements) + - [Recommended Hardware Requirements](#recommended-hardware-requirements) + - [ETCD Recommendation](#etcd-recommendation) +- [Inventory Preparation](#inventory-preparation) + - [Deployment Schemes](#deployment-schemes) + - [Non-HA Deployment Schemes](#non-ha-deployment-schemes) + - [All-in-one Scheme](#all-in-one-scheme) + - [HA Deployment Schemes](#ha-deployment-schemes) + - [Mini-HA Scheme](#mini-ha-scheme) + - [Full-HA Scheme](#full-ha-scheme) + - [Taints and Toleration](#taints-and-toleration) + - [Configuration](#configuration) + - [node_defaults](#node_defaults) + - [nodes](#nodes) + - [cluster_name](#cluster_name) + - [control_plain](#control_plain) + - [public_cluster_ip](#public_cluster_ip) + - [registry](#registry) + - [gateway_nodes](#gateway_nodes) + - [vrrp_ips](#vrrp_ips) + - [services](#services) + - [kubeadm](#kubeadm) + - [Kubernetes version](#kubernetes-version) + - [Cloud Provider Plugin](#cloud-provider-plugin) + - [Service Account Issuer](#service-account-issuer) + - [kernel_security](#kernel_security) + - [selinux](#selinux) + - [apparmor](#apparmor) + - [packages](#packages) + - [package_manager](#package_manager) + - [management](#management) + - [associations](#associations) + - [thirdparties](#thirdparties) + - [CRI](#cri) + - [modprobe](#modprobe) + - [sysctl](#sysctl) + - [ntp](#ntp) + - [chrony](#chrony) + - [timesyncd](#timesyncd) + - [resolv.conf](#resolvconf) + - [etc_hosts](#etc_hosts) + - [coredns](#coredns) + - [loadbalancer](#loadbalancer) + - [RBAC psp](#rbac-psp) + - [Configuring Admission Controller](#configuring-admission-controller) + - [Configuring OOB Policies](#configuring-oob-policies) + - [Configuring Custom Policies](#configuring-custom-policies) + - [RBAC accounts](#rbac-accounts) + - [RBAC account_defaults](#rbac-account_defaults) + - [Plugins](#plugins) + - [Predefined Plugins](#predefined-plugins) + - [calico](#calico) + - [flannel](#flannel) + - [nginx-ingress-controller](#nginx-ingress-controller) + - [haproxy-ingress-controller](#haproxy-ingress-controller) + - [kubernetes-dashboard](#kubernetes-dashboard) + - [local-path-provisioner](#local-path-provisioner) + - [Plugins Features](#plugins-features) + - [plugin_defaults](#plugin_defaults) + - [Plugins Reinstallation](#plugins-reinstallation) + - [Plugins Installation Order](#plugins-installation-order) + - [Node Selector](#node-selector) + - [Tolerations](#tolerations) + - [Custom Plugins Installation Procedures](#custom-plugins-installation-procedures) + - [template](#template) + - [expect pods](#expect_pods) + - [python](#python) + - [thirdparty](#thirdparty) + - [shell](#shell) + - [ansible](#ansible) + - [helm](#helm) + - [Advanced features](#advanced-features) + - [List Merge Strategy](#list-merge-strategy) + - [Merge Strategy Positioning](#merge-strategy-positioning) + - [Dynamic Variables](#dynamic-variables) + - [Limitations](#limitations) + - [Jinja2 Expressions Escaping](#jinja2-expressions-escaping) + - [Installation without Internet Resources](#installation-without-internet-resources) +- [Installation Procedure](#installation-procedure) + - [Installation Tasks Description](#installation-tasks-description) + - [Installation of Kubernetes using CLI](#installation-of-kubernetes-using-cli) + - [Custom Inventory File 
Location](#custom-inventory-file-location) +- [Installation Features](#installation-features) + - [Tasks List Redefinition](#tasks-list-redefinition) + - [Logging](#logging) + - [Dump Files](#dump-files) + - [Configurations Backup](#configurations-backup) + - [Ansible Inventory](#ansible-inventory) + - [Contents](#contents) + - [[all]](#all) + - [[cluster:children]](#clusterchildren) + - [[balancer], [master], [worker]](#balancer-master-worker) + - [[cluster:vars]](#clustervars) + - [Cumulative Points](#cumulative-points) +- [Supported Versions](#supported-versions) + +# Prerequisites + +The technical requirements for all types of host VMs for Kubetools installation are specified in this section. + +## Prerequisites for Deployment Node + +Ensure the following requirements are met: + +**Minimal Hardware** +* 1 CPU +* 512MB RAM + +**Operating System** +* Linux +* MacOS + +**Preinstalled Software** +* OpenSSL library +* python 3.7 (or higher version) +* pip3 +* Helm 3 (optional, only if Helm plugins required to be installed) + +Install the required python modules using the following command: + +```bash +pip3 install -r requirements.txt +``` + +**System Clock** + +System clock should be synchronized the same way as for Cluster nodes system clock. + +## Prerequisites for Cluster Nodes + +For cluster machines, ensure the following requirements are met: + +**Host type** +* VM +* Bare-Metal + +**Host arch** +* x86-64 + +**Operating System** + +* The following distributives and versions are supported: + + * Centos 7.5+, 8.4 + * RHEL 7.5+, 8.4 + * Oracle Linux 7.5+, 8.4 + * Ubuntu 20.04 + + +The actual information about the supported versions can be found at [global.yaml configuration](../kubetool/resources/configurations/globals.yaml#L389). + + +**Networking** + +* Opened TCP-ports: + * Internal communication: + * 22 + * 80 + * 443 + * 6443 + * 2379-2380 + * 10250-10252 + * 30000-32767 + * External communication: + * 80 + * 443 +* Internal network bandwidth not less than 1GBi/s. +* Dedicated internal address, IPv4, and IPv6 are supported as well, for each VM. +* Any network security policies are disabled or whitelisted. This is especially important for OpenStack environments. + * Traffic is allowed for pod subnet. Search for address at`services.kubeadm.networking.podSubnet`. By default, `10.128.0.0/14` for IPv4 or `fd02::/80` for IPv6. + * Traffic is allowed for service subnet. Search for address at `services.kubeadm.networking.serviceSubnet`. By default `172.30.0.0/16` for IPv4 or `fd03::/112` for IPv6). + +**Warning**: `Kubetools` uses `firewalld` only as an IP firewall . If you have other solution, remove or switch off the IP firewall before the installation. + +**Preinstalled software** + +* Mandatory: + * curl + * OpenSSL library + * kmod + * semanage + * conntrack + * audit + * unzip. By default it is not required. Install if you intend to unzip third-party files with **.zip** extension. +* Recommended + Installation of the below packages is highly recommended; however, Kubernetes is able to work without them, but may show warnings: + * ethtool + * ebtables + * socat + * policycoreutils-python + +**Warning**: You have to specify packages names in "RPM format" if it is possible for you OS, +e.g.: specify `conntrack-tools` instead of `conntrack`. + +**Note**: For an automated installation, you can use [Packages](#packages) during installation. 
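+
+If you want to check a node manually before running the installation, a quick pre-flight check similar to the one below can help. This snippet is only an illustrative sketch and is not part of Kubetools; the command names map to the mandatory packages listed above (`modprobe` for kmod, `auditctl` for audit, `conntrack` for conntrack-tools) and may differ on your OS:
+
+```bash
+# Illustrative pre-flight check, run on each cluster node (not performed by Kubetools)
+for cmd in curl openssl modprobe semanage conntrack auditctl unzip; do
+  command -v "$cmd" >/dev/null 2>&1 || echo "missing: $cmd"
+done
+```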
+ +**Preinstalled or RPM repository provided in `cluster.yaml` with the following RPMs from [Supported versions table](#supported-versions)** + +**Note**: + +* You can install a version other than the recommended version, but it is not supported and can cause unpredictable consequences. +* rh-haproxy18 (build provided by RedHat) is supported only for now. + +**Warning**: RHEL version 8 has a conflict in dependencies, that makes the `podman` and `containerd.io` +installation on the same OS impossible. To avoid it one should implement those steps before the installation procedure. +1. Add Docker-CE repository. +2. Run in cli: +``` +dnf -y module disable container-tools +dnf -y install 'dnf-command(copr)' +dnf -y copr enable rhcontainerbot/container-selinux +curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/CentOS_8/devel:kubic:libcontainers:stable.repo +dnf -y --refresh install containerd +dnf -y --refresh install podman +``` + +**Preconfigured** +* SSHD running on each VM via port 22. +* User with sudo and no-require-tty parameter in sudoers file. +* SSH key is configured on each node. The key should be available for the connection with a username from the previous statement. + +**Recommended** +* Logrotate policy for `/var/log/messages` is configured according to the planned load (it is recommended to use limited size and daily rotation) + +For more information, refer to _Official Kubernetes Requirements Documentation_ +at [https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin). + +### Minimal Hardware Requirements + +The minimum hardware requirements for cluster machines are as follows: + +**Balancer** +* 1 CPU +* 1GB RAM +* 10GB HDD + +**Master** +* 2 CPU +* 2GB RAM +* 40GB HDD + +**Worker** +* 4 CPU +* 4GB RAM +* 80GB HDD + +### Recommended Hardware Requirements + +The recommended hardware requirements are as follows: + +**Balancer** +* 2 CPU +* 1GB RAM +* 10GB HDD + +**Master** +* 4 CPU +* 4GB RAM + +**Worker** +* 8 CPU +* 16GB RAM +* 120GB HDD + +### ETCD Recommendation + +For a cluster with a high load on the ETCD, it is strongly recommended to mount dedicated SSD-volumes in the ETCD-storage directory (15 Gb size is recommended) on each Master before installation. +Mount point: + +``` +/var/lib/etcd +``` +[General H/W recommendations](https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/hardware.md) + +# Inventory Preparation + +Before you begin, select the deployment scheme and prepare the inventory. + +## Deployment Schemes + +Several deployment schemes exist for the cluster installation. + +There are two major deployment schemes as follows: +* Non-HA Deployment +* HA Deployment + +### Non-HA Deployment Schemes + +This deployment provides a single Kubetools master. + +#### All-in-one Scheme + +This scheme has one node assigned as master and worker roles; balancer role is optional. This scheme is used for developing and demonstrating purposes only. +An example of this scheme is available in the [All-in-one Inventory Example](../examples/cluster.yaml/allinone-cluster.yaml). + +The following image illustrates the All-in-one scheme. + +![All-in-one Scheme](/documentation/images/all-in-one.png) + +### HA Deployment Schemes + +This deployment type provides a highly available and reliable solution. 
+ +#### Mini-HA Scheme + +In this scheme, the master, balancer, and worker roles are all assigned to odd number of identical nodes (at least 3). +In this scheme, it is mandatory to enable VRRP to leverage balancing. An example of this scheme is available in the [Mini-HA Inventory Example](../examples/cluster.yaml/miniha-cluster.yaml). + +The following image illustrates the Mini-HA scheme. + +![Mini-HA Scheme](/documentation/images/mini-ha.png) + +#### Full-HA Scheme + +In this scheme, several nodes are assigned different roles. The number of master nodes should be odd, three, or more. +The number of worker nodes should be greater than one or more than three. The recommended number of balancer nodes is two, with configured VRRP, but one balancer without VRRP is also supported. +An example of this scheme presented is available in the [Minimal Full-HA Inventory Example](../examples/cluster.yaml/minimal-cluster.yaml) and [Typical Full-HA Inventory Example](../examples/cluster.yaml/typical-cluster.yaml). + +The following image illustrates the Full-HA scheme. + +![Full-HA Scheme](/documentation/images/full-ha.png) + +## Taints and Toleration + +A node, taint, lets you mark a node so that the scheduler avoids or prevents using it for certain pods. A complementary feature, tolerations, lets you designate pods that can be used on "tainted" nodes. + +Node taints are key-value pairs associated with an effect. Following are the available effects: + + * NoSchedule. The pods that do not tolerate this taint are not scheduled on the node; the existing pods are not evicted from the node. + * PreferNoSchedule. Kubernetes avoids scheduling the pods that do not tolerate this taint onto the node. + * NoExecute. A pod is evicted from the node if it is already running on the node, and is not scheduled onto the node if it is not yet running on the node. + +**Note**: Some system pods, for example, kube-proxy and fluentd, tolerate all NoExecute and NoSchedule taints, and are not evicted. + +In general, taints and tolerations support the following use cases: + + * Dedicated nodes. You can use a combination of node affinity and taints/tolerations to create dedicated nodes. For example, you can limit the number of nodes onto which to schedule pods by using labels and node affinity, apply taints to these nodes, and then add corresponding tolerations to the pods to schedule them on those particular nodes. + * Nodes with special hardware. If you have nodes with special hardware, for example, GPUs, you have to repel pods that do not need this hardware and attract pods that need it. This can be done by tainting the nodes that have the specialized hardware and adding the corresponding toleration to pods that must use this special hardware. + * Taint-based evictions. New Kubernetes versions allow configuring per-pod eviction behavior on nodes that experience problems. + +To set taint to any node, you can apply the following command: + +``` +kubectl taint nodes =: +``` + +To remove the taint added by command above you can run: + +``` +kubectl taint nodes =:- +``` + +Where: + + * NODENAME is the name of the tainted node. + * KEY is the name of the taint. For example, special, database, infra, and so on. + * VALUE is the value for the taint. + * EFFECT is the effect for the taint behavior. It can be one of NoSchedule, PreferNoSchedule, or NoExecute. 
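+
+For example, the following commands show how the placeholders above might be filled in; the node name, key, and value here are purely hypothetical:
+
+```bash
+# Reserve the hypothetical node k8s-worker-1 for database workloads
+kubectl taint nodes k8s-worker-1 database=reserved:NoSchedule
+# Remove the same taint later (note the trailing "-")
+kubectl taint nodes k8s-worker-1 database=reserved:NoSchedule-
+```
+
+A pod that should still be scheduled on such a node needs a matching toleration, as described below.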
+ +To deploy pods on tainted nodes, you should define the toleration section: + +```YAML +tolerations: +- key: + operator: Equal + value: + effect: +``` + +A toleration "matches" a taint if the keys are the same and the effects are the same, and: + + * the operator is Exists (in which case no value should be specified), or + * the operator is Equal and the values are equal. + +**Note**: An empty key with operator Exists matches all keys, values, and effects which specifies that this tolerates everything. + +## Configuration + +All the installation configurations for the cluster are in a single inventory file. It is recommended to name this file as **cluster.yaml**. + +For more information about the structure of the inventory and how to specify the values, refer to the following configuration examples: +* [Minimal Full-HA Inventory Example](examples/cluster.yaml/minimal-cluster.yaml) - It provides the minimum set of parameters required to install a cluster out of the box. +* [Typical Full-HA Inventory Example](examples/cluster.yaml/typical-cluster.yaml) - It provides a set of parameters that you probably want to configure. +* [Full Full-HA Inventory Example](examples/cluster.yaml/full-cluster.yaml) - It provides almost all the possible parameters that you can configure. +* [Minimal All-in-one Inventory Example](examples/cluster.yaml/allinone-cluster.yaml) - It provides the minimum set of parameters for deploying All-in-one scheme. +* [Minimal Mini-HA Inventory Example](examples/cluster.yaml/miniha-cluster.yaml) - It provides the minimum set of parameters for deploying Mini-HA scheme. + +These files consists of the following sections. + +### node_defaults + +In the `node_defaults` section, you can describe the parameters to be applied by default to each record in the [nodes](#nodes) section. +For example, by adding the `keyfile` parameter in this section, it is copied to all elements of the nodes list. +However, if this parameter is defined in any element of nodes list, it is not replaced in it. + +For example, you can have the following inventory content: + +```yaml +node_defaults: + keyfile: "/home/username/.ssh/id_rsa" + username: "centos" + +node: + - name: "lb" + internal_address: "192.168.0.1" + roles: ["balancer"] + - name: "master" + keyfile: "/home/username/another.key" + internal_address: "192.168.0.2" + roles: ["master"] +``` + +After executing the above example, the final result is displayed as follows: + +```yaml +node: + - name: "lb" + username: "centos" + keyfile: "/home/username/.ssh/id_rsa" + internal_address: "192.168.0.1" + roles: ["balancer"] + - name: "master" + username: "centos" + keyfile: "/home/username/another.key" + internal_address: "192.168.0.2" + roles: ["master"] +``` + +### nodes + +In the `nodes` section, it is necessary to describe each node of the future cluster. 
+ +The following options are supported: + +|Name|Type|Mandatory|Default Value|Example|Description| +|---|---|---|---|---|---| +|keyfile|string|**yes**| |`/home/username/.ssh/id_rsa`|**Absolute** path to keyfile on local machine to access the cluster machines| +|username|string|no|`centos`|`root`|Username for SSH-access the cluster machines| +|name|string|**yes**| |`k8s-master-1`|Cluster member name| +|address|ip address|no|`10.101.0.1`|External node's IP-address| +|internal_address|ip address|**yes**| |`192.168.0.1`|Internal node's IP-address| +|connection_port|int|no| |`22`|Port for SSH-connection to cluster node| +|connection_timeout|int|no|10|`60`|Timeout for SSH-connection to cluster node| +|roles|list|**yes**| |`["master"]`|Cluster member role. It can be `balancer`, `master` or `worker`.| +|labels|map|no| |`netcracker-infra: infra`|Additional labels for node| +|taints|list|no| |See examples below|Additional taints for node. **Caution**: Use at your own risk. It can cause unexpected behavior. No support is provided for consequences.| + +An example with parameters values is as follows: + +```yaml +node_defaults: + keyfile: "/home/username/.ssh/id_rsa" + username: "centos" + +nodes: + - name: "k8s-lb" + address: "10.101.0.1" + internal_address: "192.168.0.1" + roles: ["balancer"] + - name: "k8s-master-1" + address: "10.101.0.2" + internal_address: "192.168.0.2" + roles: ["master"] + labels: + region: asia + taints: + - "node-role.kubernetes.io/remove-example:NoSchedule-" + - "node-role.kubernetes.io/add-example=add-example:NoSchedule" + - name: "k8s-worker-1" + address: "10.101.0.5" + internal_address: "192.168.0.5" + roles: ["worker"] + labels: + netcracker-infra: infra + region: europe +``` + +The example is also available in [Full Inventory Example](examples/cluster.yaml/full-cluster.yaml). + +### cluster_name + +In the `cluster_name` variable specify the future address of the cluster. +On this address, the Control Plane Endpoint is raised, and it is used in the calculated parameters. + +An example is as follows: + +```yaml +cluster_name: "k8s-stack.sdntest.example.com" +``` + +**Note**: Cluster name should be a fully qualified domain name. It should not be an IP address. + + + +For more information, refer to _FQDN_ at https://en.wikipedia.org/wiki/Fully_qualified_domain_name + + + + +### control_plain + +`control_plain` parameter specifies which addresses are to be available for Kubernetes. The internal and external parameter is described in the following table. + +| Parameter | Example | Description | +|-----------|---------|-------------| +| `control_plain['internal']` | `192.168.0.1` | Internal network address for the connection. To be used for all internal kubeapi traffic. | +| `control_plain['external']` | `10.101.0.1` | External network address for the connection. To be used for serving and balancing external traffic and external connections. | + +This parameter is calculated in the first turn so that it can be referenced in inventory parameters. +For example: + +```yaml +answer: '3600 IN A {{ control_plain["internal"] }}' +``` + +This is an autocalculated parameter, but you can override it if you are aware about the procedure. +For example: + +```yaml +control_plain: + internal: 192.168.0.1 + external: 10.101.0.1 +``` + +Automatic calculation works according to the following principle: +The algorithm iterates through and looks for appropriate addresses. + +Addresses are taken from the following groups in order: + +1. VRRP IP +1. Balancer +1. 
Master + +**Note**: It is important to notice that addresses may not necessarily be taken from a single group. There may be situation that the internal address is taken from the VRRP, and the external one from the Balancer. This situation is not recommended, but it is possible. If the inventory is correctly filled in and all the addresses that are available are indicated, the algorithm automatically selects the best pair of addresses. + +After detecting addresses, the algorithm automatically displays the determined addresses and their sources as follows: + +```yaml +Control plains: + Internal: 192.168.0.1 (vrrp_ip[0]) + External: 10.101.1.101 (balancer "balancer-1") +``` + +The algorithm chooses the very first address if there are several elements in the group. If you are not satisfied with this principle, you can "help" the algorithm in choosing which address to take by specifying the parameter `control_endpoint` for the group element. +For example: + +```yaml +vrrp_ips: +- ip: 192.168.0.1 + floating_ip: 10.101.0.1 +- ip: 192.168.0.2 + floating_ip: 10.101.0.2 + control_endpoint: True +``` + +The above example produces the following result: + +``` +Control plains: + Internal: 192.168.0.2 (vrrp_ip[1]) + External: 10.101.0.2 (vrrp_ip[1]) +``` + +An example with mixed groups: + +```yaml +vrrp_ips: +- ip: 192.168.0.1 + floating_ip: 10.101.0.1 +- ip: 192.168.0.2 + control_endpoint: True + +nodes: +- name: balancer-1 + internal_address: 192.168.0.3 + address: 10.101.0.3 +- name: balancer-2 + internal_address: 192.168.0.4 + address: 10.101.0.4 + control_endpoint: True +``` + +The above example produces the following result: + +``` +Control plains: + Internal: 192.168.0.2 (vrrp_ip[1]) + External: 10.101.0.4 (balancer "balancer-2") +``` + +### public_cluster_ip + +**Warning**: `public_cluster_ip` is an obsolete variable, use `control_plain.external` variable instead. + +`public_cluster_ip` variable specifies the Kubernetes external address to connect from an external network. +This variable is optional and required if you are using Helm plugins installation. + +By default `public_cluster_ip` inherits `control_plain["external"]` as shown in the following code: + +```yaml +public_cluster_ip: '{{ control_plain["external"] }}' +``` + +However, it is possible to change an address if the external control_plain parameter is not suitable. For example, if the cluster is behind an external balancer as shown in the following code. + +```yaml +public_cluster_ip: "10.102.0.1" +``` + +### registry + +If you want to install Kubernetes in a private environment, without access to the internet, then you need to redefine the addresses of remote resources. These resources are many, so for convenience there is a single unified registry parameter that allows you to specify the registry for everything at once. To do this, you need to specify `registry` section in the root of the inventory and fill it with parameters. The following parameters are supported: + +| Parameter | Type | Default value | Description | +|-------------|---------|---------------|--------------------------------------------------------------| +| address | string | | Full address to the registry, without protocol and port. | +| docker_port | number | | Custom port for connecting to the image registry. | +| webserver | boolean | `False` | A special parameter indicating whether registry is has ability to serve http files. 
When enabled, the `thirdparties` are patched and the `/k8s.gcr.io` path appended to the address in `services.kubeadm.imageRepository`. | +| ssl | boolean | `False` | Registry SSL support switch. | + +The `registry` parameter automatically completes the following parameters: + +|Path|Registry Type|Format|Example|Description| +|---|---|---|---|---| +|`services.kubeadm.imageRepository`|Docker|Address without protocol, where Kubernetes images are stored. It should be the full path to the repository.|```example.com:5443/k8s.gcr.io```|Kubernetes Image Repository. The system container's images such as `kubeapi` or `etcd` is loaded from this registry.| +|`services.cri.dockerConfig.insecure-registries`|Docker|List with addresses without a protocol.|```example.com:5443```|Docker Insecure Registries. It is necessary for the Docker to allow the connection to addresses unknown to it.| +|`services.cri.dockerConfig.registry-mirrors`|Docker|List with addresses. Each address should contain a protocol.|```https://example.com:5443```|Docker Registry Mirrors. Additional image sources for the container's images pull.| +|`services.cri.containerdConfig.{{containerd-specific name}}`|Docker|Toml-like section with endpoints according to the containerd docs.|```https://example.com:5443```|| +|`services.thirdparties.{{ thirdparty }}.source`|Plain|Address with protocol or absolute path on deploy node. It should be the full path to the file.|```https://example.com/kubeadm/v1.20.2/bin/linux/amd64/kubeadm```|Thridparty Source. Thirdparty file, such as binary, archive and so on, is loaded from this registry.| +|`plugin_defaults.installation.registry`|Docker|Address without protocol, where plugins images are stored.|```example.com:5443```|Plugins Images Registry. All plugins container's images are loaded from this registry.| + +**Note**: You can enter these parameters yourself, as well as override them, even if the `registry` parameter is set. + +Example: + +```yaml +registry: + address: example.com + docker_port: 5443 + webserver: True + ssl: False +``` + +This configuration generates the following parameters: + +```yaml +services: + kubeadm: + imageRepository: example.com:5443/k8s.gcr.io + cri: + dockerConfig: + insecure-registries: + - example.com:5443 + registry-mirrors: + - http://example.com:5443 + thirdparties: + /usr/bin/calicoctl: + source: http://example.com/webserver/repository/raw/projectcalico/calicoctl/v3.19.1/calicoctl-linux-amd64 + /usr/bin/kubeadm: + source: http://example.com/webserver/repository/raw/kubernetes/kubeadm/v1.20.2/bin/linux/amd64/kubeadm + /usr/bin/kubectl: + source: http://example.com/webserver/repository/raw/kubernetes/kubectl/v1.20.2/bin/linux/amd64/kubectl + /usr/bin/kubelet: + source: http://example.com/webserver/repository/raw/kubernetes/kubelet/v1.20.2/bin/linux/amd64/kubelet +plugin_defaults: + installation: + registry: example.com:5443 +``` + +However, if you override one of the replaced parameters, it is not replaced. For example, with the following configuration: + +```yaml +registry: + address: example.com + docker_port: 5443 + webserver: True + ssl: False +services: + kubeadm: + imageRepository: 1.1.1.1:8080/test +``` + +The following configuration is produced: + +```yaml +services: + kubeadm: + imageRepository: 1.1.1.1:8080/test + cri: + dockerConfig: + insecure-registries: + - example.com:5443 + registry-mirrors: + - http://example.com:5443 +... 
+``` + +### gateway_nodes + +If you do not have direct SSH-access to the cluster nodes from the deployer node and you need to connect via the gateway, you can specify the gateway nodes through which you need to create an SSH-tunnel. +You can specify several gateways. + +The following parameters are supported: + +|Parameter|Type|Mandatory|Description| +|---|---|---|---| +|**name**|string|**yes**|Gateway node name| +|**address**|ip address|**yes**|Gateway node's IP or hostname address for connection| +|**username**|string|**yes**|Username for SSH-access the gateway node| +|**keyfile**|string|**yes**|**Absolute** path to keyfile on deploy node to access the gateway node| + +An example is as follows: + +```yaml +gateway_nodes: + - name: k8s-gateway-1 + address: 10.102.0.1 + username: root + keyfile: "/home/username/.ssh/id_rsa" + - name: k8s-gateway-2 + address: 10.102.0.2 + username: root + keyfile: "/home/username/.ssh/id_rsa" +``` + +You need to specify which gateways should be used to connect to nodes. + +An example is as follows: + +```yaml +nodes: + - name: "k8s-master-1" + address: "10.101.0.2" + internal_address: "192.168.0.2" + roles: ["master"] + gateway: k8s-gateway-1 + - name: "k8s-master-2" + address: "10.101.0.3" + internal_address: "192.168.0.3" + roles: ["master"] + gateway: k8s-gateway-2 +``` + +**Note**: If the gateway is not specified on the node, then the connection is direct. + +### vrrp_ips + +*Installation task*: `deploy.loadbalancer.keepalived` + +*Can cause reboot*: No + +*Can restart service*: Always yes, `keepalived` + +*OS specific*: Yes, different OS may have different default network interfaces. For interfaces with autodetection mode selected, it is automatically detected. + +In order to assign VRRP IP you need to create a `vrrp_ips` section in the inventory and specify the appropriate configuration. +You can specify several VRRP IP addresses. + +The following parameters are supported: + +|Parameter|Default Automatically Calculated Value|Description| +|---|---|---| +|hosts| | List of hosts on which the VRRP IP should be set.| +|hosts[i].name| |The name of the node. It must match the name in the `nodes` list.| +|hosts[i].priority|`255 - {{ i }}`|The priority of the VRRP IP host.| +|ip| |The IP address for virtual IP.| +|floating_ip| |The floating IP address for virtual IP.| +|interface|`eth0`|The interface on which the address must be listened.| +|id|`md5({{ interface }} + {{ ip }})` сropped to 10 characters|The ID of the VRRP IP. It must be unique for each VRRP IP.| +|password|Randomly generated 8-digit string|Password for VRRP IP set. It must be unique for every VRRP IP ID.| +|router_id|Last octet of IP|The router ID of the VRRP IP. Must be unique for each VRRP IP ID and have maximum 3-character size.| + +There are several formats in which you can specify values. + +The following are some examples of the format: + +You can specify only the address of the VRRP IP, in which case it automatically applies to all balancers in the cluster and other parameters are automatically calculated. +For example: + +```yaml +vrrp_ips: +- 192.168.0.1 +``` + +You can specify the address and which hosts it should apply to. Other parameters are automatically calculated. +For example: + +```yaml +vrrp_ips: +- hosts: + - name: balancer-1 + priority: 254 + - name: balancer-2 + priority: 253 + ip: 192.168.0.1 + floating_ip: 10.101.1.1 +``` + +You can specify all possible parameters at one time instead of using auto-calculated. 
For example: + +```yaml +vrrp_ips: +- hosts: + - name: balancer-1 + priority: 254 + - name: balancer-2 + priority: 253 + id: d8efc729e4 + interface: eth0 + ip: 192.168.0.1 + floating_ip: 10.101.1.1 + password: 11a1aabe + router_id: '1' +``` + +### Services + +In the `services` section, you can configure the service settings. The settings are described in the following sections. + +#### kubeadm + +*Installation task*: `deploy.kubernetes` + +*Can cause reboot*: no + +*Can restart service*: always yes, `kubelet` + +*OS specific*: No + +In `services.kubeadm` section, you can override the original settings for the kubeadm. For more information these settings, refer to the [Official Kubernetes Documentation](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file). +By default, the installer uses the following parameters: + +|Parameter|Default Value| +|---|---| +|kubernetesVersion|`v1.20.3`| +|controlPlaneEndpoint|`{{ cluster_name }}:6443`| +|networking.podSubnet|`10.128.0.0/14` for IPv4 or `fd02::/80` for IPv6| +|networking.serviceSubnet|`172.30.0.0/16` for IPv4 or `fd03::/112` for IPv6| +|apiServer.certSANs|List with all nodes internal IPs, external IPs and names| +|apiServer.extraArgs.enable-admission-plugins|`NodeRestriction`| +|apiServer.extraArgs.profiling|`false`| +|apiServer.extraArgs.audit-log-path|`/var/log/apiserver/audit.log`| +|apiServer.extraArgs.audit-log-maxage|`30`| +|apiServer.extraArgs.audit-log-maxbackup|`10`| +|apiServer.extraArgs.audit-log-maxsize|`100`| +|scheduler.extraArgs.profiling|`false`| +|controllerManager.extraArgs.profiling|`false`| +|controllerManager.extraArgs.terminated-pod-gc-threshold|`1000`| + +The following is an example of kubeadm defaults override: + +```yaml +services: + kubeadm: + networking: + podSubnet: '10.128.0.0/14' + serviceSubnet: '172.30.0.0/16' + imageRepository: example.com:5443/k8s.gcr.io + apiServer: + extraArgs: + enable-admission-plugins: NodeRestriction,PodNodeSelector + profiling: "false" + audit-log-path: /var/log/apiserver/audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "100" + scheduler: + extraArgs: + profiling: "false" + controllerManager: + extraArgs: + profiling: "false" + terminated-pod-gc-threshold: "1000" + +``` + +#### Kubernetes version + +By default, the `1.20.2` version of the Kubernetes is installed. See the table of supported versions for details in [Supported versions section](#supported-versions). However, we recommend that you explicitly specify the version you are about to install. This version applies into all the dependent parameters - images, binaries, rpms, configurations: all these are downloaded and used according to your choice. To specify the version, use the following parameter as in example: + +```yaml +services: + kubeadm: + kubernetesVersion: v1.20.2 +``` + +#### Cloud Provider Plugin + +Before proceeding further, it is recommended to read the official Kubernetes Guide about the CPP deployment in the cluster at [https://kubernetes.io/blog/2020/02/07/deploying-external-openstack-cloud-provider-with-kubeadm/](https://kubernetes.io/blog/2020/02/07/deploying-external-openstack-cloud-provider-with-kubeadm/). + +**Warning**: Manual CPP installation on a deployed cluster can cause Kubernetes out-of-service denial and break Kubetools procedures for adding and removing nodes. + +It is possible to specify a plugin at the installation stage, if it is required. 
To enable the CPP support, just specify the `external-cloud-volume-plugin` parameter of `controllerManager` in the `kubeadm` cluster configuration. For example: + +```yaml +services: + kubeadm: + controllerManager: + extraArgs: + external-cloud-volume-plugin: openstack + extraVolumes: + - name: "cloud-config" + hostPath: "/etc/kubernetes/cloud-config" + mountPath: "/etc/kubernetes/cloud-config" + readOnly: true + pathType: File +``` + +In this case, Kubetool automatically initializes and joins new cluster nodes with CPP enabled. However, this is not enough for the full operation of the СPP. There are a number of manual steps required to configure the CPP before running Calico and other plugins. These steps depend directly on your Cloud Provider and its specific settings. An example of a simple setup for an openstack is as follows: + +1. Prepare cloud config of your Cloud Provider with credentials and mandatory parameters required for the connection. Openstack cloud config example: + + ```ini + [Global] + region=RegionOne + username=username + password=password + auth-url=https://openstack.cloud:5000/v3 + tenant-id=14ba698c0aec4fd6b7dc8c310f664009 + domain-name=default + ``` + +1. Upload the cloud config to all the nodes in the cluster to the following location: + + ``` + /etc/kubernetes/cloud-config + ``` + + It is recommended to use Kubetools functionality of plugins or thirdparties for automatic uploading. For example, it is possible to upload the cloud config on all nodes using thirdparties before starting the cluster installation: + + ```yaml + services: + thirdparties: + /etc/kubernetes/cloud-config: + source: ./example/cloud-config.txt + ``` + +1. Before running any plugins, it is necessary to create a secret RBAC resource and cloud controller manager DaemonSet for CPP. 
This can be specified as the very first Kubetools plugin, for example: + + Create a file `./openstack-cloud-controller-manager-ds.yaml` on deploy node with the following content: + + ```yaml + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: openstack-cloud-controller-manager + namespace: kube-system + labels: + k8s-app: openstack-cloud-controller-manager + spec: + selector: + matchLabels: + k8s-app: openstack-cloud-controller-manager + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + k8s-app: openstack-cloud-controller-manager + spec: + nodeSelector: + node-role.kubernetes.io/master: "" + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + effect: NoSchedule + - key: node-role.kubernetes.io/master + effect: NoSchedule + - effect: NoSchedule + key: node.kubernetes.io/not-ready + serviceAccountName: cloud-controller-manager + containers: + - name: openstack-cloud-controller-manager + image: docker.io/k8scloudprovider/openstack-cloud-controller-manager:v1.15.0 + securityContext: + privileged: true + args: + - /bin/openstack-cloud-controller-manager + - --v=1 + - --cloud-config=$(CLOUD_CONFIG) + - --cloud-provider=openstack + - --use-service-account-credentials=true + - --address=127.0.0.1 + volumeMounts: + - mountPath: /etc/kubernetes/pki + name: k8s-certs + readOnly: true + - mountPath: /etc/config + name: cloud-config-volume + readOnly: true + - mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + name: flexvolume-dir + resources: + requests: + cpu: 200m + env: + - name: CLOUD_CONFIG + value: /etc/config/cloud.conf + hostNetwork: true + volumes: + - hostPath: + path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + type: DirectoryOrCreate + name: flexvolume-dir + - hostPath: + path: /etc/kubernetes/pki + type: DirectoryOrCreate + name: k8s-certs + - name: cloud-config-volume + secret: + secretName: cloud-config + - name: ca-cert + secret: + secretName: openstack-ca-cert + ``` + **Warning:** Pay attention on external resources links. + For restricted environments links should be changed to local registry. + e.g. image: `docker.io/k8scloudprovider/openstack-cloud-controller-manager:v1.15.0` should be changed to + `registry:17001/k8scloudprovider/openstack-cloud-controller-manager:v1.15.0` + + **Warning**: Pay attention to pod security policies for cloud controller manager. You can create new ClusterRole or disable PSP. + + Place the following plugin section to the cluster config: + + ```yaml + plugins: + cloud-config: + install: true + installation: + priority: -1 + procedures: + - shell: + command: sudo kubectl create secret -n kube-system generic cloud-config --from-literal=cloud.conf="$(sudo cat /etc/kubernetes/cloud-config)" --dry-run -o yaml > cloud-config-secret.yaml && sudo kubectl apply -f cloud-config-secret.yaml + nodes: ['master-1'] + - shell: + command: sudo kubectl apply -f https://github.com/kubernetes/cloud-provider-openstack/raw/release-1.15/cluster/addons/rbac/cloud-controller-manager-roles.yaml + nodes: ['master-1'] + - shell: + command: sudo kubectl apply -f https://github.com/kubernetes/cloud-provider-openstack/raw/release-1.15/cluster/addons/rbac/cloud-controller-manager-role-bindings.yaml + nodes: ['master-1'] + - template: + source: ./openstack-cloud-controller-manager-ds.yaml + ``` + **Warning**: Pay attention on external resources links. 
+ For restricted environments configs should be downloaded and links changed to the local path. + +### Service Account Issuer + +**Warning**: + +* Manual Service Account Issuer setup on an already installed Kubernetes cluster is not supported. +* Service Account Issuer feature is supported only on Kubernetes 1.20. + +If Service Account Issuer is required, you can configure the necessary Kubernetes parameters using the `kubeadm` section in the cluster config. For example: + +```yaml +services: + kubeadm: + apiServer: + extraArgs: + feature-gates: "ServiceAccountIssuerDiscovery=true" + service-account-issuer: "https://{{ cluster_name }}:6443" + service-account-jwks-uri: "https://{{ cluster_name }}:6443/openid/v1/jwks" + service-account-signing-key-file: /etc/kubernetes/pki/sa.key + service-account-key-file: /etc/kubernetes/pki/sa.pub + controllerManager: + extraArgs: + feature-gates: "ServiceAccountIssuerDiscovery=true" + scheduler: + extraArgs: + feature-gates: "ServiceAccountIssuerDiscovery=true" +``` + +To be able to fetch the public keys and validate the JWT tokens against the Kubernetes cluster’s issuer, you have to allow external unauthenticated requests. +To do this, bind the special role, system:service-account-issuer-discovery, with a ClusterRoleBinding to unauthenticated users. Make sure that this is safe in your environment, but only public keys are visible on the URL. + +**Warning**: The following command opens an unauthenticated access to the endpoint receiving public tokens. Do not execute this command if you do not need to open access to the outside, or if you do not understand what you are doing. If you still decide to open an external access, make sure to provide secure access to this endpoint with external resources outside the cluster. + +For example: + +```bash +kubectl create clusterrolebinding oidc-reviewer --clusterrole=system:service-account-issuer-discovery --group=system:unauthenticated +``` + +If you need to test that the Service Account Issuer is working, implement the following steps: + +1. Create a test pod: + +```yaml +kubectl apply -f - < kubernetes_ca.crt +``` + +5. Visit well-known OIDC URLs: + +```bash +curl --cacert kubernetes_ca.crt https://CLUSTER_NAME:6443/.well-known/openid-configuration +``` + +Example result: + +```json +{ + "issuer": "https://localhost:6443", + "jwks_uri": "https://localhost:6443/openid/v1/jwks", + "response_types_supported": [ + "id_token" + ], + "subject_types_supported": [ + "public" + ], + "id_token_signing_alg_values_supported": [ + "RS256" + ] +} +``` + +6. Visit the JWKS address ("jwks_uri") to view public keys: + +```bash +curl --cacert kubernetes_ca.crt https://CLUSTER_NAME:6443/openid/v1/jwks +``` + +Example result: + +```json +{ + "keys": [ + { + "use": "sig", + "kty": "RSA", + "kid": "Rt3TBA31bh3rH67PQbKImg2ldwhPqBTWF2w1Hxqi84c", + "alg": "RS256", + "n": "vL0tjBqLDFTyqOCPBQC5Mww_3xkhlkWmeklPjSAhFuqL0U-Oie9E1z8FuhcApBaUs7UEPzja02PEZd4i1UF2UDoxKYEG9hG5vPseTXwN_xGnbhOaBdfgQ7KDvqV-WHfmlrnnCizi1VmNAHsoAg6oZMiUdOuk8kCFxpe0N6THmBKNSKnqoRnhSL4uwHSBWJ5pEyWAqyL8KYaaGYhc2MVUs3I8e-gtQE6Vlwe75_QSp9uIZNZeFr5keqiXhz8BWL76ok-vY8UZ8-rH2VIN5LzXkCvhIFI9W_UBzziSnb9l5dgSQCwGf18zVgT0yJjCz0Z9YE9A1Wgeu-LLrJz3gxR8Hw", + "e": "AQAB" + } + ] +} +``` + +### kubeadm_kubelet + +*Installation task*: `deploy.kubernetes` + +*Can cause reboot*: no + +*Can restart service*: always yes, `kubelet` + +*OS specific*: No + +In `services.kubeadm_kubelet` section, you can override the original settings for the kubelet. 
For more information about these settings, refer to the [Official Kubernetes Documentation](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file).
+By default, the installer uses the following parameters:
+
+|Parameter|Default Value|
+|---|---|
+|readOnlyPort|0|
+|protectKernelDefaults|true|
+|podPidsLimit|4096|
+|maxPods|110|
+
+The `podPidsLimit` default value is chosen to prevent a [Fork Bomb](https://en.wikipedia.org/wiki/Fork_bomb).
+
+**Warning**: If you want to change the values of the `podPidsLimit` and `maxPods` variables, you have to update the value of `pid_max` (this value should not be less than the result of the expression `maxPods * podPidsLimit + 2048`), which can be done using the `prepare.system.sysctl` task. For example, with the default `maxPods: 110` and `podPidsLimit: 4096`, `pid_max` should be at least `110 * 4096 + 2048 = 452608`. For more information about `pid_max`, see the [sysctl](#sysctl) section.
+
+The following is an example of kubeadm_kubelet defaults override:
+
+```yaml
+services:
+  kubeadm_kubelet:
+    readOnlyPort: 0
+    protectKernelDefaults: true
+    podPidsLimit: 2048
+    maxPods: 100
+```
+
+#### kernel_security
+
+This is a common section for `selinux` and `apparmor` properties.
+
+##### selinux
+
+*Installation task*: `prepare.system.setup_selinux`
+
+*Can cause reboot*: Yes, only on configurations change
+
+*Can restart service*: No
+
+*Overwrite files*: Yes, only on configurations change: `/etc/selinux/config`, backup is created
+
+*OS specific*: Yes, performs only on RHEL OS family.
+
+All the SELinux settings are specified in the `services.kernel_security.selinux` section of the inventory.
+
+**Note**: SELinux configuration is possible only on nodes running CentOS or RHEL operating system.
+
+The following parameters are available:
+
+|Name|Type|Mandatory|Default Value|Possible Values|Description|
+|---|---|---|---|---|---|
+|state|string|no|enforcing|`enforcing` - The SELinux security policy is enforced.<br>`permissive` - The SELinux system prints warnings but does not enforce policy. This is useful for debugging and troubleshooting purposes.<br>`disabled` - SELinux is fully disabled. SELinux hooks are disengaged from the kernel and the pseudo-file system is unregistered.|Defines the top-level state of SELinux on a system.|
+|policy|string|no|targeted|`targeted` - Only targeted network daemons are protected.<br>`strict` - Full SELinux protection, for all daemons. Security contexts are defined for all subjects and objects, and every single action is processed by the policy enforcement server.|Specifies which policy is currently being enforced by SELinux.|
+|permissive|list|no|haproxy_t<br>container_t<br>keepalived_t|any|Certain SELinux object type policy records, applicable without requiring modification to or recompilation from the policy sources.|
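+
+The following is a minimal illustration of these parameters in the inventory; it simply restates the default values listed above and is shown only as a sketch of the expected structure:
+
+```yaml
+services:
+  kernel_security:
+    selinux:
+      state: enforcing
+      policy: targeted
+      permissive:
+        - haproxy_t
+        - container_t
+        - keepalived_t
+```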
+
+**Warning**: It is recommended to use the default values. Using values different from the default may cause unexpected consequences, and no support is provided for such consequences.
+
+**Note**: Turning off and then turning on SELinux can lead to the loss of security rules, which were configured earlier.
+
+For more information about SELinux, refer to the _Official RedHat SELinux Configuration Documentation_ at [https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/4/html/Reference_Guide/s2-SELinux-files-etc.html](https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/4/html/Reference_Guide/s2-SELinux-files-etc.html).
+
+The procedure for applying SELinux settings is as follows:
+
+1. The current settings on remote hosts are validated first. The detected configurations are precisely compared with the configurations from the inventory:
+   * `SELinux status` compared with `services.selinux.state` - Specifies whether SELinux needs to be disabled or not.
+   * `Current mode` and `Mode from config file` compared with the `services.selinux.state` parameter.
+   * `Loaded policy name` and `Policy from config file` compared with the `services.selinux.policy` parameter.
+   * `Customized Permissive Types` items compared with items in the `services.selinux.permissive` list parameter.
+2. If there are no differences, the installation proceeds with the following tasks.
+3. If there is at least one difference, the application of all SELinux settings for remote nodes begins.
+4. After applying the settings, a reboot is scheduled and the required configurations are re-validated.
+5. When re-validating, everything is checked again as described in Step 1.
+
+##### apparmor
+
+*Installation task*: `prepare.system.setup_apparmor`
+
+*Can cause reboot*: Yes, only on configurations change.
+
+*Can restart service*: No
+
+*Overwrite files*: Yes, only on configurations change: `/etc/apparmor.d`, no backups.
+
+*OS specific*: Yes, performs only on Ubuntu OS family.
+
+All the AppArmor settings are specified in the `services.kernel_security.apparmor` section of the inventory.
+
+**Note**: AppArmor configuration is possible only on nodes running Ubuntu or Debian operating system.
+
+In the AppArmor section, you must declare the already existing AppArmor profiles whose state needs to be changed. It is not necessary to indicate all the available profiles - anything that is not indicated is not affected.
+The profiles should be specified in a standard way using a path or name. It is possible to modify the following states:
+
+* `enforce` (default mode) - prohibits everything according to the profile settings.
+* `complain` - does not prohibit, but only displays violation warnings in the logs.
+* `disable` - disables and unloads the security profile.
+
+**Note**: The necessary profiles are installed and configured automatically during package installation, and activating them manually is not required by default. However, if some profiles are missing for you, you need to preload them on all nodes yourself and launch the apparmor task after that.
+
+Example:
+
+```yaml
+services:
+  kernel_security:
+    apparmor:
+      enforce:
+        - /etc/cron/daily/logrotate
+        - /sbin/dhclient
+        - nvidia_modprobe
+      complain:
+        - /usr/lib/postfix/smtp
+        - man_filter
+      disable:
+        - /usr/bin/ping
+        - /bin/netstat
+        - man_groff
+```
+
+AppArmor itself cannot be disabled using Kubetools. If you absolutely need to disable it, you can uninstall AppArmor from the system through the package manager.
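+
+To check the resulting profile states on a node, you can query AppArmor directly. This is only an illustrative manual check and is not something performed by Kubetools:
+
+```bash
+# lists the loaded AppArmor profiles grouped by enforce/complain mode
+sudo aa-status
+```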
+ +**Note**: After the installation of new repositories, the repodata is reloaded. + +#### packages + +##### package_manager + +*Installation task*: `prepare.package_manager.configure` + +*Can cause reboot*: No + +*Can restart service*: No + +*Overwrite files*: Yes, `/etc/yum.repos.d/` or `/etc/apt/sources.list.d/` backup is presented. + +*OS specific*: Yes, different defaults for different OS families. + +**Warning**: This section is specific to different OS families. Ensure that you use the proper definition format for your OS distributive - it may differ from the presented examples in this document. + +If your cluster is in a closed environment or if you need to add additional package manager repositories, you can specify them in the `services.yum` section of inventory. +The following parameters are supported: + +|Parameter|Default value|Description| +|---|---|---| +|replace-repositories|`false`|Deletes old repositories on hosts and installs new ones instead.| +|repositories| |List of new repositories.| + +In the repositories section, you need to specify new repositories to install. The contents of their configurations can be arbitrary and is directly forwarded into the yum repo files. + +For example in CentOS: + +```yaml +services: + packages: + package_manager: + replace-repositories: true + repositories: + kubernetes: + name: "Kubernetes" + enabled: 1 + gpgcheck: 0 + baseurl: "https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64" + my_own_repo: + name: "My repository" + enabled: 1 + gpgcheck: 1 + baseurl: "https://example.com/repo" +``` + +For example in Ubuntu: + +```yaml +services: + packages: + package_manager: + replace-repositories: true + repositories: + - "deb [arch=amd64 trusted=yes] http://example.com/deb/ubuntu/ focal main restricted" + - "deb [arch=amd64 trusted=yes] http://example.com/deb/ubuntu/ focal-updates main restricted" + - "deb [arch=amd64 trusted=yes] http://example.com/deb/ubuntu/ focal universe" + - "deb [arch=amd64 trusted=yes] http://example.com//deb/ubuntu/ focal-updates universe" + - "deb [arch=amd64 trusted=yes] http://example.com//deb/ubuntu/ focal multiverse" + - "deb [arch=amd64 trusted=yes] http://example.com/deb/ubuntu/ focal-updates multiverse" + - "deb [arch=amd64 trusted=yes] http://example.com/deb/ubuntu/ focal-backports main restricted universe multiverse" + - "deb [arch=amd64 trusted=yes] http://example.com/deb/misc/docker-ce/debian/ buster stable" +``` + +**Note**: You cannot and do not need to specify repositories for different package managers. The package manager is detected automatically and the specified configuration should match it. + +##### management + +*Installation task*: `prepare.package_manager.manage_packages` + +*Can cause reboot*: Yes, only when a list of installed packages changes. + +*Can restart service*: No + +*OS specific*: Yes, the necessary package manager is selected for different OS families. + +By default, the installer does not install any packages from the package manager. However, if you need it, you can manage the packages directly during installation. +You can choose any one action from the following types of actions: + +* remove +* install +* upgrade + +All these actions are performed in a sequence as described above. You can specify only some types of actions or all at once. Short and full configuration formats are available. + +**Warning**: Before you start working, ensure to check that you have all the necessary dependencies in the repositories you are using. 
You can configure the necessary repositories in the [package_manager](#package_manager) section of the inventory.
+
+**Warning**: This section is specific to different OS families. Ensure that you use the proper definition format for your OS distribution - it may differ from the presented examples in this document.
+
+**Warning**: The packages in the install section are installed on **all** nodes.
+
+The following is an example to install new packages:
+
+```yaml
+services:
+  packages:
+    install:
+      - ethtool
+      - ebtables
+      - socat
+      - curl
+      - openssl
+      - unzip
+      - policycoreutils-python
+```
+
+The following is an example to install, upgrade, and remove packages:
+
+```yaml
+services:
+  packages:
+    remove:
+      - curl
+    install:
+      - unzip
+      - policycoreutils-python
+    upgrade:
+      - openssl
+```
+
+The format of the package definition is the same as in the package manager. You can specify the exact version of the package to install:
+
+```yaml
+services:
+  packages:
+    install:
+      - openssl-1.0
+      - unzip-1.1
+```
+
+To update all packages, you can use an asterisk. For example:
+
+```yaml
+services:
+  packages:
+    upgrade:
+      - '*'
+```
+
+A more complex format is also available in which you can include and exclude packages from processing:
+
+```yaml
+services:
+  packages:
+    upgrade:
+      include:
+        - '*'
+      exclude:
+        - kernel
+        - gluster
+```
+
+**Warning**: Be careful with managing packages, they directly affect the host operating system.
+
+**Warning**: If changes in the installed packages list are detected, a reboot is scheduled.
+
+##### associations
+
+*Installation task*: No
+
+*Can cause reboot*: No
+
+*Can restart service*: No
+
+*OS specific*: Yes, different defaults for different OS families.
+
+**Warning**: This section is specific to different OS families. Ensure that you use the proper definition format for your OS distribution - it may differ from the presented examples in this document.
+
+In the `services.packages` section, there is a `services.packages.associations` sub-section that allows you to configure predefined associations of package objects. It allows you to redefine the following parameters:
+
+* executable_name
+* package_name
+* service_name
+* config_location
+
+This setting is required to change the behavior of the installer, such as to install a package with a different name, use the configuration file from a different path, and so on.
+
+**Note**: Association defaults automatically switch for different OS families. Do not worry about this: in common cases, you can define the associations specific to your operating system directly in the `associations` section, without specifying the OS family.
+
+The following associations are used by default:
+
+###### RHEL and Centos
+
+|Subject|Association key|Association value|
+|---|---|---|
+|docker|executable_name|docker|
+| |package_name|docker-ce-{{k8s-version-specific}}<br>docker-ce-cli-{{k8s-version-specific}}<br>containerd.io-{{k8s-version-specific}}|
+| |service_name|docker|
+| |config_location|/etc/docker/daemon.json|
+|containerd|executable_name|containerd|
+| |package_name|containerd.io-{{k8s-version-specific}}<br>podman-{{k8s-version-specific}}|
+| |service_name|containerd|
+| |config_location|/etc/containerd/config.toml|
+|haproxy|executable_name|/opt/rh/rh-haproxy18/root/usr/sbin/haproxy|
+| |package_name|rh-haproxy18|
+| |service_name|rh-haproxy18-haproxy|
+| |config_location|/etc/haproxy/haproxy.cfg|
+|keepalived|executable_name|keepalived|
+| |package_name|keepalived|
+| |service_name|keepalived|
+| |config_location|/etc/keepalived/keepalived.conf|
+
+###### Ubuntu and Debian
+
+|Subject|Association key|Association value|
+|---|---|---|
+|docker|executable_name|docker|
+| |package_name|docker-ce={{k8s-version-specific}}<br>docker-ce-cli={{k8s-version-specific}}<br>containerd.io={{k8s-version-specific}}|
+| |service_name|docker|
+| |config_location|/etc/docker/daemon.json|
+|containerd|executable_name|containerd|
+| |package_name|containerd.io|
+| |service_name|containerd|
+| |config_location|/etc/containerd/config.toml|
+|haproxy|executable_name|/usr/sbin/haproxy|
+| |package_name|haproxy|
+| |service_name|haproxy|
+| |config_location|/etc/haproxy/haproxy.cfg|
+|keepalived|executable_name|keepalived|
+| |package_name|keepalived|
+| |service_name|keepalived|
+| |config_location|/etc/keepalived/keepalived.conf|
+ +**Note**: By default, the packages' versions are installed in accordance with the Kubernetes version specified in the [Supported versions](#supported-versions) section. + +The following is an example of overriding docker associations: + +```yaml +services: + packages: + associations: + docker: + executable_name: 'docker' + package_name: + - docker-ce-19* + - docker-ce-cli-19* + - containerd.io-1.4.3-3.1* + service_name: 'docker' + config_location: '/etc/docker/daemon.json' +``` + +In case when you should redefine associations for multiple OS families at once, you should define their names in the root of `associations` in the following way: + +```yaml +services: + packages: + associations: + debian: + haproxy: + executable_name: '/usr/sbin/haproxy' + package_name: haproxy=1.8.* + rhel: + haproxy: + executable_name: '/opt/rh/rh-haproxy18/root/usr/sbin/haproxy' + package_name: rh-haproxy18-haproxy-1.8* +``` + +**Note**: There are only 3 supported OS families: Debian, RHEL, and RHEL8 (for RHEL based version 8). + +#### thirdparties + +*Installation task*: `prepare.thirdparties` + +*Can cause reboot*: No + +*Can restart service*: No + +*Overwrite files*: Yes, backup is not presented + +*OS specific*: No + +The installer has a mechanism to automatically deliver files from third party sources and install them in the system. +For example, using it is convenient to automatically download a certain file from a repository and place it in a specific place in the system. +This is configured in the `services.thirdparties` section. The contents of this section are as follows: +* The absolute destination path on the host system of the cluster is indicated as a key +* A set of the following parameters is indicated as values: + +|Name|Mandatory|Default Value|Description| +|---|---|---|---| +|**source**|**yes**| |Source from where to upload the file to hosts. It can be an URL or an **absolute** path on the deployment node. For detailed description of this parameter, see [Installation without Internet Resources](#installation-without-internet-resources).| +|**sha1**|no|`None`|SHA1 hash of the file. It is necessary in order to check with an existing file on the hosts and decide whether to download the file or not.| +|**owner**|no|`root`|The owner who needs to be assigned to the file after downloading it.| +|**mode**|no|`700`|The mode which needs to be assigned to the file after downloading it.| +|**unpack**|no|`None`|Absolute path on hosts where to unpack the downloaded file. Unpacking is supported only for the following file extensions: `.tar`, `.gz` and `.zip`.| +|**group**|no|`None`|The name of the group to whose hosts the file should be uploaded.| +|**groups**|no|`None`|The list of group names to whose hosts the file should be uploaded.| +|**node**|no|`None`|The name of node where the file should be uploaded.| +|**nodes**|no|`None`|The list of node names where the file should be uploaded.| + +**Warning**: verify that you specified the path to the correct version of the thirdparty. + +**Note**: Groups and nodes can be combined. + +**Note**: If no groups and nodes are present, then the file is uploaded to masters and workers by default. + +**Note**: If the file is already uploaded to hosts and its hash matches with the hash in the config, then the file is not downloaded again. + +**Note**: The installation of the thirdparties sources that are required in the plugins are installed with the plugin. For more information, see [thirdparty](#thirdparty). 
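+
+As an illustration of the parameters described above, the following sketch shows a hypothetical thirdparty archive that is downloaded, verified by hash, unpacked, and delivered only to master nodes. The URL, hash, destination path, and mode are placeholders, not real defaults:
+
+```yaml
+services:
+  thirdparties:
+    /usr/bin/example-tool.tar.gz:
+      source: 'https://example.com/example-tool/v1.0.0/example-tool-linux-amd64.tar.gz'
+      sha1: 0000000000000000000000000000000000000000
+      owner: root
+      mode: 755
+      unpack: /usr/bin/
+      group: master
+```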
+ +By default, the installer installs the following thirdparties with the following configuration: + +```yaml +services: + thirdparties: + /usr/bin/kubeadm: + source: 'https://storage.googleapis.com/kubernetes-release/release/{{ services.kubeadm.kubernetesVersion }}/bin/linux/amd64/kubeadm' + sha1: e5cdfcda337a5c8d59035da9db0c2b02913271d1 + /usr/bin/kubelet: + source: 'https://storage.googleapis.com/kubernetes-release/release/{{ services.kubeadm.kubernetesVersion }}/bin/linux/amd64/kubelet' + sha1: d6e92cdc09eab3e1c24c9c35fa79421a351f6ba8 + /usr/bin/kubectl: + source: 'https://storage.googleapis.com/kubernetes-release/release/{{ services.kubeadm.kubernetesVersion }}/bin/linux/amd64/kubectl' + sha1: f684dd035bd44e0899ab43ce2ad4aea0baf86c2e + group: master + /usr/bin/calicoctl: + source: 'https://github.com/projectcalico/calicoctl/releases/download/{{ plugins.calico.version }}/calicoctl-linux-amd64' + sha1: bc6cc7869ebbb0e1799dfbe10795f680fba4321b + group: master + # "crictl" is installed by default ONLY if "containerRuntime != docker", otherwise it is removed programmatically + /usr/bin/crictl.tar.gz: + source: 'https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ globals.compatibility_map.software.crictl[services.kubeadm.kubernetesVersion].version }}/crictl-{{ globals.compatibility_map.software.crictl[services.kubeadm.kubernetesVersion].version }}-linux-amd64.tar.gz' + sha1: '{{ globals.compatibility_map.software.crictl[services.kubeadm.kubernetesVersion].sha1 }}' + group: master + unpack: /usr/bin/ +``` + +If necessary, you can redefine or add thirdparties. For example: + +```yaml +services: + thirdparties: + /usr/bin/kubeadm: + source: https://example.com/kubernetes/kubeadm/v1.20.2/bin/linux/amd64/kubeadm +``` + +#### CRI + +*Installation task*: `prepare.cri` + +*Can cause reboot*: No + +*Can restart service*: Always yes, `docker` or `containerd` + +*Overwrite files*: Yes, by default `/etc/docker/daemon.json` or `/etc/containerd/config.toml`, `/etc/crictl.yaml` and `/etc/containers/registries.conf`, backup is created + +*OS specific*: No + +The `services.cri` section configures container runtime used for kubernetes. By default, the following parameters are used: + +```yaml +services: + cri: + containerRuntime: containerd + containerdConfig: + version: 2 + dockerConfig: + ipv6: False + log-driver: json-file + log-opts: + max-size: 64m + max-file: "3" + exec-opts: + - native.cgroupdriver=systemd + icc: False + live-restore: True + userland-proxy: False +``` + +The `containerRuntime` parameter configures a particular container runtime implementation used for kubernetes. +The available values are `docker` and `containerd`. By default `containerd` is used. + +When docker is used as a container runtime, it is possible to additionally define the `dockerConfig` section, +which contains the parameters passed to `daemon.json`, for example: + +```yaml +services: + cri: + containerRuntime: docker + dockerConfig: + insecure-registries: + - artifactory.example.com:5443 + registry-mirrors: + - https://artifactory.example.com:5443 +``` + +For detailed description of the parameters, see [Installation without Internet Resources](#installation-without-internet-resources). +For more information about Docker daemon parameters, refer to the official docker configuration file documentation at [https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file](https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file). 
+ +**Note**: After applying the parameters, the docker is restarted on all nodes in the cluster. + +Alternatively, it is possible to use containerd as a container runtime for kubernetes by setting `containerd` value for `containerRuntime` parameter. +When containerd is used as a container runtime, it is possible to additionally define the `containerdConfig` section, +which contains the parameters passed to `config.toml`, for example: + +```yaml +services: + cri: + containerRuntime: containerd + containerdConfig: + plugins."io.containerd.grpc.v1.cri": + sandbox_image: k8s.gcr.io/pause:3.2 + plugins."io.containerd.grpc.v1.cri".registry.mirrors."artifactory.example.com:5443": + endpoint: + - https://artifactory.example.com:5443 +``` + +Note how `containerdConfig` section reflects the toml format structure. +For more details on containerd configuration, refer to the official containerd configuration file documentation at [https://github.com/containerd/containerd/blob/master/docs/cri/config.md](https://github.com/containerd/containerd/blob/master/docs/cri/config.md). +By default, the following parameters are used for `containerdConfig`: + +```yaml +services: + cri: + containerdConfig: + version: 2 +``` + +**Note**: When containerd is used, `crictl` binary and `podman` package are also installed and configured as required. + +#### modprobe + +*Installation task*: `prepare.system.modprobe` + +*Can cause reboot*: Yes, only when a list of kernel modules changes. + +*Can restart service*: No + +*Overwrite files*: Yes, only when a list of kernel modules changes, `/etc/modules-load.d/predefined.conf`, backup is created + +*OS specific*: No + +The `services.modprobe` section manages Linux Kernel modules to be loaded in the host operating system. By default, the following modules are loaded: + +|Key|Note| +|---|---| +|br_netfilter| | +|ip_vs| | +|ip_vs_rr| | +|ip_vs_wrr| | +|ip_vs_sh| | +|ip6table_filter|Only when IPv6 detected in node IP| +|nf_conntrack_ipv6|Only when IPv6 detected in node IP| +|nf_nat_masquerade_ipv6|Only when IPv6 detected in node IP| +|nf_reject_ipv6|Only when IPv6 detected in node IP| +|nf_defrag_ipv6|Only when IPv6 detected in node IP| + +If necessary, you can redefine or add [List Merge Strategy](#list-merge-strategy) to the standard list of Kernel modules to load. For example: + +```yaml +services: + modprobe: + - my_own_module1 + - my_own_module2 +``` + +**Warning**: Be careful with these settings, they directly affect the hosts operating system. + +**Warning**: If changes to the hosts `modprobe` configurations are detected, a reboot is scheduled. After the reboot, the new parameters are validated to match the expected configuration. + +#### sysctl + +*Installation task*: `prepare.system.sysctl` + +*Can cause reboot*: Yes, only when list of Kernel parameters changes. + +*Can restart service*: No + +*Overwrite files*: Yes, only when list of Kernel parameters changes: `/etc/sysctl.d/98-kubetools-sysctl.conf`, backup is created + +*OS specific*: No + +The `services.sysctl` section manages the Linux Kernel parameters for all hosts in a cluster. 
By default, the following key-values are configured: + +|Key|Value|Note| +|---|---|---| +|net.bridge.bridge-nf-call-iptables|1| | +|net.ipv4.ip_forward|1| | +|net.ipv4.ip_nonlocal_bind|1| | +|net.ipv4.conf.all.route_localnet|1| | +|net.bridge.bridge-nf-call-ip6tables|1|Presented only when IPv6 detected in node IP| +|net.ipv6.conf.all.forwarding|1|Presented only when IPv6 detected in node IP| +|net.ipv6.ip_nonlocal_bind|1|Presented only when IPv6 detected in node IP| +|kernel.panic|10|| +|vm.overcommit_memory|1|| +|kernel.panic_on_oops|1|| +|kernel.pid_max|calculated| If this parameter is not explicitly indicated in the `cluster.yaml`, then this value is calculated by this formula: `maxPods * podPidsLimit + 2048` | + +Constant value equal to `2048` means the maximum number of processes that the system can require during run (only processes of the Linux virtual machine itself are implied). This value have been established empirically. + +**Note**: You can also define the `kernel.pid_max` value by your own, but you need to be sure that it is at least greater than the result of the expression: `maxPods * podPidsLimit + 2048`. For more information about the `podPidsLimit` and `maxPods` values, refer to the [kubeadm_kubelet](#kubeadm_kubelet) section. + +**Warning**: Also, in both the cases of calculation and manual setting of the `pid_max` value, the system displays a warning if the specified value is less than the system default value equal to `32768`. If the `pid_max` value exceeds the maximum allowable value of `4194304`, the installation is interrupted. + +**Note**: Before Kubernetes 1.21 `sysctl` property `net.ipv4.conf.all.route_localnet` have been set automatically to `1` by Kubernetes, but now it setting by Kubetools defaults. [Kubernetes 1.21 Urgent Upgrade Notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.21.md#no-really-you-must-read-this-before-you-upgrade-6). + +You can specify your own parameters instead of the standard parameters. You need to specify the parameter key and its value. If the value is empty, the key is ignored. For example: + +```yaml +services: + sysctl: + net.bridge.bridge-nf-call-iptables: 1 + net.ipv4.ip_forward: 0 + net.ipv4.ip_nonlocal_bind: 0 +``` + +**Warning**: Be careful with these settings, they directly affect the hosts operating system. + +**Warning**: If the changes to the hosts `sysctl` configurations are detected, a reboot is scheduled. After the reboot, the new parameters are validated to match the expected configuration. + +#### audit + +*Installation task*: `prepare.system.audit` + +*Can cause reboot*: No + +*Can restart service*: Always yes, `auditd`. + +*OS specific*: Yes, performs only on RHEL OS family. + +```yaml +services: + audit: + rules: + - -w /var/lib/docker -k docker + - -w /etc/docker -k docker + - -w /usr/lib/systemd/system/docker.service -k docker + - -w /usr/lib/systemd/system/docker.socket -k docker + - -w /etc/default/docker -k docker + - -w /etc/docker/daemon.json -k docker + - -w /usr/bin/containerd -k docker + - -w /usr/sbin/runc -k dockerks + - -w /usr/bin/dockerd -k docker +``` + +#### ntp + +This is a common section for `chrony` and `timesyncd` properties. + +For Kubernetes and ETCD to work correctly, it is recommended to configure the system time synchronization on all nodes of the cluster. However, this is optional and you can do it at your own discretion. 
+ +##### chrony + +*Installation task*: `prepare.ntp.chrony` + +*Can cause reboot*: No + +*Can restart service*: Always yes, `chronyd` + +*Overwrite files*: Yes, `/etc/chrony.conf`, backup is created + +*OS specific*: Yes, performs only on the RHEL OS family. + +To synchronize the system time, you must make a list of NTP servers. All servers must be accessible from any node of the cluster. +The list should be indicated in the `chrony` section of the` services.ntp` section config file. +In addition to the NTP server address, you can specify any additional configurations in the same line. + +The following parameters are supported: + +|Name|Mandatory|Type|Default value|Description| +|---|---|---|---|---| +|servers|**yes**|list| |NTP servers addresses with additional configurations.| +|makestep|no|string|`5 10`|Step the system clock if large correction is needed.| +|rtcsync|no|boolean|`True`|Specify that RTC should be automatically synchronised by kernel.| + +For more information about Chrony configuration, refer to the official documentation at [https://chrony.tuxfamily.org/documentation.html](https://chrony.tuxfamily.org/documentation.html). + +The following is a configuration example: + +```yaml +services: + ntp: + chrony: + servers: + - ntp1.example.com iburst + - ntp2.example.com iburst +``` + +An example is also available in [Full Inventory Example](examples/cluster.yaml/full-cluster.yaml). + +Synchronization is configured with the` prepare.ntp.chrony` task. The task performs the following: +* Generates the `chrony.conf` file and uploads it to the `/etc/chrony` directory on all cluster hosts. If dumping is enabled, the config dump is saved. +* Restarts the `chronyd.service` service +* Checks if the synchronization is done by the first host of the cluster. Leap status should become normal. + +If the configuration `services.ntp.chrony.servers` is absent, then the task` prepare.ntp.chrony` in the installation is skipped. + +##### timesyncd + +*Installation task*: `prepare.ntp.timesyncd` + +*Can cause reboot*: No + +*Can restart service*: Always yes, `systemd-timesyncd`. + +*Overwrite files*: Yes, `/etc/systemd/timesyncd.conf`, backup is created. + +*OS specific*: Yes, performs only on Debian OS family. + +To synchronize the system time, you must make a list of NTP servers. All servers must be accessible from any node of the cluster. +The list should be indicated in the `timesyncd.Time.NTP` parameter of the` services.ntp` section config file. +In addition to the NTP server address, you can specify any additional configurations in the same line. + +The following parameters are supported: + +|Name|Mandatory|Type|Default value|Description| +|---|---|---|---|---| +|NTP|**yes**|list| |NTP servers addresses.| +|FallbackNTP|**no**|list| |Backup NTP servers addresses when NTP servers are unavailable.| +|RootDistanceMaxSec|no|int|`5`|Step the system clock if large correction is needed.| +|PollIntervalMinSec|no|int|`32`|The minimal poll interval.| +|PollIntervalMaxSec|no|int|`2048`|The maximum poll interval.| + +The following is a configuration example: + +```yaml +services: + ntp: + timesyncd: + Time: + NTP: + - ntp1.example.com + - ntp2.example.com +``` + +Synchronization is configured with the` prepare.ntp.timesyncd` task. The task performs the following: + +* Generates the `timesyncd.conf` file and uploads it to the `/etc/systemd/` directory on all cluster hosts. If dumping is enabled, the config dump is saved. +* Restarts the `systemd-timesyncd` service. 
+* Checks if the synchronization is done by the first host of the cluster. The leap status should become normal. + +If the configuration `services.ntp.timesyncd.servers` is absent, then the task` prepare.ntp.timesyncd` in the installation is skipped. + +#### resolv.conf + +*Installation task*: `prepare.dns.resolv_conf` + +*Can cause reboot*: No + +*Can restart service*: No + +*Overwrite files*: Yes, `/etc/resolv.conf`, backup is created + +*OS specific*: No + +The `services.resolv.conf` section allows you to configure the nameserver addresses to which cluster systems has access. By default, this section is empty in the inventory. The following parameters are supported: + +|Name|Type|Description| +|---|---|---| +|search|string|The domain name to search| +|nameservers|list|The DNS servers for usage in the OS| + +**Note**: If some network resources are located in a restricted network and do not resolve through the standard DNS, be sure to configure this section and specify your custom DNS service. + +For example: + +```yaml +services: + resolv.conf: + search: default + nameservers: + - 1.1.1.1 + - 1.0.0.1 + - 2606:4700:4700::1111 + - 2606:4700:4700::1001 +``` + +#### etc_hosts + +*Installation task*: `prepare.dns.etc_hosts` + +*Can cause reboot*: no + +*Can restart service*: no + +*Overwrite files*: Yes, `/etc/hosts`, backup is created + +*OS specific*: No + +The installation procedure has a task that generates and applies `/etc/hosts` configuration file on all nodes presented in the cluster. + +**Warning**: This task overwrites the existing original `/etc/hosts` file records on all hosts. If you need to save these records, manually move them into inventory file to `serives.etc_hosts` section. + +By default, the generated file contains the following address associations: + +* Localhost for IPv4 and IPv6 +* Internal control-plain address as `control-plain` and FQDN name +* Balancers, masters, workers names and theirs FQDNs + +In order to setup your custom address, you need to specify the IP-address as the key and DNS-name as the list item. Example: + +```yaml +services: + etc_hosts: + 1.1.1.1: + - example.com +``` + +Example of generated file: + +``` +127.0.0.1 localhost localhost.localdomain localhost4 localhost.localdomain4 +::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 +1.1.1.1 example.com +100.100.100.101 k8s-stack.example.com control-plain balancer-1.k8s-stack.sdntest.example.com balancer-1 +100.100.100.102 master-1.k8s-stack.example.com master-1 +100.100.100.103 master-2.k8s-stack.example.com master-2 +100.100.100.104 master-3.k8s-stack.example.com master-3 +100.100.100.105 worker-1.k8s-stack.example.com worker-1 +100.100.100.106 worker-2.k8s-stack.example.com worker-2 +100.100.100.107 worker-3.k8s-stack.example.com worker-3 +``` + +You can specify multiple addresses at once, for example: + +```yaml +services: + etc_hosts: + 1.1.1.1: + - example.com + - demo.example.com +``` + +This generates the following result: + +``` +... +1.1.1.1 example.com demo.example.com +... +``` + +Records can be merged with defaults. You can specify additional names to the required addresses in the usual way, for example: + +```yaml +services: + etc_hosts: + 127.0.0.1: + - example.com +``` + +This produces the following result: + +``` +127.0.0.1 localhost localhost.localdomain localhost4 localhost.localdomain4 example.com +... +``` + +#### coredns + +`coredns` parameter configures the Coredns service and its DNS rules in the Kubernetes cluster. 
It is divided into the following sections:
+
+##### configmap
+
+This section contains the Configmap parameters that are applied to the Coredns service. By default the following configs are used:
+
+* Corefile - The main Coredns config, which is converted into a template in accordance with the specified parameters.
+* Hosts - Hosts file obtained in accordance with [etc_hosts inventory parameter](#etc_hosts). The contents of this file are automatically added to the inventory, if not specified manually.
+
+Before working with the Corefile, refer to the [official Coredns plugins documentation at https://coredns.io/plugins/](https://coredns.io/plugins/).
+
+The Corefile consists of the settings applied for a specific destination. By default, all settings are applied for `.:53` destination.
+For example:
+
+```yaml
+services:
+  coredns:
+    configmap:
+      Corefile:
+        '.:53':
+          errors: True
+```
+
+The following settings are supported:
+
+|Parameter|Type|Default value|Description|
+|---|---|---|---|
+|errors|boolean|True|Any errors encountered during the query processing are printed to standard output. The errors of a particular type can be consolidated and printed once per a period of time.|
+|health|boolean|True|Enables a process-wide health endpoint. When CoreDNS is up and running, this returns a 200 OK HTTP status code. The health is exported, by default, on port 8080/health.|
+|ready|boolean|True|By enabling ready, an HTTP endpoint on port 8181 returns 200 OK when all plugins that are able to signal readiness have done so. If some are not ready, the endpoint still returns a 503 with the body containing the list of plugins that are not ready. Once a plugin has signaled that it is ready, it is not queried again.|
+|prometheus|string|:9153|With prometheus, you export metrics from CoreDNS and any plugin that has them. The metrics path is fixed to /metrics.|
+|cache|integer|30|With cache enabled, all records except zone transfers and metadata records are cached according to the ttl value set.|
+|loop|boolean|True|The loop plugin sends a random probe query and keeps track of how many times it is seen. If it is seen more than twice, CoreDNS assumes a forwarding loop and halts the process.|
+|reload|boolean|True|This plugin allows automatic reload of a changed Corefile.|
+|loadbalance|boolean|True|The loadbalance acts as a round-robin DNS load balancer by randomizing the order of A, AAAA, and MX records in the answer.|
+|hosts|string|/etc/coredns/Hosts|The hosts plugin is useful for serving zones from a /etc/hosts like file. It serves from a preloaded file, which is applied from ConfigMap during the installation.|
+|forward|list|- .<br>- /etc/resolv.conf|The forward plugin re-uses already opened sockets to the upstreams. It supports UDP, TCP, and DNS-over-TLS. It is used in band health checking.|
+|kubernetes|dict| |This plugin implements the Kubernetes DNS-Based Service Discovery Specification. Refer to the following sections for more details.|
+|template|dict| |The template plugin allows you to dynamically respond to queries by just writing a template. Refer to the following sections for more details.|
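+
+For example, based on the parameters above, a sketch of overriding one of these settings and disabling another could look as follows; this only illustrates the inventory format shown earlier and is not a recommended configuration:
+
+```yaml
+services:
+  coredns:
+    configmap:
+      Corefile:
+        '.:53':
+          cache: 60
+          loadbalance: False
+```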
+
+**Note**:
+
+* All settings have their own priority. They are generated in the order in which they are listed in the above table. Their priority cannot be changed.
+* You can set any setting parameter to `False` to disable it, no matter what type it is.
+* It is possible to specify other Corefile settings in an inventory-like format. However, this is risky since the settings have not been tested with the generator. All non-supported settings have a lower priority.
+
+##### deployment
+
+This section contains YAML settings that are applied to the Coredns service via a patch. By default, this section contains the following data:
+
+```yaml
+services:
+  coredns:
+    deployment:
+      spec:
+        template:
+          spec:
+            volumes:
+              - configMap:
+                  defaultMode: 420
+                  items:
+                    - key: Corefile
+                      path: Corefile
+                    - key: Hosts
+                      path: Hosts
+                  name: coredns
+                name: config-volume
+```
+
+However, it is possible to add or modify any deployment parameters of the inventory in accordance with the Kubernetes patching syntax.
+
+#### loadbalancer
+
+`loadbalancer` configures the balancers for the Kubernetes cluster. Currently, only the Haproxy configuration can be customized.
+
+##### haproxy
+
+This section contains the configuration parameters that are applied to the **haproxy.cfg** config file. By default, the following configuration is used:
+
+```yaml
+services:
+  loadbalancer:
+    haproxy:
+      config:
+        defaults:
+          timeout_connect: '10s'
+          timeout_client: '1m'
+          timeout_server: '1m'
+          timeout_tunnel: '60m'
+          timeout_client_fin: '1m'
+          maxconn: 10000
+```
+
+These settings can be overridden in the **cluster.yaml**. Currently, the following settings for the `defaults` part of **haproxy.cfg** are supported:
+
+|Parameter|Type|Default value|Description|
+|---|---|---|---|
+|timeout_connect|string|10s|"timeout connect". Set the maximum time to wait for a connection attempt to a server to succeed.|
+|timeout_client|string|1m|"timeout client". Set the maximum inactivity time on the client side.|
+|timeout_server|string|1m|"timeout server". Set the maximum inactivity time on the server side.|
+|timeout_tunnel|string|60m|"timeout tunnel". Set the maximum inactivity time on the client and server sides for tunnels.|
+|timeout_client_fin|string|1m|"timeout client-fin". Set the inactivity timeout on the client side for half-closed connections.|
+|maxconn|integer|10000|"maxconn". Limits the sockets to this number of concurrent connections.|
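+
+For example, a minimal override of these defaults in the **cluster.yaml** could look as follows; the values shown are purely illustrative:
+
+```yaml
+services:
+  loadbalancer:
+    haproxy:
+      config:
+        defaults:
+          timeout_client: '2m'
+          timeout_server: '2m'
+          maxconn: 20000
+```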
+
+For more information on these parameters, refer to the official Haproxy documentation at [https://www.haproxy.org/download/1.8/doc/configuration.txt](https://www.haproxy.org/download/1.8/doc/configuration.txt).
+
+### RBAC psp
+
+*Installation task*: `deploy.psp`
+
+Pod security policies enable fine-grained authorization of pod creation and updates.
+Pod security policies are enforced by enabling the admission controller. By default, admission controller is enabled during installation.
+
+To configure pod security policies it is required to provide cluster-level `policy/v1beta1/podsecuritypolicy` resource
+that controls security sensitive aspects of the pod specification.
+If controller is enabled and no policies are provided, then the system does not allow deployment of new pods.
+Several OOB policies are provided and by default they are enabled during installation.
+It is also possible to specify custom policies to be applied during installation.
+
+Configuration format for `psp` section is as follows:
+
+```yaml
+rbac:
+  psp:
+    pod-security: enabled
+    oob-policies:
+      default: enabled
+      host-network: enabled
+      anyuid: enabled
+    custom-policies:
+      psp-list: []
+      roles-list: []
+      bindings-list: []
+```
+
+#### Configuring Admission Controller
+
+Admission controller is enabled by default during installation.
+It is possible to disable admission controller installation to fully disable pod security policy enforcement.
+In this case no OOB or custom policies are installed. To disable admission controller:
+
+```yaml
+rbac:
+  psp:
+    pod-security: disabled
+```
+
+**Note**:
+
+* Disabling admission controller is not recommended.
+* On existing cluster it is possible to enable/disable admission controller using the `manage_psp` maintenance procedure.
+
+#### Configuring OOB Policies
+
+The following policies are provided and enabled out of the box:
+
+|Policy name|PSP, CR, CRB names|Use case|
+|---|---|---|
+|privileged|oob-privileged-psp<br>oob-privileged-psp-cr<br>oob-privileged-psp-crb|Used for pods which require full privileges, for example kube-system pods|
+|default|oob-default-psp<br>oob-default-psp-cr<br>oob-default-psp-crb|Used for authenticated group, enforces unauthorized users to deploy pods with severe restrictions|
+|anyuid|oob-anyuid-psp<br>oob-anyuid-psp-cr|Used for pods which require root privileges|
+|host-network|oob-host-network-psp<br>oob-host-network-psp-cr|Used for pods which require host network access|
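+
+To see the OOB resources from the table above on a running cluster, you can list them manually; this is only an illustrative check and is not part of the installation procedure:
+
+```bash
+# lists the OOB pod security policies, cluster roles, and bindings by name
+kubectl get psp,clusterroles,clusterrolebindings -o name | grep oob-
+```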
+ + +The OOB policies are not installed if admission controller is disabled. +You can manually disable a particular OOB policy during installation, except `privileged` policy. + +For example, to disable `host-network` OOB policy: + +```yaml +rbac: + psp: + oob-policies: + host-network: disabled +``` + +**Note**: + +* Disabling OOB policies is not recommended. +* `PodSecurityPolicy` (PSP) resources included in different OOB policies are used by different OOB plugins, so disabling any OOB policy may lead to **issues with some OOB plugins**. + If you are using OOB plugins then you should provide custom PSPs in place of disabled OOB PSPs and bind them using `ClusterRoleBinding` to particular plugin `ServiceAccout`. +* It is possible to reconfigure OOB policies on an existing cluster using the `manage_psp` maintenance procedure. + +#### Configuring Custom Policies + +You can install custom policies during cluster installation. For example, to install custom "most restricted" policy for `authenticated` group: + +```yaml +rbac: + psp: + custom-policies: + psp-list: + - apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: most-restricted-psp + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + spec: + privileged: false + # Allow core volume types. + hostPID: false + hostIPC: false + hostNetwork: false + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: true + runAsUser: + rule: 'MustRunAsNonRoot' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + runAsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + allowPrivilegeEscalation: false + seLinux: + rule: 'RunAsAny' + requiredDropCapabilities: + - ALL + roles-list: + - apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: most-restricted-psp-cr + rules: + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - most-restricted-psp + bindings-list: + - kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: most-restricted-psp-crb + roleRef: + kind: ClusterRole + name: most-restricted-psp-cr + apiGroup: rbac.authorization.k8s.io + subjects: + - kind: ServiceAccount + # it is possible to bind to non-existing SA in non-existing namespace + name: sa-name + namespace: sa-namespace +``` + +**Note**: + +* Any of these lists can be empty. +* If the list is not empty, then all the resources should align with list type. For example, the `psp-list` can only have resources with `kind: PodSecurityPolicy`. +* The custom policies should not have 'oob-' prefix. +* To manage custom policies on an existing cluster use the `manage_psp` maintenance procedure. + +### RBAC accounts + +*Installation task*: `deploy.accounts` + +In the `deploy.accounts` section, you can specify the account creation settings after the cluster is installed. + +### RBAC account_defaults + +In this section, you can describe any parameters that needs to be applied by default to each record in the [RBAC accounts](#rbac-accounts) section. It works the same way as [node_defaults](#node_defaults). 
+ +### Plugins + +*Installation task*: `deploy.plugins` + +In the `deploy.plugins` section, you can configure the parameters of the plugins, as well as register your own plugins. Plugins are installed during the `deploy.plugins` task. +If you skip the plugin installation task, no plugins are installed. + +#### Predefined Plugins + +When you want to install a plugin, the installer includes pre-configured plug-in configurations. The following plugins are available for installation out of the box: + +* Network plugins + * [calico](#calico) + * [flannel](#flannel) +* Ingress Controllers + * [nginx-ingress-controller](#nginx-ingress-controller) + * [haproxy-ingress-controller](#haproxy-ingress-controller) +* [kubernetes-dashboard](#kubernetes-dashboard) +* [local-path-provisioner](#local-path-provisioner) + +**Note**: It is not possible to install multiple plugins of the same type at the same time. + +##### calico + +Before proceeding, refer to the [Official Documentation of the Kubernetes Cluster Network](https://kubernetes.io/docs/concepts/cluster-administration/networking/). + +Calico plugin is installed by default and does not require any special enablement or configuration. However it is possible to explicitly enable or disable the installation of this plugin through the `install` plugin parameter. + +The following is an example to enable the calico plugin: + +```yaml +plugins: + calico: + install: true +``` + +The following is an example to disable the calico plugin: + +```yaml +plugins: + calico: + install: false +``` + +After applying the plugin configurations, the plugin installation procedure waits for the following pods to be in the `Running` state: +* coredns +* calico-kube-controllers +* calico-node + +If the pods do not have time to start at a specific timeout, then the plugin configuration is incorrect. In this case, the installation is aborted. + +By default, no additional settings are required for the plugin. However, you can change the default settings. To do this, in the `plugins` section of the config file, specify the `calico` plugin section and list all the necessary parameters and their values ​​in it. +For example: + +```yaml +plugins: + calico: + install: true + mtu: 1400 + typha: + enabled: true + node: + image: calico/node:v3.10.1 +``` + +An example is also available in [Full Inventory Example](examples/cluster.yaml/full-cluster.yaml). + +**Warning**: For correct network communication, it is important to set the correct MTU value (For example in case `ipip` mode it should be 20 bytes less than MTU NIC size). [More details](Troubleshooting.md#packets-between-nodes-in-different-networks-are-lost) + +**Note**: If the cluster size is more than 50 nodes, it is recommended to enable the Calico Typha daemon and adjust the size of its replicas. 
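+
+For example, for a cluster larger than 50 nodes, a sketch of enabling Typha with an explicit replica count could look like this; the replica count here is illustrative, and the default formula is described in the parameter table below:
+
+```yaml
+plugins:
+  calico:
+    install: true
+    typha:
+      enabled: true
+      replicas: 3
+```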
+ +The plugin configuration supports the following parameters: + +|Name|Type|Default Value|Value Rules|Description| +|---|---|---|---|---| +|mode|string|`ipip`|`ipip` / `vxlan`|Network protocol to be used in network plugin| +|crossSubnet|boolean|`true`| |Enables crossing subnet boundaries to improve network performance| +|mtu|int|`1440`|MTU size on interface - 50|MTU size for Calico interface| +|typha.enabled|boolean|`false`|Enable if you have more than 50 nodes in cluster|Enables the [Typha Daemon](https://github.com/projectcalico/typha)| +|typha.replicas|int|`{{ (((nodes\|length)/50) + 1) \| round(1) }}`|1 replica for every 50 cluster nodes|Number of Typha running replicas| +|typha.image|string|`calico/typha:v3.10.1`|Should contain both image name and version|Calico Typha image| +|cni.image|string|`calico/cni:v3.10.1`|Should contain both image name and version|Calico CNI image| +|node.image|string|`calico/node:v3.10.1`|Should contain both image name and version|Calico Node image| +|kube-controllers.image|string|`calico/kube-controllers:v3.10.1`|Should contain both image name and version|Calico Kube Controllers image| +|flexvol.image|string|`calico/pod2daemon-flexvol:v3.10.1`|Should contain both image name and version|Calico Flexvol image| + +###### Calico Environment Properties + +It is possible to change the default Calico environment properties. To do that, it is required to specify a key-value in the `env` section in the `calico` plugin definition. For example: + +``` +plugins: + calico: + env: + WAIT_FOR_DATASTORE: false + FELIX_DEFAULTENDPOINTTOHOSTACTION: DENY +``` + +**Note**: In case of you use IPv6 you have to define `CALICO_ROUTER_ID` with value `hash` in `env` section. This uses a hash of the configured nodename for the router ID. + +For more information about the supported Calico environment variables, refer to the official Calico documentation at [https://docs.projectcalico.org/reference/node/configuration](https://docs.projectcalico.org/reference/node/configuration). + +##### flannel + +Before proceeding, refer to the [Official Documentation of the Kubernetes Cluster Network](https://kubernetes.io/docs/concepts/cluster-administration/networking/). + +**Warning**: This plugin is experimental. It is not recommended to use it in production. + +Flannel plugin is not installed by default. However, it is possible to explicitly enable or disable the installation of this plugin through the `install` plugin parameter. + +The following is an example to enable the plugin: + +```yaml +plugins: + flannel: + install: true +``` + +If you explicitly enable Flannel plugin and do not enable Calico plugin, then only Flannel plugin is installed, and Calico plugin is not installed by default. + +After applying the plugin configurations, the plugin installation procedure waits for the following pods to be in the `Running` state: +* coredns +* kube-flannel-ds-amd64 + +If the pods do not have time to start at a specific timeout, then the plugin configuration is incorrect. In this case, the installation is aborted. + +By default, no additional settings are required for the plugin. However, you can change the default settings. To do this, in the `plugins` section of the config file, specify the `flannel` plugin section and list all the necessary parameters and their values ​​in it. 
+For example: + +```yaml +plugins: + flannel: + install: true + node: + image: quay.io/coreos/flannel:v0.11.0-amd64 +``` + +An example is also available in [Full Inventory Example](examples/cluster.yaml/full-cluster.yaml). + +The plugin configuration supports the `image` parameter. The `image` parameter specifies the string for the Flannel image. The default value is `quay.io/coreos/flannel:v0.11.0-amd64`. + +##### nginx-ingress-controller + +Before proceeding, refer to the [Official Documentation of the Kubernetes Ingress Controllers](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/) and visit [official Nginx Ingress Controller repository](https://github.com/nginxinc/kubernetes-ingress). + +NGINX Ingress Controller plugin is installed by default and does not require any special enablement or configuration. However, you can explicitly enable or disable the installation of this plugin through the `install` plugin parameter. + +The following is an example to enable the plugin: + +```yaml +plugins: + nginx-ingress-controller: + install: true +``` + +The following is an example to disable the plugin: + +```yaml +plugins: + nginx-ingress-controller: + install: false +``` + +After applying the plugin configurations, the plugin installation procedure waits for the `nginx-ingress-controller` pod to be in the `Running` state. + +If the pods do not have time to start at a specific timeout, then the plugin configuration is incorrect. In this case, the installation is aborted. + +By default, no additional settings are required for the plugin. However, you can change the default settings. To do this, in the `plugins` section of the config file, specify the `nginx-ingress-controller` plugin section and list all the necessary parameters and their values ​​in it. +For example: + +```yaml +plugins: + nginx-ingress-controller: + install: true + controller: + image: k8s-artifacts-prod/ingress-nginx/controller:v0.34.1 +``` + +An example is also available in [Full Inventory Example](examples/cluster.yaml/full-cluster.yaml). + +The plugin configuration supports the following parameters: + +* The `controller.image` parameter specifies the string for the NGINX Ingress Controller image. +* The `controller.ssl.enableSslPassthrough` parameter is used to enable the ssl-passthrough feature. The default value is `false`. +**Note**: Enabling this feature introduces a small performance penalty. + +* The `controller.ssl.default-certificate` parameter is used to configure a custom default certificate for ingress resources. +The certificate and key are provided using one of the following two formats: + + * The `controller.ssl.default-certificate.data` format is used to provide a certificate and a key inplace in the pem format: + + ```yaml + nginx-ingress-controller: + controller: + ssl: + default-certificate: + data: + cert: | + -----BEGIN CERTIFICATE----- + ... (skipped) ... + -----END CERTIFICATE----- + key: | + -----BEGIN RSA PRIVATE KEY----- + ... (skipped) ... + -----END RSA PRIVATE KEY----- + ``` + + * The `controller.ssl.default-certificate.paths` format is used to provide a certificate and a key as paths to the pem files: + + ```yaml + nginx-ingress-controller: + controller: + ssl: + default-certificate: + paths: + cert: /path/to/cert + key: /path/to/key + ``` +* The `config_map` parameter is used to customize or fine tune NGINX behavior. 
Before proceeding, refer to the [Official NGINX Ingress Controller documentation](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/).
+For example:
+```yaml
+ nginx-ingress-controller:
+   config_map:
+     server-tokens: "False"
+```
+
+* The `custom_headers` parameter sets specified custom headers before sending the traffic to backends. Before proceeding, refer to the official NGINX Ingress Controller documentation at [https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/).
+
+For example:
+
+```yaml
+ nginx-ingress-controller:
+   custom_headers:
+     Expect: $http_expect
+     X-Different-Name: "true"
+     X-Request-Start: t=${msec}
+     X-Using-Nginx-Controller: "true"
+```
+
+##### haproxy-ingress-controller
+
+Before proceeding, refer to the [Official Documentation of the Kubernetes Ingress Controllers](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/) and visit [official HAProxy Ingress Controller repository](https://github.com/haproxytech/kubernetes-ingress).
+
+**Warning**: This plugin is experimental. It is not recommended to use it in production.
+
+The HAProxy Ingress Controller plugin is not installed by default. However, you can explicitly enable or disable the installation of this plugin through the `install` plugin parameter.
+
+The following is an example to enable the plugin:
+
+```yaml
+plugins:
+  haproxy-ingress-controller:
+    install: true
+```
+
+If you explicitly enable HAProxy Ingress Controller plugin, but do not enable NGINX Ingress Controller plugin, then only HAProxy plugin is installed, and NGINX plugin is not installed by default.
+
+After applying the plugin configurations, the plugin installation procedure waits for the `haproxy-ingress` pod to be in the `Running` state.
+
+If the pods do not have time to start at a specific timeout, then the plugin configuration is incorrect. In this case, the installation is aborted.
+
+By default, no additional settings are required for the plugin. However, you can change the default settings. To do this, in the `plugins` section of the config file, specify the `haproxy-ingress-controller` plugin section and list all the necessary parameters and their values in it.
+For example:
+
+```yaml
+plugins:
+  haproxy-ingress-controller:
+    install: true
+    controller:
+      image: haproxytech/kubernetes-ingress:1.2.7
+    backend:
+      image: k8s.gcr.io/defaultbackend:1.0
+```
+
+An example is also available in [Full Inventory Example](examples/cluster.yaml/full-cluster.yaml).
+
+The plugin configuration supports the following parameters:
+
+|Name|Type|Default Value|Value Rules|Description|
+|---|---|---|---|---|
+|controller.image|string|`haproxytech/kubernetes-ingress:1.2.7`| |HAProxy Ingress Controller image|
+|backend.image|string|`k8s.gcr.io/defaultbackend:1.0`| |Default Backend image for HAProxy Ingress Controller|
+
+##### kubernetes-dashboard
+
+Before proceeding, refer to the [Official Documentation of the Kubernetes Dashboard UI](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/) and visit [official Kubernetes Dashboard repository](https://github.com/kubernetes/dashboard).
+
+By default, the Kubernetes dashboard is not installed, as it is not a mandatory part of the cluster. However, you can install it by enabling the plugin.
+
+The following is an example to enable the dashboard plugin:
+
+```yaml
+plugins:
+  kubernetes-dashboard:
+    install: true
+```
+
+**Note**: By default, the Kubernetes Dashboard is available at `dashboard.{{ cluster_name }}`.
+
+**Note**: The Kubernetes Dashboard UI is available **only** via HTTPS.
+
+If you enable the plugin, all other parameters are applied by default. The following is a list of supported parameters:
+
+<table>
+  <tr>
+    <th>Name</th>
+    <th>Default value</th>
+    <th>Description</th>
+  </tr>
+  <tr>
+    <td>hostname</td>
+    <td><code>dashboard.{{ cluster_name }}</code></td>
+    <td>Address on which the Kubernetes Dashboard UI is located. Actually an alias for <code>ingress.spec</code>.</td>
+  </tr>
+  <tr>
+    <td>dashboard.image</td>
+    <td><code>kubernetesui/dashboard:v2.0.0-rc2</code></td>
+    <td>Kubernetes Dashboard image.</td>
+  </tr>
+  <tr>
+    <td>metrics-scraper.image</td>
+    <td><code>kubernetesui/metrics-scraper:v1.0.2</code></td>
+    <td>Kubernetes Dashboard Metrics Scraper image.</td>
+  </tr>
+  <tr>
+    <td>ingress.metadata</td>
+    <td><pre>name: kubernetes-dashboard
+namespace: kubernetes-dashboard
+annotations:
+  nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
+  nginx.ingress.kubernetes.io/ssl-redirect: "true"
+  nginx.ingress.kubernetes.io/rewrite-target: /
+  nginx.ingress.kubernetes.io/secure-backends: "true"
+  nginx.ingress.kubernetes.io/ssl-passthrough: "true"</pre></td>
+    <td>Ingress metadata, typically contains namespace and NGINX-specific parameters.</td>
+  </tr>
+  <tr>
+    <td>ingress.spec</td>
+    <td><pre>tls:
+  - hosts:
+    - '{{ plugins["kubernetes-dashboard"].hostname }}'
+rules:
+  - host: '{{ plugins["kubernetes-dashboard"].hostname }}'
+    http:
+      paths:
+        - path: /
+          backend:
+            serviceName: kubernetes-dashboard
+            servicePort: 443</pre></td>
+    <td>Ingress specs, determining where and on which port the Kubernetes Dashboard UI is located.</td>
+  </tr>
+</table>
+ +If you do not want the default parameters, you can override them. + +The following is an example to use custom dashboard address: + +```yaml +plugins: + kubernetes-dashboard: + install: true + hostname: 'mydashboard.k8s.example.com' +``` + +The following is an example to use custom dashboard images: + +```yaml +plugins: + kubernetes-dashboard: + install: true + dashboard: + image: kubernetesui/dashboard:v2.0.0-rc2 + metrics-scraper: + image: kubernetesui/metrics-scraper:v1.0.2 +``` + +The following is an example to redefine ingress parameters: + +```yaml +plugins: + kubernetes-dashboard: + install: true + ingress: + metadata: + name: kubernetes-dashboard + namespace: kubernetes-dashboard + annotations: + nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + nginx.ingress.kubernetes.io/ssl-redirect: "true" + nginx.ingress.kubernetes.io/rewrite-target: / + nginx.ingress.kubernetes.io/secure-backends: "true" + nginx.ingress.kubernetes.io/ssl-passthrough: "true" + spec: + tls: + - hosts: + - 'mydashboard.k8s.example.com' + rules: + - host: 'mydashboard.k8s.example.com' + http: + paths: + - path: / + backend: + serviceName: kubernetes-dashboard + servicePort: 443 +``` + +**Warning**: Be very careful when overriding these parameters. + +##### local-path-provisioner + +Before proceeding, visit [official Local Path Provisioner repository](https://github.com/rancher/local-path-provisioner). + +By default, the local path provisioner is not installed, as it is not a +mandatory part of the cluster. However, you can install it by enabling the +plugin. + +The following is an example to enable this plugin: +```yaml +plugins: + local-path-provisioner: + install: true +``` + +If you enable the plugin, all other parameters are applied by default. The +following is a list of supported parameters: + +| Name | Default value | Description | +| :---: |:---:| --- | +| storage-class.name | `local-path` | Name of the storage class resource, which describes the class of the local volumes created by the provisioner. | +| storage-class.is-default | `"false"` | If `"true"`, the created storage class is the default one. | +| volume-dir | `/opt/local-path-provisioner` | The directory on each node, where the provisioner stores the PV data. For each requested PV, the provisioner creates the subdirectory in the volume-dir. | + +If you do not want the default parameters, you can override them. + +The following is an example to use custom volume directory: +```yaml +plugins: + local-path-provisioner: + install: true + volume-dir: /mnt/local-path-provisioner-volume +``` + +The following is an example to create default storage class: +```yaml +plugins: + local-path-provisioner: + install: true + storage-class: + is-default: "true" +``` + +The following is an example to use custom provisioner and helper pod image: +```yaml +plugins: + local-path-provisioner: + install: true + image: rancher/local-path-provisioner:v0.0.18 + helper-pod-image: busybox:latest +``` + +#### Plugins Features + +This section provides information about the plugin features in detail. + +##### plugin_defaults + +In the `plugin_defaults` section, you can describe any parameters that are to be applied by default to each record in the [Plugins](#plugins) section. It works the same way as [node_defaults](#node_defaults). 
+
+For example:
+
+```yaml
+plugin_defaults:
+  installation:
+    registry: artifactory.example.com:5443
+```
+
+For detailed description of `registry` parameter, see [Installation without Internet Resources](#installation-without-internet-resources).
+
+##### Plugins Reinstallation
+
+You can reinstall the necessary plugins without cluster reinstallation, for example, if the plugin configuration is corrupted.
+You can also change the configuration of any plugin on an already running cluster.
+To do this, you need to start the execution of the plugin task `deploy.plugins` and set the following:
+* Set the parameter `install: true` for plugins that need to be reinstalled.
+* Set the parameter `install: false` for those plugins that do not need to be reinstalled.
+
+Starting the task leads to re-application of the plugin configurations in Kubernetes, which allows you to reinstall, reset, or reconfigure a plugin to the desired parameters without stopping the Kubernetes cluster and other plugins.
+
+The following is an example in which Calico and NGINX Ingress Controller are not assigned for reinstallation, and the Kubernetes Dashboard is assigned for reinstallation:
+
+```yaml
+plugins:
+  calico:
+    install: false
+  nginx-ingress-controller:
+    install: false
+  kubernetes-dashboard:
+    install: true
+```
+
+**Warning**: The reinstallation behavior described above applies to the predefined plugins, but is not necessarily supported by custom plugins. For detailed information on the procedure for reinstalling custom plugins, contact the respective provider.
+
+##### Plugins Installation Order
+
+Plugins are installed in a strict sequential order. The installation sequence is determined by the `installation.priority` parameter in each plugin separately. Predefined plugins have the following predefined installation priorities:
+
+|Plugin|Priority|
+|---|---|
+|calico|`0`|
+|flannel|`0`|
+|nginx-ingress-controller|`1`|
+|haproxy-ingress-controller|`1`|
+|kubernetes-dashboard|`2`|
+
+You can change the priorities of preinstalled plugins, as well as set your own priority for the custom plugins.
+The following is an example of how to prioritize a plugin:
+
+```yaml
+plugins:
+  kubernetes-dashboard:
+    install: true
+    installation:
+      priority: 55
+```
+
+After the priorities are set, you can see the sequence of installation in stdout.
+
+If you do not set the priorities for the plugins, they are installed in any order immediately after the plugins for which the priorities are set. Also, if the plugins have the same priority, they are installed in any order.
+
+##### Node Selector
+
+It is possible to set custom nodeSelectors for the OOB plugins in order to influence pod scheduling for a particular plugin.
+
+The following table contains details about existing nodeSelector configuration options:
+
+<table>
+  <tr>
+    <th>Plugin</th>
+    <th>YAML path (relative)</th>
+    <th>Default</th>
+    <th>Notes</th>
+  </tr>
+  <tr>
+    <td>calico</td>
+    <td><ul><li>typha.nodeSelector</li><li>kube-controllers.nodeSelector</li></ul></td>
+    <td><code>beta.kubernetes.io/os: linux</code></td>
+    <td>nodeSelector applicable only for calico typha and calico kube-controllers containers, but not for ordinary calico containers, which should be deployed on all nodes</td>
+  </tr>
+  <tr>
+    <td>flannel</td>
+    <td>-</td>
+    <td>-</td>
+    <td>It is not possible to configure nodeSelector for flannel since flannel containers should run on all nodes</td>
+  </tr>
+  <tr>
+    <td>nginx-ingress-controller</td>
+    <td>controller.nodeSelector</td>
+    <td><code>kubernetes.io/os: linux</code></td>
+    <td></td>
+  </tr>
+  <tr>
+    <td>haproxy-ingress-controller</td>
+    <td><ul><li>controller.nodeSelector</li><li>backend.nodeSelector</li></ul></td>
+    <td><code>kubernetes.io/os: linux</code></td>
+    <td></td>
+  </tr>
+  <tr>
+    <td>kubernetes-dashboard</td>
+    <td><ul><li>dashboard.nodeSelector</li><li>metrics-scraper.nodeSelector</li></ul></td>
+    <td><code>beta.kubernetes.io/os: linux</code></td>
+    <td></td>
+  </tr>
+</table>
+
+For example, if you want to customize Calico kube-controllers pods to be scheduled only on nodes with `netcracker-infra: infra` label, you need to specify the following in your `cluster.yml` file:
+
+```yaml
+plugins:
+  calico:
+    kube-controllers:
+      nodeSelector:
+        netcracker-infra: infra
+```
+
+The custom nodeSelector is merged with the default nodeSelector, which results in the following configuration:
+
+```yaml
+plugins:
+  calico:
+    kube-controllers:
+      nodeSelector:
+        beta.kubernetes.io/os: linux
+        netcracker-infra: infra
+```
+
+**Note**: You need to specify corresponding labels for nodes in order for `nodeSelector` customization to work.
+
+##### Tolerations
+
+It is possible to set custom tolerations for the provided OOB plugins in order to influence pod scheduling for a particular plugin.
+
+The following table contains details about existing tolerations configuration options:
+
+<table>
+  <tr>
+    <th>Plugin</th>
+    <th>YAML path (relative)</th>
+    <th>Default</th>
+    <th>Notes</th>
+  </tr>
+  <tr>
+    <td>calico</td>
+    <td>-</td>
+    <td><pre>- effect: NoSchedule
+  operator: Exists</pre></td>
+    <td>tolerations are not configurable for network plugins</td>
+  </tr>
+  <tr>
+    <td>flannel</td>
+    <td>-</td>
+    <td><pre>- effect: NoSchedule
+  operator: Exists</pre></td>
+    <td>tolerations are not configurable for network plugins</td>
+  </tr>
+  <tr>
+    <td>nginx-ingress-controller</td>
+    <td><ul><li>controller.tolerations</li></ul></td>
+    <td>none</td>
+    <td></td>
+  </tr>
+  <tr>
+    <td>haproxy-ingress-controller</td>
+    <td><ul><li>controller.tolerations</li><li>backend.tolerations</li></ul></td>
+    <td>none</td>
+    <td></td>
+  </tr>
+  <tr>
+    <td>kubernetes-dashboard</td>
+    <td><ul><li>dashboard.tolerations</li><li>metrics-scraper.tolerations</li></ul></td>
+    <td>none</td>
+    <td></td>
+  </tr>
+</table>
+ +For example, if you want to customize the nginx-ingress-controller pods to allow scheduling on master nodes, you need to specify the following tolerations in your `cluster.yml` file: + +```yaml +plugins: + nginx-ingress-controller: + controller: + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule +``` + +#### Custom Plugins Installation Procedures + +During the installation of plugins, certain installation procedures are performed. You can use these procedures to write your custom plugins. The procedures should be presented as a list in the `installation.procedures` section in plugin definition, where each element is a separate procedure execution. +For example: + +```yaml +plugins: + example-plugin: + installation: + procedures: + - shell: mkdir -p /var/data + - template: /var/data/template.yaml.j2 + - config: + source: /var/data/config.yaml + do_render: False + - expect: + pods: + - my-service + - ansible: /var/data/playbook.yaml + - python: + module: /opt/checker/cluster.py + method: check_service + arguments: + pod-name: my-service +``` + +The procedures are executed strictly one after another according to the procedure list. The procedures of the same type can be called multiple times. + +A description of each type of plugins procedures is presented below. + +**Note**: It is highly recommended to write plugin installation procedures so that they are idempotent and it should be possible to run the installation for the plugin several times and the result should be the same. +Consequent plugin installations should not perform re-installation of the plugin, they should ensure that the plugin is already installed. +For this reason, be cautious with `python`, `shell`, and `ansible` installation procedures. + +##### template + +This procedures allows you to automatically compile the Jinja2 template file, upload to remote hosts, and apply it. The following parameters are supported: + +|Parameter|Mandatory|Default Value|Description| +|---|---|---|---| +|**source**|**yes**| |The local absolute path to the source Jinja2 template file. It is compiled before uploading to hosts.| +|**destination**|no|`/etc/kubernetes/{{ filename from source }}`|The absolute path on the hosts where the compiled template needs to be uploaded.| +|**apply_required**|no|`True`|A switch to call the command to apply the uploaded template on remote hosts.| +|**apply_command**|no|`kubectl apply -f {{ destination }}`|The command to apply the template on remote hosts after uploading it. It is called only if the switch `apply_required` is on.| +|**sudo**|no|`True`|A switch for the command execution from the sudoer.| +|**destination_groups**|no|`None`|List of groups on which the compiled template needs to be uploaded.| +|**destination_nodes**|no|`None`|List of nodes on which the compiled template needs to be uploaded.| +|**apply_groups**|no|`None`|List of groups on which the template apply command needs to be executed.| +|**apply_nodes**|no|`None`|List of nodes on which the template apply command needs to be executed.| + +Inside the templates you can use all the variables defined in the inventory in `cluster.yaml`. +Moreover, it is possible to dynamically create your own variables in runtime using `python` or `shell` plugin procedures. 
+These runtime variables can also be used in templates by accessing `runtime_vars`, for example if you have variable +`example_var` created in runtime you can access this variable in templates like `runtime_vars['example_var']` + +**Note**: You can specify nodes and groups at the same time. + +**Note**: If no groups and nodes defined, by default master group is used for destination and the first master node is used for applying. + +**Note**: You can use wildcard source. For example: +`/tmp/my_templates/*.yaml`. This source argument matches every `.yaml` template in the `my_templates` directory. + +The following is an example of using all parameters at a time: + +```yaml +plugins: + example-plugin: + installation: + procedures: + - template: + source: /var/data/template.yaml.j2 + destination: /etc/example/configuration.yaml + apply_required: true + sudo: true + destination_groups: ['master'] + destination_nodes: ['worker-1'] + apply_groups: None + apply_nodes: ['master-1', 'worker-1'] + apply_command: 'testctl apply -f /etc/example/configuration.yaml' +``` + +The following is an example of applying a Kubernetes configuration: + +```yaml +plugins: + nginx-ingress-controller: + installation: + procedures: + - template: + source: /var/data/plugins/nginx-ingress-controller.yaml.j2 +``` + +The following is an example of applying configuration with custom ctl: + +```yaml +plugins: + calico: + installation: + procedures: + - template: + source: /var/data/plugins/calico-ippool.yaml.j2 + destination: /etc/calico/ippool.yaml + apply_command: 'calicoctl apply -f /etc/calico/ippool.yaml' +``` + +The following is an example of uploading a compiled Jinja2 template to masters and workers without applying it: + +```yaml +plugins: + calico: + installation: + procedures: + - template: + source: /var/data/plugins/calicoctl.cfg.j2 + destination: /etc/calico/calicoctl.cfg + destination_groups: ['master', 'worker'] + apply_required: false +``` + +A short format of template procedure is available. In this format only mandatory source paths should be specified. For example: + +```yaml +plugins: + example-plugin: + installation: + procedures: + - template: /var/data/template.yaml.j2 +``` + +It equals to the following record: + +```yaml +plugins: + example-plugin: + installation: + procedures: + - template: + source: /var/data/template.yaml.j2 +``` + +##### config + +This procedure is an alias for [template](#template) that allows you not to render the contents of the files by using an additional property, `do_render`. By default, this value is defined as `True`, which specifies that the content is rendered as in the `Template` procedure. + +All the parameters match with [template](#template). + +|Parameter|Mandatory|Default Value|Description| +|---|---|---|---| +|**do_render**|**no**|**True**| Allows you not to render the contents of the file.| + +##### expect pods + +This procedure allows you to wait until the necessary pods are ready. You have to declare a procedure section and specify the list of the pod names that should be expected. +For example: + +```yaml +plugins: + calico: + installation: + procedures: + - expect: + pods: + - coredns + - calico-kube-controllers + - calico-node +``` + +**Note**: You can specify some part of the pod name instead of the full name of the container. + +The procedure tries once every few seconds to find the necessary pods and detect their running status. 
If you use the standard format of this procedure, then pods are expected in accordance with the following configurations: + +|Configuration|Value|Description| +|---|---|---| +|timeout|`5`|The number of seconds until the next pod status check.| +|retries|`30`|The number of attempts to check the status.| + +The total waiting time is calculated by multiplying the configuration `timeout * retries`, for default values it is 2 to 5 minutes to wait. +If during this time, the pods do not have a ready status, then a corresponding error is thrown and the work is stopped. +Also, if at least one of the expected pods is detected in the status of a fail, an error is thrown without waiting for the end of the total waiting time. +If you are not satisfied with the default wait values, you can use the advanced form of the procedure record. For example: + +```yaml +plugins: + calico: + installation: + procedures: + - expect: + pods: + timeout: 10 + retries: 60 + list: + - coredns + - calico-kube-controllers + - calico-node +``` + +##### python + +This procedure allows you to directly call the Python 3 code. +This is helpful when you want to connect a ready-made product in Python, or for example you have complex business logic that can only be described in a programming language. +For this procedure, you must specify the following parameters: + +|Parameter|Description| +|---|---| +|**module**|The absolute path on local host to the Python 3 module to be loaded.| +|**method**|The name of the method to call.| +|**arguments**|Optional. Key-value map, which should be applied as kwargs to the requested method.| + +**Note**: The python code is executed on the deploying node and not on remote nodes. + +**Note**: The called method **must** accept a cluster object as the first argument. + +For example: + +```yaml +plugins: + haproxy-ingress-controller: + installation: + procedures: + - python: + module: /var/data/plugins/ingress_controller.py + method: override_priviledged_ports + arguments: + service: haproxy-ingress + namespace: haproxy-controller +``` + +##### thirdparty + +This procedure allows you to trigger specific thirdparty installation. This thirdparty must be configured in the [thirdparties](#thirdparties) section and its destination path must be specified in this procedure. In this case, thirdparty is not installed in `prepare.thirdparties` task, but is installed during the installation of the current plugin. +For example: + +```yaml +services: + thirdparties: + /usr/bin/calicoctl: + source: 'https://example.com/calicoctl/calicoctl-linux-amd64' +plugins: + calico: + installation: + procedures: + - thirdparty: /usr/bin/calicoctl +``` + +##### shell + +This procedure allows you to execute shell code on remote hosts. The following parameters are supported: + +|Parameter|Mandatory|Default Value|Description| +|---|---|---|---| +|**command**|**yes**| |A shell command to be executed on remote hosts.| +|**sudo**|no|`False`|Switch for the command execution from the sudoer.| +|**groups**|no|`None`|List of groups on which the shell command should be executed.| +|**nodes**|no|`None`|List of nodes on which the shell command should be executed.| +|**out_vars**|no|`None`|List of ENV variables to export and save for later use| +|**in_vars**|no|`None`|List of ENV variables to import before command execution| + +**Note**: You can specify nodes and groups at the same time. + +**Note**: If no groups or nodes are specified, then by default the first master is used. 
+ +For example: + +```yaml +plugins: + calico: + installation: + procedures: + - shell: + command: mkdir -p /etc/calico + groups: ['master'] + sudo: true +``` + +There is support for a shortened format. In this case, you need to specify only the command to execute, all other parameters are set by default. For example: + +```yaml +plugins: + calico: + installation: + procedures: + - shell: whoami +``` + +It equals to the following record: + +```yaml +plugins: + example-plugin: + installation: + procedures: + - shell: + command: whoami +``` + +If you combine several commands, for example `whoami && whoami` with `sudo: true`, the second command is executed from non-sudoer. In this case, specify `sudo` for second command explicitly. For example: + +```yaml +plugins: + example-plugin: + installation: + procedures: + - shell: + command: whoami && sudo whoami + sudo: true +``` + +Also try to avoid complex shell features, for example pipe redirection. Shell procedure is only for simple command invocation, but not for complex shell scripts. If you need to call complex shell logic, place a script file, upload it to a remote host, and call the script. For example: + +```yaml +plugins: + calico: + installation: + procedures: + - template: + source: /var/data/plugins/script.sh + destination: /etc/calico/script.sh + destination_nodes: ['master-1'] + apply_required: false + - shell: + command: bash -x /etc/calico/script.sh + nodes: ['master-1'] + sudo: true +``` + +Example of runtime variables usage in shell procedure: + +```yaml +plugins: + example-plugin: + installation: + procedures: + - shell: + command: + - echo $input_var $input_var_1 + - export output_var='this string will be saved to `runtime_vars` with name `output_var`' + - export output_var_2='this string will be saved to `runtime_vars` with name `example_var_alias`' + out_vars: + - name: output_var + - name: output_var_2 + save_as: example_var_alias + in_vars: + - name: input_var # value for this var should be set in runtime as it is for `output_var`, or else it will be empty + - name: input_var_1 + value: static value, which can also be rendered {{ like_this }} +``` + +##### ansible + +This procedure allows you to directly execute Ansible playbooks. This is useful when you have a ready-made set of playbooks required for your business logic and you need to execute them during the installation process. +For this procedure you must specify the following parameters: + +|Parameter|Mandatory|Default Value|Description| +|---|---|---|---| +|**playbook**|**yes**| |An absolute path for playbook to be executed.| +|**vars**|no|`None`|Additional variables, overriding variables from Ansible inventory. They are passed as `--extra-vars` in CLI.| +|**become**|no|`False`|Privilege escalation switch. Enables `-b` argument.| +|**groups**|no|`None`|Targeted list of groups, passed to Ansible as `--limit` argument.| +|**nodes**|no|`None`|Targeted list of nodes, passed to Ansible as `--limit` argument.| + +**Note**: The playbook execution starts on the deploying node, not on remote nodes. + +**Note**: Ansible must be manually installed on the deploying node. + +**Note**: An [Ansible Inventory](#ansible-inventory) is provided to the playbook, so it should not be disabled. + +**Note**: When calling ansible plugin from kubetools container, note that kubetools container is shiped with ansible-2.9.9. 
+ +For example: + +```yaml +plugins: + example-plugin: + installation: + procedures: + - ansible: + playbook: /var/data/plugins/playbook.yaml + vars: + foo: bar + become: True + groups: ['master', 'worker'] +``` + +There is support for a shortened format. In this case, you need to specify only path to the playbook, all other parameters are set by default. For example: + +```yaml +plugins: + calico: + installation: + procedures: + - ansible: /var/data/plugins/playbook.yaml +``` + +It equals to the following record: + +```yaml +plugins: + example-plugin: + installation: + procedures: + - ansible: + playbook: /var/data/plugins/playbook.yaml +``` + +##### helm + +You can install or upgrade HELM chart on Kubernetes cluster. +If a Helm chart is already installed on the cluster, the `helm upgrade` command is called, otherwise `helm install` command is called. + +Specify the following parameters: + +The `chart_path` parameter specifies the absolute path on local host to the Helm chart. The URL link to chart archive is also supported. + +The `values` parameter specifies the YAML formatted values for the chart that override values from `values.yaml` file from the provided chart. This parameter is optional. + +The `values_file` parameter specifies the absolute path on local host to the file with YAML formatted values for the chart that override values from `values.yaml` file from the provided chart. Alternate for `values`. This parameter is optional. + +The `namespace` parameter specifies the cloud namespace where chart should be installed. This parameter is optional. + +**Note**: + +* Helm 3 is only supported. +* If the `values` parameter is specified, the `values_file` parameter is ignored. + +For example: + +```yaml +plugins: + some_plugin: + install: True + installation: + priority: 10 + procedures: + - helm: + chart_path: /tmp/some-chart + values: + serviceAccount: + create: false + namespace: elastic-search + values_file: /tmp/custom_values.yaml +``` + +## Advanced Features + +Before use, the configuration file **cluster.yaml** is preprocessed. The user settings are merged with default settings, thereby creating the final configuration file, which +is further used throughout the entire installation. + +**Note**: If [Dump Files](#dump-files) is enabled, then you can see merged **cluster.yaml** file version in the dump directory. + +To make sure that the information in the configuration file is not duplicated, the following advanced functionality appears in the yaml file: + +* List merge strategy +* Dynamic variables + +### List Merge Strategy + +It is possible to define the following strategies when merging two lists: + +* **replace** - It indicates that the contents of one list must be replaced by + other. This strategy is useful when you need to completely replace the default + list with the settings on your own. If no strategy is specified, then this + strategy is applied by default. +* **merge** - It indicates that the contents of one list must be merged with + other. This strategy is useful when you need to merge the default list of + settings with your list, without replacing the earlier list. + +To define a strategy in the list, you must specify a new list element. In +this element, you need to put a key-value pair, where the key is `<<`, and value +is the name of the join strategy. + +**Note**: This functionality is available only for lists and only single strategy pointer is allowed inside the list. 
+ +The following is an example of `replace` strategy: + +```yaml +plugins: + calico: + installation: + procedures: + - template: /var/data/custom_template.yaml.j2 + - '<<': replace +``` + +The user list replaces the default: + +```yaml +plugins: + calico: + installation: + procedures: + - template: /var/data/custom_template.yaml.j2 +``` + +The following is an example of `merge` strategy: + +```yaml +plugins: + calico: + installation: + procedures: + - template: /var/data/custom_template.yaml.j2 + - '<<': merge +``` + +The result is as follows: + +```yaml +plugins: + calico: + installation: + procedures: + - template: /var/data/custom_template.yaml.j2 + - template: templates/plugins/calico.yaml.j2 + expect: + pods: + - coredns +``` + +#### Merge Strategy Positioning + +With the `merge` strategy, you can specify a specific place for the content +from the default list inside the user list. + +For example, you can indicate it at the beginning: + +```yaml +plugins: + calico: + installation: + procedures: + - '<<': merge + - template: /var/data/custom_template.yaml.j2 + - template: /var/data/custom_template2.yaml.j2 +``` + +As a result, the default part of the list is at the beginning, and the user +part at the end: + +```yaml +plugins: + calico: + installation: + procedures: + - template: templates/plugins/calico.yaml.j2 + expect: + pods: + - coredns + - template: /var/data/custom_template.yaml.j2 + - template: /var/data/custom_template2.yaml.j2 +``` + +You can specify it at the end as follows: + +```yaml +plugins: + calico: + installation: + procedures: + - template: /var/data/custom_template.yaml.j2 + - template: /var/data/custom_template2.yaml.j2 + - '<<': merge +``` + +As a result, the default part of the list is at the end, and the user part at the beginning: + +```yaml +plugins: + calico: + installation: + procedures: + - template: /var/data/custom_template.yaml.j2 + - template: /var/data/custom_template2.yaml.j2 + - template: templates/plugins/calico.yaml.j2 + expect: + pods: + - coredns +``` + +You can specify in the middle as follows: + +```yaml +plugins: + calico: + installation: + procedures: + - template: /var/data/custom_template.yaml.j2 + - '<<': merge + - template: /var/data/custom_template2.yaml.j2 +``` + +The result is as follows: + +```yaml +plugins: + calico: + installation: + procedures: + - template: /var/data/custom_template.yaml.j2 + - template: templates/plugins/calico.yaml.j2 + expect: + pods: + - coredns + - template: /var/data/custom_template2.yaml.j2 +``` + +### Dynamic Variables + +There are settings in the configuration file that borrow their contents from the settings of the other sections. To avoid any duplication of the settings, the mechanism of dynamic variables is used. + +This mechanism allows you to specify a link to one variable to another. + +For example, the following parameters: + +```yaml +section_one: + variable: "test" +section_two: + variable: '{{ section_one.variable }}' +``` + +This leads to the following result: + +```yaml +section_one: + variable: test +section_two: + variable: test +``` + +Dynamic variables allow you to refer to the other variables, but can also be full-fledged Jinja2 templates. + +For example, the following configuration: + +```yaml +section_one: +section_two: + variable: '{{ section_one.variable | default("nothing") }}' +``` + +This leads to the following result: + +```yaml +section_one: null +section_two: + variable: nothing +``` + +Recursive pointing to each other is also supported. 
For example: + +```yaml +section: + - variable: '{{ section[1].variable }}-2' + - variable: '{{ section[2].variable }}-1' + - variable: "hello" +``` + +The above configuration generates the following result: + +```yaml +section: +- variable: hello-1-2 +- variable: hello-1 +- variable: hello +``` + +#### Limitations + +Dynamic variables have some limitations that should be considered when working with them: + +* The start pointer of the Jinja2 template must be inside a pair of single or double quotes. The `{{` or `{%` out of quotes leads to a parsing error of the yaml file. +* The variable cannot refer to itself. It does not lead to any result, but it slows down the compilation process. +* The variables cannot mutually refer to each other. For example, the following configuration: + + ```yaml + section: + variable_one: '{{ section.variable_two }}' + variable_two: '{{ section.variable_one }}' + ``` + + This leads to the following result: + + ```yaml + section: + variable_one: '{{ section.variable_one }}' + variable_two: '{{ section.variable_one }}' + ``` + The variables copy each other, but since none of them lead to any result, there is a cyclic link to one of them. + +#### Jinja2 Expressions Escaping + +Inventory strings can have strings containing characters that Jinja2 considers as their expressions. For example, if you specify a golang template. To avoid rendering errors for such expressions, it is possible to wrap them in exceptions `{% raw %}``{% endraw %}`. +For example: + +```yaml +authority: '{% raw %}{{ .Name }}{% endraw %} 3600 IN SOA' +``` +## Installation without Internet Resources + +If you want to install Kubernetes in a private environment, without access to the internet, then you need to redefine the addresses of remote resources. +Be careful with the following parameters: + +|Path|Registry Type|Format|Example|Description| +|---|---|---|---|---| +|`services.kubeadm.imageRepository`|Docker|Address without protocol, where Kubernetes images are stored. It should be the full path to the repository.|```example.com:5443/k8s.gcr.io```|Kubernetes Image Repository. The system container's images such as `kubeapi` or `etcd` is loaded from this registry.| +|`services.docker.insecure-registries`|Docker|List with addresses without protocol.|```example.com:5443```|Docker Insecure Registries. It is necessary for the Docker to allow connection to addresses unknown to it.| +|`services.docker.registry-mirrors`|Docker|List with addresses. Each address should contain a protocol.|```https://example.com:5443```|Docker Registry Mirrors. Additional image sources for container's images pull.| +|`services.thirdparties.{{ thirdparty }}.source`|Plain|Address with protocol or absolute path on deploy node. It should be the full path to the file.|```https://example.com/kubeadm/v1.16.3/bin/linux/amd64/kubeadm```|Thridparty Source. Thirdparty file, such as binary, archive and so on, is loaded from this registry.| +|`plugin_defaults.installation.registry`|Docker|Address without protocol, where plugins images are stored.|```example.com:5443```|Plugins Images Registry. All plugins container's images are loaded from this registry.| + + +# Installation Procedure + +The installation information for Kubetools is specified below. + +**Warning**: Running the installation on an already running cluster redeploys the cluster from scratch. 
+ +## Installation Tasks Description + +The following is the installation tasks tree: + +* **prepare** + * **check** + * **sudoer** - Validates if the connection user has the sudoer permissions. + * **system** - Validates the distributive and version of the hosts operating system. + * **cluster_installation** - Looks for an already installed cluster. + * **dns** + * **hostname** - Configures nodes hostnames. + * **resolv_conf** - Configures the records in `/etc/resolv.conf` (backup is presented). For more information about parameters for this task, see [resolv.conf](#resolvconf). If no parameters are presented, the task is skipped. + * **etc_hosts** - Configures the records in `/etc/hosts` (backup is presented). This task writes the node names and their addresses to this file. + * **package_manager** + * **configure** - Configures repositories for the package manager (backup is presented) and updates the repodata. For more information about parameters for this task, see [package_manager](#package_manager). If no parameters are presented, the task is skipped. OS-specific. + * **manage_packages** - Manages packages on hosts. For more information about parameters for this task, see [packages](#packages). If no parameters are presented, the task is skipped. OS-specific. + * **ntp** + * **chrony** - Configures the file `/etc/chrony.conf` (backup is presented) and synchronizes the time using the `chronyd` service. For more information about parameters for this task, see [chrony](#chrony). If no parameters are presented or non-RHEL OS is used, the task is skipped. + * **timesyncd** - Configures the file `/etc/systemd/timesyncd.conf` (backup is presented) and synchronizes the time using the `timesyncd` service. For more information about parameters for this task, see [timesyncd](#timesyncd). If no parameters are presented or non-Debian OS is used, the task is skipped. + * **system** + * **setup_selinux** - Configures SELinux. For more information about parameters for this task, see [SELinux](#selinux). The task is performed only for the RHEL OS family. + * **setup_apparmor** - Configures AppArmor. For more information about parameters for this task, see [AppArmor](#apparmor). The task is performed only for the Debian OS family. + * **disable_firewalld** - Forcibly disables FirewallD service. + * **disable_swap** - Forcibly disables swap in system. + * **modprobe** - Configures Linux Kernel modules. For more information about parameters for this task, see [modprobe](#modprobe). + * **sysctl** - Configures Linux Kernel parameters. For more information about parameters for this task, see [sysctl](#sysctl). + * **audit** - Configures Linux audit rules. For more information about parameters for this task, see [audit](#audit). + * **cri** + * **install** - Installs the container runtime. For more information about parameters for this task, see [CRI](#cri). + * **configure** - Configures the container runtime. For more information about parameters for this task, see [CRI](#cri). + * **thirdparties** - Downloads thirdparties and installs them. For more information about parameters for this task, see [thirdparties](#thirdparties). +* **deploy** + * **loadbalancer** + * **haproxy** + * **install** - Installs HAProxy if balancers are presented in the inventory. If the HAProxy is already installed, then there is no reinstallation. + * **configure** - Configures HAProxy in the file `/etc/haproxy/haproxy.cfg` (backup is presented). 
+ * **keepalived** + * **install** - Installs Keepalived if `vrrp_ip` is presented in the inventory. If the Keepalived is already installed, then there is no reinstallation. + * **configure** - Configures Keepalived in the file `/etc/keepalived/keepalived.conf` (backup is presented). For more information about parameters for this task, see [vrrp_ips](#vrrp_ips). + * **kubernetes** + * **reset** - Resets an existing or previous Kubernetes cluster. All the data related to the Kubernetes is removed, including the container runtime being cleaned up. + * **install** - Configures Kubernetes service in the file `/etc/systemd/system/kubelet.service` + * **prepull_images** - Prepulls Kubernetes images on all nodes using parameters from the inventory. + * **init** - Initializes Kubernetes nodes via kubeadm with config files: `/etc/kubernetes/init-config.yaml` and `/etc/kubernetes/join-config.yaml`. For more information about parameters for this task, see [kubeadm](#kubeadm). + * **psp** - Applies OOB and custom pod security policies. For more information about parameters for this task, see [RBAC psp](#rbac-psp). + * **coredns** - Configures CoreDNS service with [coredns](#coredns) inventory settings. + * **plugins** - Applies plugin installation procedures. For more information about parameters for this task, see [Plugins](#plugins). + * **accounts** - Creates new users in cluster. For more information about parameters for this task, see [RBAC accounts](#rbac-accounts). +* **overview** - Collects general information about the cluster and displays it in stdout. + +**Note**: The task execution is strictly performed in the order as in the tree above. + +## Installation of Kubernetes using CLI + +Full installation using CLI can be started with the following command: + +```bash +./kubetools install +``` + +It begins the execution of all tasks available in the installer in accordance with its task tree. + +**Note**: The SSH-keyfile path in the config-file should be absolute, not relative. + +### Custom Inventory File Location + +If you are installing via CLI, you can specify the custom `cluster.yaml` location as follows: + +```bash +./kubetools install --config="${PATH_TO_CONFIG}/cluster.yaml" +``` + +or shorter + +```bash +./kubetools install -c "${PATH_TO_CONFIG}/cluster.yaml" +``` + +where, `${PATH_TO_CONFIG}` - is the path to the local inventory file. + +**Note**: Use the absolute path in arguments, instead of relative. + +# Installation Features + +This section describes the installation features. + +## Tasks List Redefinition + +It is possible to override the default installation tasks tree with `--tasks` argument when installing via CLI, and as the contents list tasks names separated by commas. + +The following is an example for CLI: + +```bash +./kubetools install --tasks="prepare.dns.etc_hosts,deploy" +``` + +For detailed tree of tasks, see [Installation Tasks Description](#installation-tasks-description). + +If required, you can exclude some tasks from the execution in `--exclude` argument when installing via CLI. The principle of action is the opposite of `tasks` argument/parameter. + +Example: + +```bash +./kubetools install --exclude="deploy.loadbalancer,deploy.kubernetes.install" +``` + +The arguments can be combined. For example, when you only need to perform a deploy, but not touch the balancers. 
+ +Example: + +```bash +./kubetools install --tasks="deploy" --exclude="deploy.loadbalancer" +``` + +When you specify the name of the task, you can specify the following types: + +* **group** - Logically separated part of the execution tree, which includes a + certain set of tasks. For example, when you specify `prepare.dns` group, it + executes only tasks from group: `prepare.dns.resolv_conf` and + `prepare.dns.etc_hosts`, tasks from other groups are skipped. +* **task** - The exact address of the task to be performed. Others are skipped. + +You can also combine the types, specify both groups and tasks at the same time. For example: + +```bash +./kubetools install --tasks="prepare.system,prepare.dns.resolv_conf" +``` + +The Flow Filter filters everything and make a new execution tree, on which the +installation begins. The list of excluded tasks gets printed before +starting the work and displays as follows: + +``` +Excluded tasks: + prepare + deploy.loadbalancer + deploy.plugins + deploy.accounts +``` + +If nothing is excluded, it displays: + +``` +Excluded tasks: + No excluded tasks +``` + +The Flow Filter also takes care of sorting the sequence of tasks. Therefore, you do +not need to consider the sequence for listing the tasks. You can do it in any sequence, and then the actual sequence of the tasks is automatically decided and followed at the time of installation. + +**Note**: The sequence cannot be changed, it is hardcoded into the source code. This is done intentionally since some tasks are dependent on others. + +## Logging + +Kubetools has the ability to customize the output of logs, as well as customize the output to a separate file or graylog. +For more information, refer to the [Configuring Kubetools Logging](documentation/public/Logging.md) section. + +## Dump Files + +During installation configurations, templates and other files are generated. For best user experience, these configurations are not displayed in the output log. +However, by default, all intermediate results are saved in the dump directory, which is automatically created at the beginning of work. +It is not recommended but you can also disable this functionality. + +By default, the dump directory is located in the `dump` directory inside executable directory. However, the dump directory location path can be changed using the` --dump-location` argument. For example: + +``` +$ install --dump-location /var/data/dump/ +``` + +**Note**: When creating a dump directory, the entire hierarchy of directories is created recursively in accordance with the specified path, even if a part of the path is missing. + +You can use the `--disable-dump` argument to disable the dumping feature that disables creation of the dump directory and stop storing dump files in it. +The following example turns off the dump: + +``` +$ install --disable-dump +``` + +If you want a dump to be created, but you do not want it to be cleaned every time, you can turn off the automatic cleaning using the `disable-dump-cleanup` parameter. For example: + +``` +$ install --disable-dump-cleanup +``` + +### Finalized Dump + +After the procedure is completed, a final dump with all the missing variable values is needed, which is pulled from the finished cluster environment. +This dump of the final inventory can be found in the `dump/cluster_finalized.yaml` file. In the file, you can see not only the compiled inventory, but also some converted values depending on what is installed on the cluster. 
+ +For example, consider the following package's origin configuration: + +```yaml +services: + packages: + associations: + docker: + executable_name: 'docker' + package_name: + - docker-ce-19.03* + - docker-ce-cli-19.03* + - containerd.io-1.4.6* + service_name: 'docker' + config_location: '/etc/docker/daemon.json' + install: + - conntrack + - ethtool + - ebtables + - socat + - unzip + - policycoreutils-python-utils +``` + +The above configuration is converted to the following finalized configuration: + +```yaml +services: + packages: + associations: + docker: + executable_name: 'docker' + package_name: + - docker-ce-19.03.15-3.el7.x86_64 + - docker-ce-cli-19.03.15-3.el7.x86_64 + - containerd.io-1.4.6-3.1.el7.x86_64 + service_name: 'docker' + config_location: '/etc/docker/daemon.json' + install: + - conntrack + - ethtool-4.8-10.el7.x86_64 + - ebtables-2.0.10-16.el7.x86_64 + - socat-1.7.3.2-2.el7.x86_64 + - unzip-6.0-21.el7.x86_64 + - policycoreutils-python-utils +``` + +**Note**: Some of the packages are impossible to be detected in the system, therefore such packages remain unchanged. + +The same applies to the VRRP interfaces. For example, the following origin configuration without interfaces: + +```yaml +vrrp_ips: + - ip: 192.168.101.1 + floating_ip: 1.101.10.110 +``` + +The above configuration is converted to the following configuration with real interfaces as it is presented on the keepalived nodes: + +```yaml +vrrp_ips: +- floating_ip: 1.101.10.110 + hosts: + - interface: eth0 + name: balancer-1 + - interface: eth0 + name: balancer-2 + ip: 192.168.101.1 +``` + +**Note**: Also, finalization escapes the golang expression; this required for prevention incompatibility with the jinja parser. + + +## Configurations Backup + +During perform of Kubetool, all configuration files on the nodes are copied to their backup copies before being overwritten. Also, all versions of the file, that are different from each other, are saved, and new copies are incremented in the file name. This protects from losing important versions of configuration files and allows to restore the desired file from a necessary backup version. Аfter several installations, you can find the file and all its backups as in the following example: + +```bash +$ ls -la /etc/resolv.conf* +-rw-rw-r--. 1 root root 78 jul 5 08:57 /etc/resolv.conf +-rw-r--r--. 1 root root 117 jul 5 08:55 /etc/resolv.conf.bak1 +-rw-r--r--. 1 root root 216 jul 5 08:57 /etc/resolv.conf.bak2 +``` + + +## Ansible Inventory + +By default, during installation a new Ansible inventory file is converted from **cluster.yaml** file. +Ansible inventory file is available in the root directory of the distribution immediately after starting the installation. + +If you want to generate only an inventory file, you must run the installer with the argument `--without-act`. For example: + +``` +$ ./kubetools install --without-act +``` + +You can specify custom path and name for the ansible inventory file, using the argument `--ansible-inventory-location`. By default, the file is saved to the executable directory with the name `ansible-inventory.ini`. For example: + +``` +$ ./kubetools install --ansible-inventory-location /var/data/ansible-inventory.ini +``` + +**Warning**: Always specify the absolute path to the file, not relative. + +Arguments can be combined. 
For example the following arguments generate the inventory without starting the installation: + +``` +$ ./kubetools install --without-act --ansible-inventory-location /var/data/inventory.ini +``` + +### Contents + +The automatically converted information is placed in the inventory file, divided into the following sections. + +#### [all] + +The `[all]` section contains the following basic knowledge about nodes to connect to: +* Node name +* Ansible-host +* Internal IP address +* External IP address (if exists) + +For example: + +```ini +[all] +localhost ansible_connection=local +k8s-lb ansible_host=10.101.10.1 ip=192.168.0.1 external_ip=10.101.10.1 +k8s-master-1 ansible_host=10.101.10.2 ip=192.168.0.2 external_ip=10.101.10.2 +k8s-master-2 ansible_host=10.101.10.3 ip=192.168.0.3 external_ip=10.101.10.3 +k8s-master-3 ansible_host=10.101.10.4 ip=192.168.0.4 external_ip=10.101.10.4 +k8s-worker-1 ansible_host=10.101.10.5 ip=192.168.0.5 external_ip=10.101.10.5 +k8s-worker-2 ansible_host=10.101.10.6 ip=192.168.0.6 external_ip=10.101.10.6 +``` + +#### [cluster:children] + +The `[cluster:children]` section contains the following node roles presented in cluster: +* balancer (if any presented) +* master +* worker (if any presented) + +For example: + +```ini +[cluster:children] +balancer +master +worker +``` + +#### [balancer], [master], [worker] + +The `[balancer]`, `[master]`, `[worker]` sections contain nodes names, which are included in this sections. + +For example: + +```ini +[balancer] +k8s-lb + +[master] +k8s-master-1 +k8s-master-2 +k8s-master-3 + +[worker] +k8s-worker-1 +k8s-worker-2 +``` + +#### [cluster:vars] + +The `[cluster:vars]` section contains other cluster-specific information: +* Username for connection +* Path to SSH key-file for connection (this data is used from `node_defaults` section from the original inventory) +* Services parameters +* Plugins parameters + +For example: + +```ini +[cluster:vars] +ansible_become=true +ansible_ssh_user=centos +ansible_ssh_private_key_file=/home/username/.ssh/id_rsa +``` + +All the data from the original inventory is included in the parameters of services and plugins, either explicitly defined by the user or automatically calculated. +They are either explicitly converted to a string type, or converted to JSON if it is list or dict. +The parameter values are presented as follows: + +For example: + +```ini +[cluster:vars] +... 
+
+# services.kubeadm
+kubeadm_apiVersion=kubeadm.k8s.io/v1beta2
+kubeadm_kind=ClusterConfiguration
+kubeadm_kubernetesVersion=v1.16.3
+kubeadm_networking={"podSubnet": "10.128.0.0/14", "serviceSubnet": "172.30.0.0/16"}
+kubeadm_apiServer={"certSANs": ["192.168.0.1", "k8s-lb", "10.101.10.1"]}
+kubeadm_imageRepository=example.com:5443
+kubeadm_controlPlaneEndpoint=k8s.example.com:6443
+
+# services.cri
+cri_containerRuntime=containerd
+cri_dockerConfig={"ipv6": false, "log-driver": "json-file", "log-opts": {"max-size": "64m", "max-file": "3"}, "exec-opts": ["native.cgroupdriver=systemd"], "icc": false, "live-restore": true, "userland-proxy": false}
+cri_containerdConfig={"version": 2, "plugins.\"io.containerd.grpc.v1.cri\"": {"sandbox_image": "k8s.gcr.io/pause:3.2"}, "plugins.\"io.containerd.grpc.v1.cri\".registry.mirrors.\"artifactory.example.com:5443\"": {"endpoint": ["https://artifactory.example.com:5443"]}}
+```
+
+**Note**: The following parameters are excluded from the final variables list:
+
+* `install`
+* `installation`
+
+## Cumulative Points
+
+Cumulative points are a special feature that allows you to combine several repeating actions of the same type into one and run it at the right moment of the installation.
+For example, if you have three tasks, each of which requires a system reboot for its configuration to apply, then instead of rebooting three times in a row, you can perform a single reboot after these three tasks.
+The description of cumulative points is as follows:
+
+|Method|Scheduled by Tasks|Executed before tasks|Description|
+|---|---|---|---|
+|os.reboot_nodes|prepare.system.setup_selinux<br>prepare.system.disable_firewalld<br>prepare.system.disable_swap<br>prepare.system.modprobe<br>prepare.system.sysctl|prepare.system.sysctl|Reboots all cluster nodes.|
+|os.verify_system|prepare.system.setup_selinux<br>prepare.system.disable_firewalld<br>prepare.system.disable_swap<br>prepare.system.modprobe<br>prepare.system.sysctl|prepare.system.sysctl|Verifies that configured system configurations have been applied.|
+
+Cumulative points are not necessarily always executed. Tasks independently decide when to schedule a cumulative point.
+For example, if the configurations are not updated, then a reboot for applying them is also not required.
+For more detailed information, see the description of the tasks and their parameters.
+If the task is skipped, then it is not able to schedule the cumulative point. For example, by skipping certain tasks, you can avoid a reboot.
+
+
+# Supported Versions
+
+**Note**: You can specify the Kubernetes version via the `kubernetesVersion` parameter. See [Kubernetes version](#kubernetes-version) section for more details.
+
+**Note**: If you need to upgrade an existing Kubernetes cluster to a new version, please use the [Upgrade Procedure](Maintenance.md#upgrade-procedure).
+
+The tables below show the correspondence of component versions that are supported and used during the installation:
+
+## Default Dependent Components Versions for Kubernetes Versions v1.19.3
+
+|Type|Name|CentOS RHEL<br>Oracle Linux 7.5+|CentOS RHEL<br>Oracle Linux 8.4|Ubuntu 20.04|Note|
+|---|---|---|---|---|---|
+|binaries|kubeadm|v1.19.3|v1.19.3|v1.19.3| |
+|binaries|kubelet|v1.19.3|v1.19.3|v1.19.3| |
+|binaries|kubectl|v1.19.3|v1.19.3|v1.19.3| |
+|binaries|calicoctl|v3.16.1|v3.16.1|v3.16.1|Required only if calico is installed.|
+|binaries|crictl|v1.20.0|v1.20.0|v1.20.0|Required only if containerd is used as a container runtime.|
+|rpms|docker-ce|19.03|19.03|19.03| |
+|rpms|containerd.io|1.4.6|1.4.8|1.4.6| |
+|rpms|podman|1.6.4|3.0.1|3.1.2|Required only if containerd is used as a container runtime.|
+|rpms|haproxy/rh-haproxy|1.8|1.8|2.0|Required only if balancers are presented in the deployment scheme.|
+|rpms|keepalived|1.3|2.1|2.0|Required only if VRRP is presented in the deployment scheme.|
+|images|k8s.gcr.io/kube-apiserver|v1.19.3|v1.19.3|v1.19.3| |
+|images|k8s.gcr.io/kube-controller-manager|v1.19.3|v1.19.3|v1.19.3| |
+|images|k8s.gcr.io/kube-proxy|v1.19.3|v1.19.3|v1.19.3| |
+|images|k8s.gcr.io/kube-scheduler|v1.19.3|v1.19.3|v1.19.3| |
+|images|k8s.gcr.io/coredns|1.7.0|1.7.0|1.7.0| |
+|images|k8s.gcr.io/pause|3.2|3.2|3.2| |
+|images|k8s.gcr.io/etcd|3.4.13-0|3.4.13-0|3.4.13-0| |
+|images|calico/typha|v3.16.1|v3.16.1|v3.16.1|Required only if Typha is enabled in Calico config.|
+|images|calico/cni|v3.16.1|v3.16.1|v3.16.1| |
+|images|calico/node|v3.16.1|v3.16.1|v3.16.1| |
+|images|calico/kube-controllers|v3.16.1|v3.16.1|v3.16.1| |
+|images|calico/pod2daemon-flexvol|v3.16.1|v3.16.1|v3.16.1| |
+|images|quay.io/kubernetes-ingress-controller/nginx-ingress-controller|0.35.0|0.35.0|0.35.0| |
+|images|kubernetesui/dashboard|v2.0.4|v2.0.4|v2.0.4|Required only if Kubernetes Dashboard plugin is set to be installed.|
+|images|kubernetesui/metrics-scraper|v1.0.4|v1.0.4|v1.0.4|Required only if Kubernetes Dashboard plugin is set to be installed.|
+|images|rancher/local-path-provisioner|v0.0.19|v0.0.19|v0.0.19|Required only if local-path provisioner plugin is set to be installed.|
+
+## Default Dependent Components Versions for Kubernetes Versions v1.20.2
+
+|Type|Name|CentOS RHEL<br>Oracle Linux 7.5+|CentOS RHEL<br>Oracle Linux 8.4|Ubuntu 20.04|Note|
+|---|---|---|---|---|---|
+|binaries|kubeadm|v1.20.2|v1.20.2|v1.20.2||
+||kubelet|v1.20.2|v1.20.2|v1.20.2||
+||kubectl|v1.20.2|v1.20.2|v1.20.2||
+||calicoctl|v3.19.1|v3.19.1|v3.19.1|Required only if calico is installed.|
+||crictl|v1.20.0|v1.20.0|v1.20.0|Required only if containerd is used as a container runtime.|
+|rpms|docker-ce|19.03|19.03|19.03||
+||containerd.io|1.4.6|1.4.8|1.4.6||
+||podman|1.6.4|3.0.1|3.1.2|Required only if containerd is used as a container runtime.|
+||haproxy/rh-haproxy|1.8|1.8|2.0|Required only if balancers are presented in the deployment scheme.|
+||keepalived|1.3|2.1|2.0|Required only if VRRP is presented in the deployment scheme.|
+|images|k8s.gcr.io/kube-apiserver|v1.20.2|v1.20.2|v1.20.2||
+||k8s.gcr.io/kube-controller-manager|v1.20.2|v1.20.2|v1.20.2||
+||k8s.gcr.io/kube-proxy|v1.20.2|v1.20.2|v1.20.2||
+||k8s.gcr.io/kube-scheduler|v1.20.2|v1.20.2|v1.20.2||
+||k8s.gcr.io/coredns|1.7.0|1.7.0|1.7.0||
+||k8s.gcr.io/pause|3.2|3.2|3.2||
+||k8s.gcr.io/etcd|3.4.13-0|3.4.13-0|3.4.13-0||
+||calico/typha|v3.19.1|v3.19.1|v3.19.1|Required only if Typha is enabled in Calico config.|
+||calico/cni|v3.19.1|v3.19.1|v3.19.1||
+||calico/node|v3.19.1|v3.19.1|v3.19.1||
+||calico/kube-controllers|v3.19.1|v3.19.1|v3.19.1||
+||calico/pod2daemon-flexvol|v3.19.1|v3.19.1|v3.19.1||
+||quay.io/kubernetes-ingress-controller/nginx-ingress-controller|v0.43.0|v0.43.0|v0.43.0||
+||kubernetesui/dashboard|v2.1.0|v2.1.0|v2.1.0|Required only if Kubernetes Dashboard plugin is set to be installed.|
+||kubernetesui/metrics-scraper|v1.0.6|v1.0.6|v1.0.6|Required only if Kubernetes Dashboard plugin is set to be installed.|
+||rancher/local-path-provisioner|v0.0.19|v0.0.19|v0.0.19|Required only if local-path provisioner plugin is set to be installed.|
+
+## Default Dependent Components Versions for Kubernetes Versions v1.21.2
+
+|Type|Name|CentOS RHEL<br>Oracle Linux 7.5+|CentOS RHEL<br>Oracle Linux 8.4|Ubuntu 20.04|Note|
+|---|---|---|---|---|---|
+|binaries|kubeadm|v1.21.2|v1.21.2|v1.21.2|SHA1: cbb07d380de4ef73d43d594a1055839fa9753138|
+||kubelet|v1.21.2|v1.21.2|v1.21.2|SHA1: 024e458aa0f74cba6b773401b779590437812fc6|
+||kubectl|v1.21.2|v1.21.2|v1.21.2|SHA1: 2c7a7de9fff41ac49f7c2546a9b1aff2c1d9c468|
+||calicoctl|v3.19.1|v3.19.1|v3.19.1|SHA1: dde3851a977280f7c0d54538526bb9459fa7a7ac<br>Required only if calico is installed.|
+||crictl|v1.20.0|v1.20.0|v1.20.0|SHA1: eaf4ffa1cfac5c69ec522d9562c8ee6ddd873f3e<br>Required only if containerd is used as a container runtime.|
+|rpms|docker-ce|19.03|19.03|19.03||
+||containerd.io|1.4.6|1.4.8|1.4.6||
+||podman|1.6.4|3.0.1|3.1.2|Required only if containerd is used as a container runtime.|
+||haproxy/rh-haproxy|1.8|1.8|2.0|Required only if balancers are presented in the deployment scheme.|
+||keepalived|1.3|2.1|2.0|Required only if VRRP is presented in the deployment scheme.|
+|images|k8s.gcr.io/kube-apiserver|v1.21.2|v1.21.2|v1.21.2||
+||k8s.gcr.io/kube-controller-manager|v1.21.2|v1.21.2|v1.21.2||
+||k8s.gcr.io/kube-proxy|v1.21.2|v1.21.2|v1.21.2||
+||k8s.gcr.io/kube-scheduler|v1.21.2|v1.21.2|v1.21.2||
+||k8s.gcr.io/coredns|1.8.0|1.8.0|1.8.0||
+||k8s.gcr.io/pause|3.2|3.2|3.2||
+||k8s.gcr.io/etcd|3.4.13-0|3.4.13-0|3.4.13-0||
+||calico/typha|v3.19.1|v3.19.1|v3.19.1|Required only if Typha is enabled in Calico config.|
+||calico/cni|v3.19.1|v3.19.1|v3.19.1||
+||calico/node|v3.19.1|v3.19.1|v3.19.1||
+||calico/kube-controllers|v3.19.1|v3.19.1|v3.19.1||
+||calico/pod2daemon-flexvol|v3.19.1|v3.19.1|v3.19.1||
+||quay.io/kubernetes-ingress-controller/nginx-ingress-controller|v0.48.1|v0.48.1|v0.48.1||
+||kubernetesui/dashboard|v2.3.1|v2.3.1|v2.3.1|Required only if Kubernetes Dashboard plugin is set to be installed.|
+||kubernetesui/metrics-scraper|v1.0.6|v1.0.6|v1.0.6|Required only if Kubernetes Dashboard plugin is set to be installed.|
+||rancher/local-path-provisioner|v0.0.19|v0.0.19|v0.0.19|Required only if local-path provisioner plugin is set to be installed.|
diff --git a/documentation/Kubecheck.md b/documentation/Kubecheck.md new file mode 100644 index 000000000..10a4be0b0 --- /dev/null +++ b/documentation/Kubecheck.md @@ -0,0 +1,538 @@ +This section provides information about the Kubecheck functionality. + +- [Procedure Execution From CLI](#procedure-execution-from-cli) +- [Check Procedures](#check-procedures) + - [IAAS Procedure](#iaas-procedure) + - [001 Connectivity](#001-connectivity) + - [002 Latency - Single Thread](#002-latency-single-thread) + - [003 Latency - Multi Thread](#003-latency-multi-thread) + - [004 Sudoer Access](#004-sudoer-access) + - [005 Items Amount](#005-items-amount) + - [005 VIPs Amount](#005-vips-amount) + - [005 Balancers Amount](#005-balancers-amount) + - [005 Masters Amount](#005-masters-amount) + - [005 Workers Amount](#005-workers-amount) + - [005 Total Nodes Amount](#005-total-nodes-amount) + - [006 VCPUs Amount](#006-vcpus-amount) + - [006 VCPUs Amount - Balancers](#006-vcpus-amount-balancers) + - [006 VCPUs Amount - Masters](#006-vcpus-amount-masters) + - [006 VCPUs Amount - Workers](#006-vcpus-amount-workers) + - [007 RAM Amount](#007-ram-amount) + - [007 RAM Amount - Balancers](#007-ram-amount-balancers) + - [007 RAM Amount - Masters](#007-ram-amount-masters) + - [007 RAM Amount - Workers](#007-ram-amount-workers) + - [008 Distributive](#008-distributive) + - [PAAS Procedure](#paas-procedure) + - [201 Service Status](#201-service-status) + - [201 Haproxy Status](#201-haproxy-status) + - [201 Keepalived Status](#201-keepalived-status) + - [201 Container Runtime Status](#201-container-runtime-status) + - [201 Kubelet Status](#201-kubelet-status) + - [202 Kubelet Version](#202-kubelet-version) + - [203 Recommended packages versions](#203-recommended-packages-version) + - [204 Docker Version](#204-cri-versions) + - [204 HAproxy Version](#204-haproxy-version) + - [204 Keepalived Version](#204-keepalived-version) + - [205 Generic Packages Version](#205-generic-packages-version) + - [206 Pods Condition](#206-pods-condition) + - [207 Dashboard Availability](#207-dashboard-availability) + - [208 Nodes Existence](#208-nodes-existence) + - [209 Nodes Roles](#209-nodes-roles) + - [210 Nodes Condition](#210-nodes-condition) + - [210 Nodes Condition - NetworkUnavailable](#210-nodes-condition-networkunavailable) + - [210 Nodes Condition - MemoryPressure](#210-nodes-condition-memorypressure) + - [210 Nodes Condition - DiskPressure](#210-nodes-condition-diskpressure) + - [210 Nodes Condition - PIDPressure](#210-nodes-condition-pidpressure) + - [210 Nodes Condition - Ready](#210-nodes-condition-ready) +- [Report File Generation](#report-file-generation) + - [HTML Report](#html-report) + - [CSV Report](#csv-report) + + +# Kubernetes Check + +The Kubernetes Check procedure provides an opportunity to automatically verify the environment and quickly get a report on the results. The environment is checked against the following criteria, which is defined in advance: + +* Minimal - The minimum results that the test environment must meet. If it does not satisfy this, there is no guarantee that this environment will be operational. +* Recommended - The recommended results in which the test development environment for the Full-HA cluster scheme showed the best results and performance. If you have a production environment, you must independently calculate the number of resources for your cluster. This number is more than that recommended by the Kubernetes Check procedure. 
+
+If the detected test results deviate from the criteria, the following status types are assigned to them:
+
+* **OK** - This status indicates compliance with the recommended values, if any, and successful completion of the test without errors.
+* **WARN** - This status indicates that the test deviated slightly from the expected values. For example, the results found do not correspond to the recommended values. However, this test passed the minimum requirements and has not failed.
+* **FAIL** - This status indicates that the test does not meet the minimum requirements or it has failed. This test requires attention to fix the environment.
+* **ERROR** - This status indicates that an internal error occurred in the test and it cannot be continued.
+
+At the end of the procedure logs, a summary report table with all the test results is displayed. For example:
+
+```
+ Group   Status  ID   Test                             Actual result   Minimal   Recommended
+
+ SSH     OK      001  Connectivity ................... Connected
+ SSH     WARN    002  Latency - Single Thread ........ 1500ms          10000     1000
+ SSH     FAIL    003  Latency - Multi Thread ......... 50000ms         15000     2000
+
+ OVERALL RESULTS: 1 SUCCEEDED 1 WARNED 1 FAILED
+```
+
+The following columns are presented in this table:
+
+* Group - The logical group of checks to which the test belongs.
+* Status - The final status assigned to the test according to the results of the check.
+* ID - The test identifier.
+* Test - The short test name.
+* Actual result - The actual value detected by the test on the environment.
+* Minimal (optional) - The minimum required value for this test.
+* Recommended (optional) - The recommended value for this test.
+
+The final report is generated in a file. For more information, see [Report File Generation](#report-file-generation).
+
+### Procedure Execution From CLI
+
+Check procedure execution from the CLI can be started with the following command:
+
+```bash
+./kubetools check %{CHECK_TYPE}
+./kubetools check iaas
+./kubetools check paas
+```
+
+It begins the execution of all tasks available in the procedure in accordance with the procedure type. For more information about how a tasks list can be redefined, see [Tasks List Redefinition](/documentation/public/Installation.md#tasks-list-redefinition) in _Kubetools Installation Procedure_.
+
+### Check Procedures
+
+A check procedure is divided into logical sub-procedures. Each of them is responsible for its own set of tests conducted on the environment.
+
+#### IAAS Procedure
+
+The IAAS procedure verifies only the infrastructure. For example, it checks the amount of hardware resources or the speed of the environment. These tests do not perform cluster checks and are intended to be performed both on a completely empty environment and on an environment with the cluster installed.
+
+The task tree is as follows:
+
+* ssh
+  * connectivity
+  * latency
+    * single
+    * multiple
+  * sudoer_access
+* network
+  * pod_subnet_connectivity
+  * service_subnet_connectivity
+  * check_tcp_ports
+* hardware
+  * members_amount
+    * vips
+    * balancers
+    * masters
+    * workers
+    * total
+  * cpu
+    * balancers
+    * masters
+    * workers
+  * ram
+    * balancers
+    * masters
+    * workers
+* system
+  * distributive
+
+##### 001 Connectivity
+
+*Task*: `ssh.connectivity`
+
+This test checks whether it is possible to establish an SSH connection with the nodes.
If you are unable to connect to the nodes, check and fix the following: + +* The credentials for the connection are correct (verify the ip address, user, and key). +* The node is up. +* The node is online. +* The network connection to the node is available. +* The node port 22 (or other custom, if configured) is open and can be binded. +* The SSHD is running and its configuration is correct. + +##### 002 Latency - Single Thread + +*Task*: `ssh.latency.single` + +This test checks the delay between the nodes in the single-threaded mode. The test of the nodes passes one after another. + +##### 003 Latency - Multi Thread + +*Task*: `ssh.latency.multiple` + +This test checks the delay between the nodes in the multi-threaded mode. The test of all nodes passes at the same time. + +##### 004 Sudoer Access + +*Task*: `ssh.sudoer_access` + +##### 005 Items Amount + +Tests of this type check the availability of the required amount of resources. + +###### 005 VIPs Amount + +*Task*: `hardware.members_amount.vips` + +This test checks the number of VIPs present for Keepalived. + +###### 005 Balancers Amount + +*Task*: `hardware.members_amount.balancers` + +This test checks the number of nodes present with the `balancer` role. + +###### 005 Masters Amount + +*Task*: `hardware.members_amount.masters` + +This test checks the number of nodes present with the `master` role. + +###### 005 Workers Amount + +*Task*: `hardware.members_amount.workers` + +This test checks the number of nodes present with the `worker` role. + +###### 005 Total Nodes Amount + +*Task*: `hardware.members_amount.total` + +This test checks the number of all the nodes present. + +##### 006 VCPUs Amount + +Tests of this type check the availability of the required number of processors. + +###### 006 VCPUs Amount - Balancers + +*Task*: `hardware.cpu.balancers` + +This test checks the number of processors on the nodes with the `balancer` role. + +###### 006 VCPUs Amount - Masters + +*Task*: `hardware.cpu.masters` + +This test checks the number of processors on the nodes with the `master` role. + +###### 006 VCPUs Amount - Workers + +*Task*: `hardware.cpu.workers` + +This test checks the number of processors on the nodes with the `worker` role. + +##### 007 RAM Amount + +Tests of this type check the availability of the required number of RAM. + +###### 007 RAM Amount - Balancers + +*Task*: `hardware.ram.balancers` + +This test checks the amount of RAM on nodes with the `balancer` role. + +###### 007 RAM Amount - Masters + +*Task*: `hardware.ram.masters` + +This test checks the amount of RAM on nodes with the `master` role. + +###### 007 RAM Amount - Workers + +*Task*: `hardware.ram.workers` + +This test checks the amount of RAM on nodes with the `worker` role. + +##### 008 Distributive + +*Task*: `system.distributive` + +This test checks the family and release version of the operating system on the hosts. + +##### 009 PodSubnet + +*Task*: `network.pod_subnet_connectivity` + +This test checks the connectivity between nodes inside a pod's subnetwork. + +##### 010 ServiceSubnet + +*Task*: `network.service_subnet_connectivity` + +This test checks the connectivity between nodes inside the service's subnetwork. + +##### 011 TCPPorts + +*Task*: `network.check_tcp_ports` + +This test checks if necessary ports are opened on the nodes. + +#### PAAS Procedure + +The PAAS procedure verifies the platform solution. For example, it checks the health of a cluster or service statuses on nodes. This test checks the already configured environment. 
All services and the Kubernetes cluster must be installed and should be in working condition. Apart from the environment installed and configured by Kubetools, the test can check other environments too. + +The task tree is as follows: + +* services + * haproxy + * status + * keepalived + * status + * container_runtime + * status + * kubelet + * status +* kubernetes + * version + * nodes + * existence + * roles + * condition + * network + * memory + * disk + * pid + * ready + +##### 201 Service Status + +Tests of this type verify the correctness of service statuses. + +###### 201 Haproxy Status + +*Task*: `services.haproxy.status` + +This test checks the status of the Haproxy service on all hosts in the cluster where this service is expected. + +###### 201 Keepalived Status + +*Task*: `services.keepalived.status` + +This test checks the status of the Keepalived service on all hosts in the cluster where this service is expected. + +###### 201 Container Runtime Status + +*Task*: `services.container_runtime.status` + +This test checks the status of the Container Runtime (docker/containerd) service on all hosts in the cluster where this service is expected. + +###### 201 Kubelet Status + +*Task*: `services.kubelet.status` + +This test checks the status of the Kubelet service on all hosts in the cluster where this service is expected. + +##### 202 Nodes pid_max + +*Task*: `services.kubelet.configuration` + +This test checks that kubelet `maxPods` and `podPidsLimit` are correctly alligned with kernel `pid_max`. + +##### 203 Kubelet Version + +*Task*: `services.kubelet.version` + +This test checks the Kubelet version on all hosts in a cluster. + +##### 204 Recommended Packages Version + +*Task*: `packages.system.recommened_versions` + +This test checks that system package versions in the inventory are recommended. + +##### 205 CRI Versions + +*Task*: `packages.system.cri_version` + +This test checks that the configured CRI package is installed on all nodes and has an equal version. + +##### 205 HAproxy Version + +*Task*: `packages.system.haproxy` + +This test checks that the configured HAproxy package is installed on all nodes and has an equal version. + +##### 205 Keepalived Version + +*Task*: `packages.system.keepalived` + +This test checks that the configured Keepalived package is installed on all nodes and has an equal version. + +##### 206 Generic Packages Version + +*Task*: `packages.generic.versions` + +This test checks that the configured generic packages are installed on all nodes and have equal versions. + +##### 212 Thirdparties hashes + +*Task*: `thirdparties.hashes` + +This test checks that configured thirdparties hashes are equal to actual files hashes on nodes. + +##### 207 Pods Condition + +*Task*: `kubernetes.pods` + +This test checks that system pods are in good condition. + +##### 208 Dashboard Availability + +*Task*: `kubernetes.plugins.dashboard` + +This test checks that the dashboard is available by its URL. + +##### 209 Nodes Existence + +*Task*: `kubernetes.nodes.existence` + +This test checks for the presence of nodes in the Kubernetes cluster. + +##### 210 Nodes Roles + +*Task*: `kubernetes.nodes.roles` + +This test checks the nodes' roles in the Kubernetes cluster. + +##### 211 Nodes Condition + +Tests of this type check the condition of the nodes that the Kubernetes reports. + +###### 211 Nodes Condition - NetworkUnavailable + +*Task*: `kubernetes.nodes.condition.network` + +This test checks the condition `NetworkUnavailable` of the Kubernetes nodes of the cluster. 
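+
+For manual troubleshooting, you can inspect this condition directly with `kubectl` (an illustrative command, not part of the check itself):
+
+```bash
+kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.conditions[?(@.type=="NetworkUnavailable")].status}{"\n"}{end}'
+```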
+
+###### 211 Nodes Condition - MemoryPressure
+
+*Task*: `kubernetes.nodes.condition.memory`
+
+This test checks the condition `MemoryPressure` of the Kubernetes nodes of the cluster.
+
+###### 211 Nodes Condition - DiskPressure
+
+*Task*: `kubernetes.nodes.condition.disk`
+
+This test checks the condition `DiskPressure` of the Kubernetes nodes of the cluster.
+
+###### 211 Nodes Condition - PIDPressure
+
+*Task*: `kubernetes.nodes.condition.pid`
+
+This test checks the condition `PIDPressure` of the Kubernetes nodes of the cluster.
+
+###### 211 Nodes Condition - Ready
+
+*Task*: `kubernetes.nodes.condition.ready`
+
+This test checks the condition `Ready` of the Kubernetes nodes of the cluster.
+
+### Report File Generation
+
+In addition to the resulting table in the log output, the same report is presented in the form of files.
+
+#### HTML Report
+
+The HTML report allows you to view the final results visually. All content, including styles, is included inside a single file. You can use the following supported command line arguments:
+
+|Argument|Default|Description|
+|---|---|---|
+|**--html-report**|`report.html`|The full absolute path to the file location where the report is saved.|
+|**--disable-html-report**| |If specified, the report generation is disabled.|
+
+Report file example (trimmed):
+
+```html
+<html>
+<head>
+  <title>PAAS Check Report</title>
+  <style>/* embedded styles omitted */</style>
+</head>
+<body>
+  <div>2020-04-29 10:09:31.096773</div>
+  <div>12 succeeded</div>
+  <h1>PAAS Check Report</h1>
+  <table>
+    <tr><th>Group</th><th>Status</th><th>ID</th><th>Test</th><th>Actual Result</th><th>Minimal</th><th>Recommended</th></tr>
+    <tr><td>services</td><td>ok</td><td>201</td><td>Haproxy Status</td><td>active (running)</td><td></td><td></td></tr>
+    <tr><td>services</td><td>ok</td><td>201</td><td>Keepalived Status</td><td>active (running)</td><td></td><td></td></tr>
+    <tr><td>services</td><td>ok</td><td>201</td><td>Docker Status</td><td>active (running)</td><td></td><td></td></tr>
+  </table>
+</body>
+</html>
+ + +``` + +#### CSV Report + +This report allows a thirdparty software to parse the report result. This is convenient when working with Excel or automatic metrics collection systems. You can use the following supported command line arguments: + +|Argument|Default|Description| +|---|---|---| +|**--csv-report**|`report.csv`|The full absolute path to the file location where the report is saved.| +|**--csv-report-delimiter**|`;`|The character used as a column separator.| +|**--disable-csv-report**| |If specified, the report generation is disabled.| + +Report file example: + +```csv +group;status;test_id;test_name;current_result;minimal_result;recommended_result +services;ok;201;Haproxy Status;active (running);; +services;ok;201;Keepalived Status;active (running);; +services;ok;201;Docker Status;active (running);; +services;ok;201;Kubelet Status;active (running);; +kubernetes;ok;202;Kubelet Version;v1.16.3;; +kubernetes;ok;203;Nodes Existence;All nodes presented;; +kubernetes;ok;204;Nodes Roles;All nodes have the correct roles;; +kubernetes;ok;205;Nodes Condition - NetworkUnavailable;CalicoIsUp;; +kubernetes;ok;205;Nodes Condition - MemoryPressure;KubeletHasSufficientMemory;; +kubernetes;ok;205;Nodes Condition - DiskPressure;KubeletHasNoDiskPressure;; +kubernetes;ok;205;Nodes Condition - PIDPressure;KubeletHasSufficientPID;; +kubernetes;ok;205;Nodes Condition - Ready;KubeletReady;; +``` \ No newline at end of file diff --git a/documentation/Logging.md b/documentation/Logging.md new file mode 100644 index 000000000..55c768374 --- /dev/null +++ b/documentation/Logging.md @@ -0,0 +1,82 @@ +This section provides information about configuring the logging of Kubetools. + +- [Default Behavior](#default-behavior) +- [Supported Parameters](#supported-parameters) + - [Output to Stdout](#output-to-stdout) + - [Output to File](#output-to-file) + - [Output to Graylog](#output-to-graylog) + +# Default Behavior + +By default, Kubetools writes its logs into two locations with the following configurations: + +* stdout - Debug level, colorize enabled, and correct newlines enabled. +* `dump/debug.log` - Verbose level, colorize disabled, and correct newlines enabled. + +# Supported Parameters + +You can customize the default behavior and specify your own logging parameters. To do this, the `--log` argument should be specified with the parameters. +It is possible to specify an unlimited number of logs - the logs are written to all places at the same time. +For example: + +```bash +./kubetools install \ +--log="example.log;level=verbose;colorize=false;correct_newlines=false;filemode=a" \ +--log="graylog;level=verbose;host=10.101.182.166;port=12201;type=tcp" +``` + +**Warning**: Enclose the arguments in quotes to avoid parsing problems. + +To configure the parameters in each case is described in the following sections. + +## Output to Stdout + +By default, Kubetools already writes the logs to stdout, but this can be customized with the necessary parameters. +To do this, it is required to specify the special word `stdout` in the target and then list the following supported parameters: + +* level - The log output level. It determines which logs are displayed and which are not. The supported levels are: `verbose`, `debug`, `info`, `error`, and `critical`. +* colorize - A boolean parameter that adds special characters to the output that provides colors to the text. +* correct_newlines - A boolean parameter that corrects line breaks, making each new line a separate log. +* format - It specifies the format of the logs. 
For more information about the formatting, refer to the official logging documentation at [https://docs.python.org/3/howto/logging.html#changing-the-format-of-displayed-messages](https://docs.python.org/3/howto/logging.html#changing-the-format-of-displayed-messages). An example format of the logs is as shown below: +`format=%(asctime)s %(name)s %(levelname)s %(message)s` +* datefmt - It specifies the format of the date in the logs. For more information about the formatting, refer to the official logging documentation at [https://docs.python.org/3/howto/logging.html#changing-the-format-of-displayed-messages](https://docs.python.org/3/howto/logging.html#changing-the-format-of-displayed-messages). An example format of the date in the logs is as shown below: +`datefmt=%I:%M:%S` + +Example: + +```bash +./kubetools install \ +--log="stdout;level=verbose;colorize=true;correct_newlines=true;format=%(asctime)s %(name)s %(levelname)s %(message)s;datefmt=%I:%M:%S" +``` + +**Note**: Be careful when specifying the format and date format. Enclose the entire `log` argument, but do not enclose the `format` and `datefmt` sections. Also, do not use separators like `=` or `;` in the format, otherwise it can cause a parsing failure. + +## Output to File + +Kubetools allows you to output logs to a file. For this, the following parameters are supported: + +* All parameters supported in the stdout output. +* filemode - It specifies the mode of working with the file. `w` - rewrites the file for every run, `a` - appends new content to the file for every run. + +Example: + +```bash +./kubetools install \ +--log="example.log;level=verbose;colorize=false;correct_newlines=false;filemode=a" +``` + +## Output to Graylog + +Kubetools allows you to output logs to Graylog. To do this, it is required to specify the special word `graylog` in the target and then list the following supported parameters: + +* level - The log output level. It determines which logs are sent to Graylog. +* host - The Graylog hostname to connect to. +* port - The Graylog port to connect to. +* type - The connection type. It can be `tcp`, `udp`, `tls`, or `http`. + +Example: + +```bash +./kubetools install \ +--log="graylog;level=verbose;host=10.101.182.166;port=12201;type=tcp" +``` diff --git a/documentation/Maintenance.md b/documentation/Maintenance.md new file mode 100644 index 000000000..1acb17d93 --- /dev/null +++ b/documentation/Maintenance.md @@ -0,0 +1,1028 @@ +This section describes the features and steps for performing maintenance procedures on the existing Kubernetes cluster. 
+
+- [Prerequisites](#prerequisites)
+- [Provided Procedures](#provided-procedures)
+  - [Upgrade Procedure](#upgrade-procedure)
+  - [Backup Procedure](#backup-procedure)
+  - [Restore Procedure](#restore-procedure)
+  - [Add Node Procedure](#add-node-procedure)
+  - [Operating System Migration](#operating-system-migration)
+  - [Remove Node Procedure](#remove-node-procedure)
+  - [Manage PSP Procedure](#manage-psp-procedure)
+  - [Reboot Procedure](#reboot-procedure)
+  - [Certificate Renew Procedure](#certificate-renew-procedure)
+  - [Migration Cri Procedure](#migration-cri-procedure)
+- [Procedure Execution](#procedure-execution)
+  - [Procedure Execution from CLI](#procedure-execution-from-cli)
+  - [Logging](#logging)
+  - [Additional Parameters](#additional-parameters)
+  - [Grace Period and Drain Timeout](#grace-period-and-drain-timeout)
+  - [Images Prepull](#images-prepull)
+- [Additional procedures](#additional-procedures)
+- [Common practice](#common-practice)
+
+# Prerequisites
+
+Before you start any maintenance procedure, you must complete the following mandatory steps:
+
+1. Verify the environment for compliance with the prerequisites described in the [Kubetools Installation Prerequisites](Installation.md#prerequisites) section in _Kubetools Installation Procedure_.
+1. Ensure that all the nodes are up, online, and healthy (except dead nodes, when you are trying to remove them). This applies to the new nodes as well as the existing nodes.
+1. If using custom registries, make sure they are online, accessible from nodes, and you are able to download images from the registries.
+1. If using custom RPM repositories, make sure they are online, accessible from nodes, and you are able to perform repository updates.
+1. Prepare the latest actual **cluster.yaml** that should contain information about the current cluster state. For more information, refer to the [Kubetools Inventory Preparation](Installation.md#inventory-preparation) section in _Kubetools Installation Procedure_.
+
+   **Note**: If you provide an incorrect config file, it can cause unknown consequences.
+
+1. Prepare the **procedure.yaml** file containing the configuration for the procedure that you are about to perform. Each procedure has its own configuration format. Read the documentation below to fill in the procedure inventory data.
+
+
+# Provided Procedures
+
+The information about the procedures for nodes is described in the following sections.
+
+## Upgrade Procedure
+
+**Warning**: Before starting the upgrade, make sure you make a backup. For more information, see the section [Backup Procedure](#backup-procedure).
+
+**Warning**: The upgrade procedure only supports upgrading from one `supported` version to the next `supported` version. For example, from 1.18 to 1.20 or from 1.20 to 1.21. For more information about `supported` versions, see [Lifecycle Policy](LifecyclePolicy.md#Supported-release-history).
+
+The upgrade procedure allows you to automatically update the Kubernetes cluster and its core components to a new version. To do this, you must specify the `upgrade_plan` in the procedure config, and fill in the new version of the Kubernetes cluster you want to upgrade to. For example:
+
+```yaml
+upgrade_plan:
+  - v1.18.8
+  - v1.19.3
+```
+
+**Note**: Be sure to check the version string format and do not forget to specify the letter `v` at the beginning of the string.
+
+**Note**: It is not possible to skip minor Kubernetes versions. For example, to upgrade from 1.18.8 to 1.20.2, you have to first upgrade to the intermediate 1.19.3 version.
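+
+For instance, to get from v1.18.8 to v1.20.2, the plan lists the intermediate version explicitly (an illustrative sketch):
+
+```yaml
+upgrade_plan:
+  - v1.19.3
+  - v1.20.2
+```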
+ +After starting the upgrade procedure, the script validates the correctness of the entered upgrade plan. If it contains issues, the update does not start, and a message with the problem description is displayed. If there are no errors, the following log with loaded update plan is displayed: + +```yaml +Loaded upgrade plan: current ⭢ v1.16.12 +Loading inventory file 'cluster.yaml' + + +------------------------------------------ +UPGRADING KUBERNETES v1.18.8 ⭢ v1.19.3 +------------------------------------------ +``` + +The script upgrades Kubernetes versions one-by-one. After each upgrade, the `cluster.yaml` is regenerated to reflect the actual cluster state. Use the latest updated `cluster.yaml` configuration to further work with the cluster. + +#### Upgrading Specific Nodes + +**Note**: Kubetools automatically determines already upgraded nodes and excludes them from the Kubernetes upgrade procedure. Use manual nodes specifying for updating in exceptional cases when the problem cannot be solved automatically. Also, if any of the nodes are not available, first remove the node from the cluster, instead of changing the list of nodes for the upgrade. + +**Warning**: By manually specifying the nodes for the upgrade, you completely take control of yourself and bear all the consequences of an unsuccessful upgrade. + +In special cases, a situation may arise when you need to manually specify certain nodes that need to be upgraded. For such situations, a parameter, `upgrade_nodes`, is available in the procedure configuration. Within this parameter, list all the nodes that you want to upgrade. Specify the nodes in the same format in which they are specified in your main `cluster.yaml` config. + +For Example: + +```yaml +upgrade_nodes: + - name: worker-10 + address: 10.101.10.10 + internal_address: 192.168.101.10 + roles: [worker] + - name: worker-11 + address: 10.101.10.11 + internal_address: 192.168.101.11 + roles: [worker] +``` + +Based on the example above, only the nodes `worker-10` and `worker-11` are updated, the rest are skipped. + +**Note**: The nodes are excluded only from the Kubernetes upgrade. All other upgrade tasks like thirdparties, coredns, and so on are performed for all the nodes as they are. + +#### Nodes Saved Versions Before Upgrade + +During the upgrade, a temporary file `/etc/kubernetes/nodes-k8s-versions.txt` is created on first master node that saves the state and versions of the nodes prior to the initial upgrade. +If the procedure fails and certain nodes for the upgrade are not manually specified, the saved versions of the nodes before the upgrade are used to determine the initial state of the nodes. +In case of a successful upgrade of a node, the information about it is deleted from the state file so as to not upgrade it again. +If the entire update cycle completes successfully, this temporary file is deleted, and in further upgrades it is generated anew. +At the same time, there may be situations when this file interferes with a normal upgrade - in this case, you can erase it or use manually specified nodes for the upgrade. 
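+
+For example, to discard the saved state, the file can be removed on the first master node (an illustrative command):
+
+```bash
+sudo rm -f /etc/kubernetes/nodes-k8s-versions.txt
+```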
+ +#### Thirdparties Upgrade Section and Task + +If the cluster is located in an isolated environment, it is possible to specify the custom paths to new thirdparties with the same syntax as in the `cluster.yaml` as shown in the following script: + +```yaml +v1.18.10: + thirdparties: + /usr/bin/kubeadm: + source: https://example.com/thirdparty.files/kubernetes/kubeadm/v1.18.10/bin/linux/amd64/kubeadm + /usr/bin/kubelet: + source: https://example.com/thirdparty.files/kubernetes/kubelet/v1.18.10/bin/linux/amd64/kubelet + /usr/bin/kubectl: + source: https://example.com/thirdparty.files/kubernetes/kubectl/v1.18.10/bin/linux/amd64/kubectl + /usr/bin/calicoctl: + source: https://example.com/thirdparty.files/projectcalico/calicoctl/v3.14.1/calicoctl-linux-amd64 +``` + +This configuration replaces the configuration contained in the current `cluster.yaml`. + +#### Kubernetes Upgrade Task + +This task is required to actually upgrade the Kubernetes cluster to the next version. The upgrade is performed node-by-node. On each node, the docker or containerd is upgraded, if required. For more information, see [Packages Upgrade Task](#packages-upgrade-task). After all the pods are drained from the node, the node is upgraded and finally returned to the cluster for scheduling. + +By default, node drain is performed using `disable-eviction=True` to ignore the PodDisruptionBudget (PDB) rules. If you want to enforce PDB rules during the upgrade, set `disable-eviction` to False. However, in this case, the upgrade may fail if you are unable to drain the node due of PDB rules. `disable-eviction` works only for upgrades on Kubernetes versions >= 1.18. +An example configuration to enforce PDB rules is as follows: + +```yaml +upgrade_plan: + - v1.18.8 + +disable-eviction: False # default is True +``` + +The upgrade procedure is always risky, so you should plan a maintenance window for this procedure. If you encounter issues during the Kubernetes cluster upgrade, refer to the [Troubleshooting guide](Troubleshooting.md#failures-during-kubernetes-upgrade-procedure). + +**Note**: During the upgrade, some or all internal Kubernetes certificates are updated. Do not rely on upgrade procedure to renew all certificates. Check the certificates' expiration using the `cert_renew` procedure for every 3 months independently of the upgrades +and plan the certificates' renewal accordingly. + +#### CoreDNS Upgrade Task + +This task is executed to restore the required CoreDNS configuration. + +#### Packages Upgrade Section and Task + +This inventory section contains the configuration to upgrade custom and system packages, such as docker, containerd, haproxy, and keepalived. The system packages are upgraded by default, if necessary. You can influence the system packages' upgrade and specify custom packages for the upgrade/installation/removal using the `packages` section as follows: + +```yaml +v1.18.8: + packages: + remove: + - curl + install: + - unzip + - policycoreutils-python + upgrade: + - openssl + associations: + docker: + package_name: + - docker-ce-cli-19.03* + - docker-ce-19.03* +``` + +The requested actions for custom packages are performed in the `packages` task. The configuration from the procedure inventory replaces the configuration specified in the `cluster.yaml`. If you do not want to lose the packages specified in the `cluster.yaml`, then it is necessary to copy them to the procedure inventory. + +By default, it is not required to provide information about system packages through associations. 
They are upgraded automatically as required. You can provide this information if you want to have better control over system packages' versions, such as docker. Also, you have to explicitly provide system packages' information if you have specified this information in the `cluster.yaml`. It is because in this case, you take full control over the system packages and the defaults do not apply. The provided configuration for system packages is merged with configuration in the `cluster.yaml`. + +**Note**: The system packages are updated in separate tasks. For example, the container runtime (docker/containerd) is upgraded during the Kubernetes upgrade. + +**Note**: During the container runtime upgrade, the containers may be broken, so all containers on the node are deleted after the upgrade. +Kubernetes re-creates all the pod containers. However, your custom containers may be deleted, and you need to start them manually. + +#### Plugins Upgrade Section and Task + +This task is required to upgrade OOB plugins and specified user plugins. The OOB plugins are upgraded automatically. You can also configure your own plugins for the upgrade as follows: + +```yaml +v1.18.10: + plugins: + example-plugin: + installation: + procedures: + - template: + source: /var/data/template.yaml.j2 + destination: /etc/example/configuration.yaml + apply_required: true + sudo: true + destination_groups: ['master'] + destination_nodes: ['worker-1'] + apply_groups: None + apply_nodes: ['master-1', 'worker-1'] + apply_command: 'testctl apply -f /etc/example/configuration.yaml' +``` + +After applying, this configuration is merged with the plugins' configuration contained in the current `cluster.yaml`. Only the `installation` section for each plugin is overwritten, if specified. + +**Note**: The plugins should be idempotent and it should be possible to install them several times. Also, note that plugins are installed over previously installed plugins, so they should perform the necessary clean-ups. + +**Note**: If you have changed images for any of the OOB plugins in the `cluster.yaml`, it is required to explicitly specify new images in the procedure inventory for that particular plugin. The configuration format for OOB plugins is the same. + +### Upgrade Procedure Tasks Tree + +The `upgrade` procedure executes the following sequence of tasks: + +* verify_upgrade_versions +* thirdparties +* prepull_images +* kubernetes +* kubernetes_cleanup +* packages +* plugins +* overview + +## Backup Procedure + +**Note**: Before starting the backup, make sure all nodes are online and accessible. + +The backup procedure automatically saves the following entities: +* ETCD snapshot +* Files and configs from cluster nodes +* Kubernetes resources + +As a result of the procedure, you receive an archive with all the stored objects inside. The archive has approximately the following structure inside: + +```text +backup-Jan-01-21-09-00-00.tar.gz +├── descriptor.yaml +├── cluster.yaml +├── ansible-inventory.ini +├── etcd.db +├── kubernetes_resources +│ ├── apiservices.apiregistration.k8s.io.yaml +│ ├── blockaffinities.crd.projectcalico.org.yaml +│ ├── ... +│ ├── podsecuritypolicies.policy.yaml +│ └── priorityclasses.scheduling.k8s.io.yaml +│ ├── default +│ │ ├── endpoints.yaml +│ │ ├── endpointslices.discovery.k8s.io.yaml +│ │ ... +│ │ ├── serviceaccounts.yaml +│ │ └── services.yaml +│ ├── ingress-nginx +│ │ ├── configmaps.yaml +│ │ ├── controllerrevisions.apps.yaml +│ │ ... 
+│ │ ├── secrets.yaml +│ │ └── serviceaccounts.yaml +│ ├── kube-node-lease +│ │ ├── leases.coordination.k8s.io.yaml +│ │ ├── secrets.yaml +│ │ └── serviceaccounts.yaml +│ ├── kube-public +│ │ ├── configmaps.yaml +│ │ ├── rolebindings.rbac.k8s.io.yaml +│ │ ... +│ │ ├── secrets.yaml +│ │ └── serviceaccounts.yaml +│ ├── kube-system +│ │ ├── configmaps.yaml +│ │ ├── controllerrevisions.apps.yaml +│ │ ... +│ │ ├── serviceaccounts.yaml +│ │ └── services.yaml +│ └── kubernetes-dashboard +│ ├── configmaps.yaml +│ ├── deployments.apps.yaml +│ ... +│ ├── serviceaccounts.yaml +│ └── services.yaml +└── nodes_data + ├── balancer-1.tar.gz + ├── master-1.tar.gz + ├── master-2.tar.gz + └── master-3.tar.gz +``` + +### Backup Procedure Parameters + +**Note**: There are some examples located in [examples/procedure.yaml](examples/procedure.yaml). + +By default, no parameters are required. However, if necessary, you can specify custom. + +#### backup_location parameter + +By default, the backup is placed into the workdirectory. However, if you want to specify a different location, you can specify it through `backup_location` parameter. +You can specify two types of path in it: +* The full path of the file, including the name. In this case, the file is saved to the specified path with the name you specified. +* Full path to the directory, without file name. In this case, the file is saved to the directory you specified, with the default name that contains the timestamp of the backup. For example: + +``` + /home/centos/backup-{cluster_name}-20201214-162731.tar.gz +``` + +#### etcd parameters + +You can specify custom parameters for ETCD snapshot creation task. The following options are available: + +* `source_node` - the name of the node to create snapshot from. The node must be a master and have a ETCD data located on it. +* `certificates` - ETCD certificates for `etcdctl` connection to ETCD API. You can specify some certificates, or specify them all. You must specify the paths of certificates on the node from which the copy is made. + +Parameters example: + +```yaml +backup_plan: + etcd: + source_node: master-1 + certificates: + cert: /etc/kubernetes/pki/etcd/server.crt + key: /etc/kubernetes/pki/etcd/server.key + cacert: /etc/kubernetes/pki/etcd/ca.crt +``` + +#### nodes parameter + +By default, the following files are backed up from all nodes in the cluster: + +* /etc/resolv.conf +* /etc/hosts +* /etc/chrony.conf +* /etc/selinux/config +* /etc/systemd/system/kubelet.service +* /etc/docker/daemon.json +* /etc/containerd/config.toml +* /etc/crictl.yaml +* /etc/haproxy/haproxy.cfg +* /etc/systemd/system/{haproxy_service_name}.service.d/{haproxy_service_name}.conf +* /etc/keepalived/keepalived.conf +* /etc/systemd/system/{keepalived_service_name}.service.d/{keepalived_service_name}.conf +* /usr/local/bin/check_haproxy.sh +* /etc/yum.repos.d/ +* /etc/modules-load.d/ +* /etc/audit/rules.d/ +* /etc/kubernetes/ +* /var/lib/kubelet/pki/ + +**Note**: If the file does not exist on the node, it is skipped without error. + +**Note**: It is possible to backup not only files, but also directories. + +If you need to add additional files for backup, or disable the default ones, you can specify this in the parameter `node` via key-value, where the key is the full file or directory path, and the value is the enable or exclude indicator. 
For example: + +```yaml +backup_plan: + nodes: + /etc/resolv.conf: True + /root: True + /etc/hosts: False +``` + +#### kubernetes parameter + +The procedure exports all available Kubernetes resources from the cluster to yaml files. There are two types of resources - namespaced and non-namespaced. If you need to restrict resources for export, you can specify which ones you need. + +**Note**: If the specified resource is missing, it is skipped without an error. + +For the namespaced resources, you can specify the namespaces from which to export, as well as the full names of the resources to be exported. For example: +```yaml +backup_plan: + kubernetes: + namespaced_resources: + namespaces: + - default + - kube-system + resources: + - secrets + - services + - serviceaccounts +``` + +Moreover, if you need to export everything, you can specify the special word `all`, as is follows: +```yaml +backup_plan: + kubernetes: + namespaced_resources: + namespaces: all + resources: all +``` + +For the non-namespaced resources, you can specify only full names of the resources to be exported. For example: + +```yaml +backup_plan: + kubernetes: + nonnamespaced_resources: + - secrets + - services + - serviceaccounts +``` + +Another example: +```yaml +backup_plan: + kubernetes: + nonnamespaced_resources: all +``` + +### Backup Procedure Tasks Tree + +The `backup` procedure executes the following sequence of tasks: + +* verify_backup_location +* export + * inventory + * cluster_yaml + * ansible_inventory + * lists + * rpms + * hostname + * nodes + * etcd + * cluster_version + * kubernetes +* make_descriptor +* pack + + +## Restore Procedure + +**Note**: Before starting the restore, make sure that all nodes are online and accessible. + +**Note**: the topology of the cluster being restored must be the same as the topology of the cluster from which the backup was created. Everything should be the same, down to the names and addresses of the nodes, their amounts and roles. If they differ, then it is recommended to perform manual recovery using the backed up Kubernetes resources from your backup archive. + +**Note**: It is not necessary to define cluster.yaml for the restore procedure. In case of a missing or empty cluster, the yaml is retrieved from the backup archive. + +The restore procedure automatically restores the following parts of the cluster: + +* Thirdparties +* Nodes files and configs +* ETCD database + +After recovery, the procedure reboots all cluster nodes. + +### Restore Procedure Parameters + +**Note**: There are some examples located in [examples/procedure.yaml](examples/procedure.yaml). + +To start the procedure, you must mandatory specify `backup_location` parameter. Other parameters are optional, if necessary, you can also specify them. + + +#### backup_location parameter + +You need to specify the required path to the file with the backup - the recovery is performed from it. + +Example: + +``` +backup_location: /home/centos/backup-{cluster_name}-20201214-162731.tar.gz +``` + +#### etcd parameters + +By default, ETCD restore does not require additional parameters, however, if required, the following are supported: + +* image - the full name of the ETCD image, including the registry address. On its basis, the restoration is performed. +* certificates - ETCD certificates for `etcdctl` connection to ETCD API. You can specify some certificates, or specify them all. Certificates should be presented on all nodes. 
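+
+A possible configuration sketch, assuming the same `restore_plan` layout as the other restore parameters (the image name and certificate paths are illustrative):
+
+```yaml
+restore_plan:
+  etcd:
+    image: k8s.gcr.io/etcd:3.4.13-0
+    certificates:
+      cert: /etc/kubernetes/pki/etcd/server.crt
+      key: /etc/kubernetes/pki/etcd/server.key
+      cacert: /etc/kubernetes/pki/etcd/ca.crt
+```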
+ +#### thirdparties parameter + +The procedure recovers thirdparties based on the `cluster.yaml`. If rpm thirdparties outdated or incorrect, specify the correct ones in this section, in the same format. For example: + +```yaml +restore_plan: + thirdparties: + /usr/bin/kubeadm: + source: https://storage.googleapis.com/kubernetes-release/release/v1.18.8/bin/linux/amd64/kubeadm + /usr/bin/kubelet: + source: https://storage.googleapis.com/kubernetes-release/release/v1.18.8/bin/linux/amd64/kubelet + /usr/bin/kubectl: + source: https://storage.googleapis.com/kubernetes-release/release/v1.18.8/bin/linux/amd64/kubectl + /usr/bin/calicoctl: + source: https://github.com/projectcalico/calicoctl/releases/download/v3.14.1/calicoctl-linux-amd64 +``` + +**Note**: The version must match the version of Kubernetes indicated in the `cluster.yaml`. + + +### Restore Procedure Tasks Tree + +The `restore` procedure executes the following sequence of tasks: + +* prepare + * unpack + * verify_backup_data + * stop_cluster +* restore + * thirdparties +* import + * nodes + * etcd +* reboot +* overview + + +## Add Node Procedure + +The `add_node` procedure allows you to add new nodes to an existing Kubernetes cluster. It is possible to add several nodes at a time. +Each node can have different combination of roles. + +The procedure works as shown in the following table: + +|Case|Expected Result|Important Note| +|---|---|---| +|Add load balancer|A new load balancer is configured. If `vrrp_ip` is present, then all the Keepalived nodes are reconfigured and restarted.|Kubernetes and Keepalived installations should not start.| +|Add load balancer + Keepalived|A new load balancer is configured. Keepalived is installed and configured on all the load balancers.|Kubernetes installation should not start.| +|Add master|Kubernetes is installed only on a new node. A new master is added to the Kubernetes cluster, and all Haproxy nodes are reconfigured and restarted.|Haproxy installation should not start.| +|Add worker|Kubernetes is installed only on a new node. A new worker is added to the Kubernetes cluster, and all Haproxy nodes are reconfigured and restarted.|Haproxy installation should not start.| + +Also pay attention to the following: + +* Thirdparties, if any, should be installed only on new nodes. They should not be installed or updated on other nodes. +* Packages should be installed only on new nodes, and can be upgraded if the upgrade is available. Nodes that are already present in the cluster should not install or update packages. +* Configs should be generated and applied only to new nodes. The only exceptions are balancers and Keepalived. +* Plugins are not reinstalled. +* System configurations like `selinux`, `modprobe`, `sysctl`, and others should be verified and configured only on new nodes. +* Only new nodes can be rebooted. +* The file `/etc/hosts` is updated and uploaded to all nodes in the cluster. + +**Note**: It is not possible to change a node's role by adding an existing node again with a new role. You have to remove the node and add it again. + +### Configuring Add Node Procedure + +The `nodes` configuration format for specifying new nodes is the same as that of the installation procedure. For more information, refer to [Kubetools Inventory Nodes](Installation.md#nodes) section in _Kubetools Installation Procedure_. 
+ +The following example demonstrates the configuration of two nodes for adding: + +```yaml +nodes: + - name: "lb" + internal_address: "192.168.0.1" + roles: ["balancer"] + - name: "master" + internal_address: "192.168.0.2" + roles: ["master"] +``` + +**Note**: + +* The connection information for new nodes can be used from defaults as described in the [Kubetools Inventory Node Defaults](Installation.md#node_defaults) section in _Kubetools Installation Procedure_. If the connection information is not present by default, define the information in each new node configuration. +* You can add the `vrrp_ips` section to **procedure.yaml** if you intend to add the new `balancer` node and have previously not configured the `vrrp_ips` section. + +### Add Node Tasks Tree + +The `add_node` procedure executes the following sequence of tasks: + +* prepare + * check + * sudoer + * system + * cluster_installation + * dns + * resolv_conf + * etc_hosts + * ntp + * chrony + * package_manager + * configure_yum + * manage_packages + * system + * setup_selinux + * disable_firewalld + * disable_swap + * modprobe + * sysctl + * audit + * **cri** + * **install** + * **configure** + * thirdparties +* deploy + * loadbalancer + * haproxy + * install + * configure + * keepalived + * install + * configure + * kubernetes + * reset + * install + * init (as join) + * wait_for_nodes +* overview + +## Remove Node Procedure + +The `remove_node` procedure removes nodes from the existing Kubernetes cluster. It is possible to remove several nodes with different combination of roles at a time. + +The procedure works as follows: + +|Case|Expected Result|Important Note| +|---|---|---| +|Remove load balancer|Haproxy and Keepalived are disabled on removed nodes. Keepalived is reconfigured on all balancers.|Keepalived installation should not start.| +|Remove master|Kubernetes node is deleted from the cluster and Haproxy is reconfigured on all balancers.|Haproxy and Keepalived installation should not start. Keepalived should not be reconfigured.| +|Remove worker|Kubernetes node is deleted from the cluster and Haproxy is reconfigured on all balancers.|Haproxy and Keepalived installation should not start. Keepalived should not be reconfigured.| + +Also pay attention to the following: + +* If `vrrp_ip` is not used by any node after nodes removal, then the `vrrp_ip` is removed from **cluster.yaml**. +* The file `/etc/hosts` is updated and uploaded to all remaining nodes in the cluster. The control plane address may change. +* This procedure only removes nodes and does not restore nodes to their original state. Packages, configurations, and Thirdparties are also not deleted. + +Removing a node from a Kubernetes cluster is done in the following order: + +1. Pods are gracefully evacuated. +1. The ETCD member is stopped and removed from the ETCD cluster. +1. Kubelet is stopped. +1. ETCD and Kubernetes data is deleted. +1. Containers are stopped and deleted. Images are deleted and container runtime is entirely pruned. +1. Kubernetes node is deleted from the Kubernetes cluster. + +### Configuring Remove Node Procedure + +To remove nodes, it is possible to use the configuration format similar to installation or adding. For more information, refer to [Kubetools Inventory Nodes](Installation.md#nodes) section in _Kubetools Installation Procedure_. 
+ +For example: + +```yaml +nodes: + - name: "lb" + internal_address: "192.168.0.1" + roles: ["balancer"] + - name: "master" + internal_address: "192.168.0.2" + roles: ["master"] +``` + +However, it is allowed to use a simple configuration, where only the node `name` is present. + +For example: + +```yaml +nodes: + - name: "lb" + - name: "master" +``` + +### Remove Node Tasks Tree + +The `remove_node` procedure executes the following sequence of tasks: + +* loadbalancer + * remove + * haproxy + * keepalived + * configure + * haproxy + * keepalived +* update_etc_hosts +* remove_kubernetes_nodes +* overview + +## Operating System Migration + +To change the operating system on an already running cluster: + +1. Start Kubetools IAAS and PAAS checks, make sure that the cluster is operating correctly and without any problems. +1. Backup the entire cluster and virtual machine snapshots. +1. Run the Remove node procedure for the node you want to migrate with an old OS. +1. Backup/restore/migrate service-specific data from the old node to a new one. +1. Run the Add node procedure for the node you are migrating with the new OS. The old node can be redeployed with the new OS, or another with a new OS used. +1. Start Kubetools IAAS and PAAS checks, make sure all services, pods, entire cluster are healthy and running correctly. +1. If something is not functioning correctly in the cluster, manually correct it before resuming. +1. Start the migration for the next node, and migrate all the remaining nodes. +1. After the migration finished, manually replace all OS-specific information in your `cluster.yaml`: repositories, packages, associations, if any. Also pay attention to their versions. In further procedures, use only the new inventory instead of the old one. + +**Note**: It is possible to migrate the OS removing/adding groups of nodes, not only for a single node. However, be careful with the selected group of nodes - incorrectly selected nodes for removal or their amount can damage the cluster or lead it to an unusable state. Select the nodes at your discretion. + +**Warning**: It is necessary to complete the procedure and completely migrate all nodes to a single operating system. The cluster and services can exist on different operating systems, but if you need to immediately perform any maintenance procedure, Kubetools does not allow you to do this, since the cluster is in an inconsistent state with another maintenance procedure not yet completed. + +**Warning**: In case when you use custom associations, you need to specify them simultaneously for all types of operating systems. For more information, refer to the [associations](/documentation/public/Installation.md#associations) section in the _Kubetools Installation Procedure_. + +## Manage PSP Procedure + +The manage PSP procedure allows you to change PSP configuration on an already installed cluster. Using this procedure, you can: +* Add/delete custom policies +* Enable/disable OOB policies +* Enable/disable admission controller + +Manage PSP procedure works as follows: +1. During this procedure the custom policies specified for deletion are deleted. + Then the custom policies specified for addition are added. +2. If OOB policies are reconfigured or admission controller is reconfigured, then all OOB policies are recreated + as configured in the `cluster.yaml` and `procedure.yaml`. The values from `procedure.yaml` take precedence. + If admission controller is disabled, then all OOB policies are deleted without recreation. +3. 
If the admission controller is reconfigured in `procedure.yaml`, then `kubeadm` configmap and `kube-apiserver` manifest is updated accordingly. +4. All kubernetes nodes are `drain-uncordon`ed one-by-one and all daemon-sets are restarted to restart all pods (except system) in order to re-validate pods specifications. + +### Configuring Manage PSP Procedure + +To manage PSPs on existing cluster, use the configuration similar to PSP installation, except the +`custom-policies` is replaced by `add-policies` and `delete-policies` as follows: + +```yaml +psp: + pod-security: enabled/disabled + oob-policies: + default: enabled/disabled + host-network: enabled/disabled + anyuid: enabled/disabled + add-policies: + psp-list: [] + roles-list: [] + bindings-list: [] + delete-policies: + psp-list: [] + roles-list: [] + bindings-list: [] +``` + +For example, if admission controller is disabled on existing cluster and you want to enable it, without enabling +`host-network` OOB policy, you should specify the following in the `procedure.yaml` file: + +```yaml +psp: + pod-security: enabled + oob-policies: + host-network: disabled +``` + +To configure `add-policies` and `delete-policies`, use the configuration format similar to `custom-policies`. For more information, refer to the [Configuring Custom Policies](/documentation/public/Installation.md#configuring-custom-policies) section in the _Kubetools Installation Procedure_. + +**Note**: The OOB plugins use OOB policies, so disabling OOB policy breaks some OOB plugins. +To avoid this, you need to specify custom policy and bind it using `ClusterRoleBinding` to the `ServiceAccout` plugin. + +### Manage PSP Tasks Tree + +The `manage_psp` procedure executes the following sequence of tasks: + +1. delete_custom +2. add_custom +3. reconfigure_oob +4. reconfigure_plugin +5. restart_pods + +## Reboot Procedure + +This procedure allows you to safely reboot all nodes in one click. By default, all nodes in the cluster are rebooted. Gracefully reboot is performed only if installed Kubernetes cluster is detected on nodes. You can customize the process by specifying additional parameters. + +### graceful_reboot parameter + +The parameter allows you to forcefully specify what type of reboot to perform. Possible values: + +* `False` - All cluster nodes are forced to restart at the same time and immediately. This is a quick operation. If you have a cluster installed, this causes it to be temporarily unavailable. +* `True` - All cluster nodes are rebooted, pods drained to other nodes and rebooted one after another, after which the pods are scheduled back to the nodes. This is a very long operation. This procedure should not cause the cluster to be unavailable, but may slow down some applications in the cluster. + +Example: + +```yaml +graceful_reboot: False +``` + +### nodes parameter + +This parameter allows you to specify which nodes should be rebooted. Other nodes are not affected. In this parameter, you must specify a list of node names, as is follows: + +```yaml +nodes: + - name: master-1 + - name: master-2 + - name: master-3 +``` + + +## Certificate Renew Procedure + +The `cert_renew` procedure allows you to renew some certificates on an existing Kubernetes cluster. + +For kubernetes most of the internal certificates could be updated, specifically: +`apiserver`, `apiserver-etcd-client`, `apiserver-kubelet-client`, `etcd-healthcheck-client`, `etcd-peer`, `etcd-server`, +`admin.conf`, `controller-manager.conf`, `scheduler.conf`, `front-proxy-client`. 
+Certificate used by `kubelet.conf` by default is updated automatically by kubernetes, +link to kubernetes docs regarding `kubelet.conf` rotation: https://kubernetes.io/docs/tasks/tls/certificate-rotation/#understanding-the-certificate-rotation-configuration. + +**Note**: Serving kubelet certificate `kubelet.crt` is updated forcefully by this procedure each time it runs. + +**Note**: Each time you run this procedure, kubelet and all control plane containers are restarted. + +**Note**: CA certificates cannot be updated automatically and should be updated manually after 10 years. + +For nginx-ingress-controller, the config map along with the default certificate is updated with a new certificate and key. The config map update is performed by plugin re-installation. + +The `cert_renew` procedure also allows you to monitor kubernetes internal certificates expiration status. + +### Configuring Certificate Renew Procedure + +#### Configuring Certificate Renew Procedure For nginx-ingress-controller +To update the certificate and key for `nginx-ingress-controller`, use the following configuration: + +```yaml +nginx-ingress-controller: + data: + cert: | + -----BEGIN CERTIFICATE----- + ...(skipped)... + -----END CERTIFICATE----- + key: | + -----BEGIN RSA PRIVATE KEY----- + ...(skipped)... + -----END RSA PRIVATE KEY----- +``` + +Similar to the plugin configuration, you can either use the data format or the paths format. +For more information about these formats, refer to the [nginx-ingress-controller](/documentation/public/Installation.md#nginx-ingress-controller) section in the _Kubetools Installation Procedure_. + +#### Configuring Certificate Renew Procedure For Kubernetes Internal Certificates +To update internal kubernetes certificates you can use the following configuration: +```yaml +kubernetes: + cert-list: + - apiserver + - apiserver-etcd-client + - apiserver-kubelet-client + - etcd-healthcheck-client + - etcd-peer + - etcd-server + - admin.conf + - controller-manager.conf + - scheduler.conf + - front-proxy-client +``` +Above list contains all possible certificates for update. You can pick all or some of them, as you need. +Alternatively to specifying the full list, you can use shorter form: +```yaml +kubernetes: + cert-list: + - all +``` + +### Certificate Renew Tasks Tree + +The `cert_renew` procedure executes the following sequence of tasks: + +1. kubernetes +2. nginx_ingress_controller +3. certs_overview + +## Migration Cri Procedure + +The `migrate_cri` procedure allows you to migrate from Docker to Containerd. + +**Note**: This procedure consults `/etc/fstab` to see if separate disk is used for docker directory `/var/lib/docker`. +If there is such disk, it will be **cleared** and re-mounted to `/var/lib/containerd`. + +**Warning**: This procedure works only in one direction. + +**Warning**: If for some reason, the migration to Containerd has been executed on an environment where Containerd was already used as Cri, Kubernetes dashboard may be unavailable. To resolve this issue, restart the pods of the ingress-nginx-controller service. + +**Warning** The migration procedure removes the docker daemon from all nodes in the cluster. + +### migrate_cri parameters + +The following sections describe the `migrate_cri` parameters. + +#### cri parameter + +In this parameter, you should specify `containerRuntime: containerd` and the configuration for it. + +**Note**: This parameter is mandatory. An exception is raised if the parameter is absent. 
+
+Example for CLI:
+
+```yaml
+cri:
+  containerRuntime: containerd
+  containerdConfig:
+    plugins."io.containerd.grpc.v1.cri":
+      sandbox_image: k8s.gcr.io/pause:3.2
+    plugins."io.containerd.grpc.v1.cri".registry.mirrors."artifactory.example.com:5443":
+      endpoint:
+      - https://artifactory.example.com:5443
+```
+
+#### yum-repositories parameter
+
+This parameter allows you to specify a new repository from which containerd can be downloaded.
+
+**Note**: This parameter is optional.
+
+Example:
+
+```yaml
+yum:
+  repositories:
+    test-repo:
+      name: repo-name
+      enabled: 1
+      gpgcheck: 0
+      baseurl: http://example.com/misc/epel/7/x86_64/
+```
+
+#### packages-associations parameter
+
+This parameter allows you to specify an association for containerd, so that you can pin a concrete version to be installed from the allowed repositories.
+
+**Note**: This parameter is optional.
+
+Example:
+
+```yaml
+packages:
+  associations:
+    containerd:
+      executable_name: 'containerd'
+      package_name: 'containerd.io-1.4.*'
+      service_name: 'containerd'
+      config_location: '/etc/containerd/config.toml'
+```
+
+#### thirdparties parameter
+
+This parameter allows you to specify a link to a concrete version of the crictl third-party. If this parameter is absent and you run the procedure from the CLI, crictl is downloaded from GitHub/the registry.
+
+**Note**: This parameter is optional.
+
+Example:
+
+```yaml
+thirdparties:
+  /usr/bin/crictl.tar.gz:
+    source: https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.20.0/crictl-v1.20.0-linux-amd64.tar.gz
+```
+
+### Procedure Execution Steps
+
+1. Verify and merge all the specified parameters into the inventory.
+2. Install and configure containerd and podman.
+3. Install crictl.
+4. Perform the following steps on each master and worker, node by node:
+   1. Drain the node.
+   2. Update the configurations on the node for migration to containerd.
+   3. Move the pods on the node from Docker containers to containerd containers.
+   4. Uncordon the node.
+
+**Warning**: Before starting the migration procedure, verify that your **cluster.yaml** structure is up to date. The `services.docker` scheme is deprecated.
+
+# Procedure Execution
+
+The following sections describe the execution of procedures using the CLI.
+
+## Procedure Execution from CLI
+
+The command line executive for maintenance procedures has the same parameters as the installation executive. For more details, refer to the [Installing Kubernetes Using CLI](/documentation/public/Installation.md#installing-kubernetes-using-cli) section in _Kubetools Installation Procedure_.
+
+The following features described in the _Kubetools Installation Procedure_ are also available for maintenance procedures:
+
+* [Custom Configfile Location](Installation.md#custom-configfile-location)
+* [Tasks List Redefinition](Installation.md#tasks-list-redefinition)
+* [Ansible Inventory](Installation.md#ansible-inventory)
+* [Dump Files](Installation.md#dump-files)
+
+For maintenance procedures, it is mandatory to provide a procedure-specific **procedure.yaml** configuration as a positional argument, in addition to the ordinary **cluster.yaml** cluster inventory. You can redefine the tasks list for execution/exclusion according to the selected procedure's Tasks Tree. For more information, refer to the [Tasks List Redefinition](Installation.md#tasks-list-redefinition) section in _Kubetools Installation Procedure_.
+
+You can also get the list of supported options and their meaning by executing the maintenance procedure with the `--help` flag.
+
+**Note**: After the maintenance procedure is completed, you can find the updated inventory files in place of the old ones. After each procedure, the old version of **cluster.yaml** is backed up to `dump/cluster.yaml_mm-dd-yyyy-hh:MM:ss`.
+
+An example of running the `add_node` procedure without an explicit **cluster.yaml** definition is as follows:
+
+```bash
+./kubetools add_node procedure.yaml
+```
+
+In this case, the **cluster.yaml** from the current location is used.
+
+An example of running the `remove_node` procedure with an explicit **cluster.yaml** is as follows:
+
+```bash
+./kubetools remove_node procedure.yaml --config="${PATH_TO_CONFIG}/cluster.yaml"
+```
+
+An example of running the `add_node` procedure with overridden tasks is as follows:
+
+```bash
+./kubetools add_node procedure.yaml --tasks="deploy" --exclude="deploy.loadbalancer"
+```
+
+## Logging
+
+Kubetools allows you to customize the log output, as well as to send the output to a separate file or to Graylog.
+For more information, refer to the [Configuring Kubetools Logging](documentation/public/Logging.md) section.
+
+## Additional Parameters
+
+The Kubernetes cluster has the following additional parameters.
+
+### Grace Period and Drain Timeout
+
+The `remove_node` and `upgrade` procedures drain pods before performing further actions. The drain gracefully waits for the pods to migrate to other nodes before killing them. You can modify the time to kill using the `grace_period` parameter in the **procedure.yaml** as follows (time in seconds):
+
+```yaml
+grace_period: 180
+```
+
+**Note**: To disable the `grace_period` parameter, simply set the value to "0".
+
+These procedures also wait for the pods to be killed. This waiting time is also configurable with the `drain_timeout` parameter in the **procedure.yaml** as follows (time in seconds):
+
+```yaml
+drain_timeout: 260
+```
+
+### Images Prepull
+
+For the `add_node` and `upgrade` procedures, an images prepull task is available. This task prepulls images on the specified nodes, splitting them into subgroups of 20 nodes per group by default. This is required to avoid a high load on the registry server. This value can be modified by setting the `prepull_group_size` parameter in the **procedure.yaml**, for example:
+
+```yaml
+prepull_group_size: 100
+```
+
+# Additional Procedures
+
+The following Kubetools procedures are additionally available:
+- `version` - Print the current release version
+- `do` - Execute a shell command on cluster nodes
+
+# Common Practice
+
+You should not run any containers on worker nodes that are not managed by `kubelet`, so as not to degrade the accuracy of `kube-scheduler` decisions.
diff --git a/documentation/Troubleshooting.md b/documentation/Troubleshooting.md
new file mode 100644
index 000000000..cbad73a42
--- /dev/null
+++ b/documentation/Troubleshooting.md
@@ -0,0 +1,342 @@
+This section provides troubleshooting information for Kubetools and Kubernetes solutions.
+
+- [Troubleshooting Tools](#troubleshooting-tools)
+  - [etcdctl script](#etcdctl-script)
+- [Troubleshooting Kubernetes Generic Issues](#troubleshooting-kubernetes-generic-issues)
+  - [CoreDNS Responds with High Latency](#coredns-responds-with-high-latency)
+  - [Namespace with terminating CR/CRD cannot be deleted. Terminating CR/CRD cannot be deleted](#namespace-with-terminating-crcrd-cannot-be-deleted-terminating-crcrd-cannot-be-deleted)
+  - [Packets between nodes in different networks are lost](#packets-between-nodes-in-different-networks-are-lost)
+  - [`kubectl apply` fails with error "metadata annotations: Too long"](#kubectl-apply-fails-with-error-metadata-annotations-too-long)
+  - [`kube-apiserver` requests throttling](#kube-apiserver-requests-throttling)
+- [Troubleshooting Kubetools](#troubleshooting-kubetools)
+  - [Failures During Kubernetes Upgrade Procedure](#failures-during-kubernetes-upgrade-procedure)
+  - [Numerous Generation of `auditd` System Messages](#numerous-generation-of-auditd-system-messages)
+
+# Troubleshooting Tools
+
+This section describes the additional tools that Kubetools provides for convenient troubleshooting of various issues.
+
+## etcdctl script
+
+This script allows you to execute `etcdctl` queries without installing an additional binary file and setting up a connection. This file is installed during the `prepare.thirdparties` installation task on all masters and requires root privileges.
+
+To execute a command through this script, make sure you meet all the following prerequisites:
+
+* You run the command from a master node with root privileges.
+* You have configured `admin.conf` on the node.
+* The node from which you run the command has all the necessary ETCD certificates, and they are located in the correct paths.
+
+If all the prerequisites are met, you can execute almost any `etcdctl` command.
+For example:
+
+```
+# etcdctl member list
+# etcdctl endpoint health --cluster -w table
+# etcdctl endpoint status --cluster -w table
+```
+
+To find out all the available `etcdctl` options and features, refer to the original ETCD documentation.
+
+To execute the command, the script tries to launch a container using the following algorithm:
+
+1. Detect an already running ETCD in the Kubernetes cluster, parse its parameters, and launch the ETCD container with the same parameters on the current node.
+1. If the Kubernetes cluster is dead, try to parse the `/etc/kubernetes/manifests/etcd.yaml` file and launch the ETCD container.
+
+Since the command is run from a container, this imposes certain restrictions. For example, only certain volumes are mounted to the container. Which volumes are mounted depends on the version and type of the ETCD and Kubernetes installation, but as a rule they are:
+
+* `/var/lib/etcd`:`/var/lib/etcd`
+* `/etc/kubernetes/pki`:`/etc/kubernetes/pki`
+
+# Troubleshooting Kubernetes Generic Issues
+
+This section provides troubleshooting information for generic Kubernetes solution issues, which are not specific to Kubetools installation.
+
+## CoreDNS Responds with High Latency
+
+**Symptoms**: CoreDNS responds with some delay.
+
+**Root Cause**: With a large cluster or a large number of applications in it, the load on CoreDNS can increase.
+
+**Solution**: To fix this problem, it is recommended to increase the number of replicas using the following command:
+
+```
+# kubectl scale deployments.apps -n kube-system coredns --replicas=4
+```
+
+Choose the number of replicas at your discretion. In addition to increasing the replicas, it is recommended to use anti-affinity rules to spread the CoreDNS pods across the nodes without any duplicates.
+
+## Namespace with terminating CR/CRD cannot be deleted. Terminating CR/CRD cannot be deleted
+
+**Symptoms**: A namespace containing a terminating `CustomResource` cannot be deleted, or a `CustomResource` in some namespace hangs infinitely in the terminating status and cannot be deleted.
+
+**Root Cause**: This issue occurs when a `CustomResource` has finalizers that are not deleted. This could happen because the controller that manages the `CustomResource` is not operational, for example, if the controller is deleted. As a result, the controller cannot handle and remove the finalizers.
+
+`CustomResources` with non-empty finalizers are never deleted.
+
+**Solution**: There are two possible solutions to this issue:
+
+* If the controller is just temporarily unavailable, the `CustomResource` is deleted as soon as the controller starts running. You just have to make the controller operational. This is the recommended approach, as the controller is able to perform its on-delete logic.
+* If the controller is removed, or you do not want to deal with an unavailable controller, remove the `CustomResource` by manually deleting its finalizers. This approach is not recommended, as the required on-delete logic for the `CustomResource` is not executed by the controller.
+
+To manually delete a finalizer for a `CustomResource`, execute the following command on one of the master nodes, substituting the resource type and name:
+
+```bash
+kubectl patch <resource-type>/<resource-name> -p '{"metadata":{"finalizers":[]}}' --type=merge
+```
+
+For example:
+
+```bash
+kubectl patch crontab/my-new-cron-object -p '{"metadata":{"finalizers":[]}}' --type=merge
+```
+
+## Packets between nodes in different networks are lost
+
+**Symptoms**: Some packets between pods running on nodes in different networks are lost. DNS requests are also lost on the network.
+
+**Root Cause**: The default Kubernetes installation uses the Calico network plugin with IPIP mode set to CrossSubnet. In that case, packets between pods running on nodes in the same network go to each other directly, but packets between pods running on nodes in two or more networks go to each other through a tunnel. As described in the [calico documentation](https://docs.projectcalico.org/networking/mtu), the MTU on Calico tunl interfaces should be 20 bytes less than the MTU on the main network interface.
+
+**Solution**: To change the MTU size to the required value, run the following command on any master node:
+
+```
+# kubectl patch configmap/calico-config -n kube-system --type merge -p '{"data":{"veth_mtu": "1440"}}'
+```
+
+where:
+  - **1440** is the required MTU size. For MTU 1450 on the eth0 interface, you should set the MTU size to 1430 in calico-config.
+
+After updating the ConfigMap, perform a rolling restart of all calico/node pods. For example:
+
+```
+# kubectl rollout restart daemonset calico-node -n kube-system
+```
+
+This changes the MTU value only for new pods. To apply the new MTU value to all pods in the cluster, restart all pods or nodes one by one.
+
+## `kubectl apply` fails with error "metadata annotations: Too long"
+
+**Symptoms**: The `kubectl apply` command fails with an error containing the "metadata annotations: Too long" message.
+
+**Root Cause**: This issue happens when you try to apply a resource with a very large configuration.
+The problem is that `kubectl apply` tries to save the new configuration to the `kubectl.kubernetes.io/last-applied-configuration` annotation. If the new configuration is too big, it cannot fit the annotation's size limit.
+The maximum size cannot be changed, so `kubectl apply` is unable to apply large resources.
+
+**Solution**: Use `kubectl create` instead of `kubectl apply` for large resources.
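+
+For instance, a minimal sketch of this workflow, assuming a hypothetical `big-resource.yaml` manifest: create the resource once, and use `kubectl replace` for subsequent updates, since `kubectl create` does not write the `last-applied-configuration` annotation:
+
+```bash
+# The file name is hypothetical; create the resource without the last-applied-configuration annotation.
+kubectl create -f big-resource.yaml
+# For later changes, replace the object instead of applying it.
+kubectl replace -f big-resource.yaml
+```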
+
+## `kube-apiserver` requests throttling
+
+**Symptoms**: Different services start receiving the “429 Too Many Requests” HTTP error even though the kube-apiservers can take more load.
+
+**Root Cause**: A low rate limit for `kube-apiserver`.
+
+**Solution**: Raise the rate limit for the `kube-apiserver` process using the `--max-requests-inflight` and `--max-mutating-requests-inflight` options.
+* `--max-requests-inflight` is the maximum number of non-mutating requests. The default value is 400.
+* `--max-mutating-requests-inflight` is the maximum number of mutating requests. The default value is 200.
+
+The `kube-apiserver` configuration file is stored in `/etc/kubernetes/manifests/kube-apiserver.yaml`. This file should be changed
+on all masters. Also, the `kubeadm-config` config map from the `kube-system` namespace should have the same values
+in the `apiServer` section.
+
+```yaml
+apiVersion: v1
+data:
+  ClusterConfiguration: |
+    apiServer:
+      ...
+      extraArgs:
+        ...
+        max-requests-inflight: "400"
+        max-mutating-requests-inflight: "200"
+      ...
+```
+
+# Troubleshooting Kubetools
+
+This section provides troubleshooting information for Kubetools-specific or installation-specific issues.
+
+## Failures During Kubernetes Upgrade Procedure
+
+### Upgrade procedure failure, upgrade not completed
+
+**Symptoms**: The `upgrade` procedure fails at some point and leaves the upgrade process in an incomplete state.
+
+**Root cause**: Any error during the `upgrade` procedure could cause an upgrade procedure failure.
+
+**Solution**: First of all, find the root cause of the failure and fix it. You can check the other troubleshooting sections in this guide regarding issues during the upgrade.
+
+After the cause of the failure is fixed, you need to run the `upgrade` procedure once again to complete the upgrade. However, it is very important to check the status of the upgrade process before restarting the procedure, because it may be required to change the procedure parameters like `cluster.yaml` and the procedure inventory.
+
+For example, imagine you are doing the following upgrade: `1.16.12 -> 1.17.7 -> 1.18.8`.
+In this case, if the upgrade fails on version `1.18.8` but is completed for version `1.17.7`, you have to update `cluster.yaml` with the latest information available in the regenerated inventory (`cluster.yaml` is regenerated after each minor version upgrade) and also remove version `1.17.7` from the procedure inventory. It is absolutely fine to retry upgrades for version `X.Y.Z`, but only until the moment the upgrade starts for the next version `X.Y+1.M`. It is incorrect to start an upgrade to version `1.17.7` after the upgrade to version `1.18.8` has started.
+
+### Cannot drain node because of PodDisruptionBudget
+
+**Symptoms**: The `upgrade` procedure fails during node drain because of PodDisruptionBudget (PDB) limits.
+
+**Root cause**: Kubernetes is unable to drain a pod because draining the pod violates the PDB rules configured by some application.
+
+**Solution**: Since Kubernetes version 1.18, there is an option to ignore PDB rules during upgrades using `disable-eviction`. You can configure this option in the upgrade procedure. This option is enabled by default.
+
+If you face an issue with PDB rules during an upgrade on a Kubernetes version lower than 1.18, temporarily change the PDB limits to lower values so that the pods can be drained. After that, you can run the `upgrade` procedure once again. After the upgrade, you have to return the PDB limits to their previous values.
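+
+For instance, a rough sketch of temporarily relaxing a PDB before the drain and restoring it afterwards (the PDB name, namespace, and original `minAvailable` value are hypothetical):
+
+```bash
+# Allow eviction of all pods covered by the PDB for the duration of the upgrade.
+kubectl patch pdb my-app-pdb -n my-namespace --type merge -p '{"spec":{"minAvailable":0}}'
+# After the upgrade, restore the original limit (assumed here to be 1).
+kubectl patch pdb my-app-pdb -n my-namespace --type merge -p '{"spec":{"minAvailable":1}}'
+```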
+
+### Cannot drain node because of pod stuck in "Terminating" status
+
+**Symptoms**: The `upgrade` procedure fails during node drain because of a pod stuck in the "Terminating" status.
+
+**Root cause**: There could be many different reasons for a pod being stuck in the "Terminating" status. Check the pod events to gather more details. The "Terminating" pod has to be deleted to continue the upgrade.
+
+**Solution**: To resolve the issue with a pod stuck in the "Terminating" status, perform the following steps:
+
+1. Try to forcefully delete the terminating pod using the command: `kubectl delete pod <pod_name> --grace-period=0 --force --namespace <namespace>`.
+2. If the force delete does not help, try to reboot the node on which the pod is stuck in the "Terminating" status.
+
+After the "Terminating" pod is deleted, run the `upgrade` procedure once again.
+
+### Etcd pod customizations are missing after upgrade
+
+**Symptoms**: After an upgrade, you may notice that your etcd customizations are not present in the `/etc/kubernetes/manifests/etcd.yaml` file.
+
+**Root cause**: During the upgrade, the etcd configuration is re-generated by kubeadm from its own configuration in the `kubeadm-config` config map in the `kube-system` namespace. Your customizations are missing in this config map.
+
+**Solution**: You need to put your customizations not only into the etcd pod manifest in the `/etc/kubernetes/manifests/etcd.yaml` file, but also into the `kubeadm-config` config map in the `kube-system` namespace.
+For example, if you want to increase the etcd snapshot count from 10000 to 10001, you also need to modify the `kubeadm-config` config map as follows:
+
+```yaml
+data:
+  ClusterConfiguration: |
+    etcd:
+      local:
+        extraArgs:
+          snapshot-count: "10001"
+```
+
+Note that the key has the same name as the etcd argument. The value should be quoted.
+After the upgrade, this results in the following etcd argument (among others):
+
+```yaml
+spec:
+  containers:
+  - command:
+    - etcd
+    - --snapshot-count=10001
+```
+
+Note that these arguments are applied by kubeadm during the upgrade only; they are not added automatically at any other time.
+This means that you should manually add your customization to both the `/etc/kubernetes/manifests/etcd.yaml` file and the `kubeadm-config` config map.
+
+If everything is done correctly, all of your etcd customizations persist across Kubernetes upgrades.
+
+### Kubernetes image repository does not change during upgrade
+
+**Symptoms**: You expect Kubernetes to use a new repository during and after an upgrade,
+but Kubernetes keeps using the old image repository. Kubernetes may fail to find images and the upgrade fails.
+
+**Root cause**: During an upgrade procedure, the kubeadm cluster configuration is not changed by `kubetool`;
+in particular, there is no way to change the Kubernetes image repository automatically during an upgrade using `kubetool`.
+
+**Solution**: You have to change the image repository manually in the kubeadm configuration and in the container runtime configuration. You have to modify `cluster.yaml` too.
+
+To edit the kubeadm configuration, use the following command:
+
+```bash
+kubectl edit cm kubeadm-config -n kube-system
+```
+
+Here, change the `imageRepository` value to the new one, making sure to keep the `k8s.gcr.io` prefix if needed.
+After these changes, kubeadm uses the newly specified repository for downloading Kubernetes component images,
+but only after the `upgrade` or `add_node` procedure (for new nodes).
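+
+For illustration, a minimal sketch of the relevant `kubeadm-config` fragment after the edit (the registry address is hypothetical):
+
+```yaml
+data:
+  ClusterConfiguration: |
+    ...
+    imageRepository: registry.example.com:5443/k8s.gcr.io
+    ...
+```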
+
+Do not forget to change `imageRepository` in your `cluster.yaml` too, so that there are no inconsistencies
+between `cluster.yaml` and the actual cluster configuration.
+
+You may also need to change your container runtime configuration so that it works correctly and consistently with the new registry.
+
+**Warning**: Executing the following actions restarts all pods in the cluster because the container runtime configuration changes.
+These actions are actually optional; you need to execute them only if you want to use an insecure registry.
+
+If you have a global unified registry specified in the `cluster.yaml` under the `registry` section, change it to point to the new repository address.
+If you have container runtime configurations under the `cri` section in `cluster.yaml`, make sure they are consistent with your new registry.
+You may need to not only change the registry address, but also configure insecure access.
+Do not remove the old registry from your container runtime configuration, as it could still be used for some images.
+After these changes, you need to run the `install` procedure with the `prepare.cri` task to update the container runtime configuration.
+This restarts all containers in the cluster, making it unavailable for some time.
+If you use `containerd` as the container runtime, its version may also be updated.
+
+After making these changes, your cluster should be ready to upgrade using the new image repository.
+
+### Kubernetes garbage collector doesn't reclaim disk space
+
+**Symptoms**: There are error messages in the log file like the following:
+
+```
+Apr 02 13:15:01 worker3 kubelet[1114]: E0402 13:15:01.809804    1114 kubelet.go:1302] Image garbage collection failed multiple times in a row: failed to garbage collect required amount of images. Wanted to free 966184140 bytes, but freed 0 bytes
+```
+
+Also, the disk space usage is increasing, and pods are being evicted due to DiskPressure.
+
+**Root cause**: The Kubernetes garbage collector cleans up only unused images and containers located under `/var/lib/docker`. It starts cleaning up when the disk usage is equal to or above `image-gc-high-threshold` (the default value is 85%).
+Pod eviction due to DiskPressure starts when the free disk space is less than `imagefs.available` (the default value is 15%).
+If files other than images and containers use the disk so that GC cannot free enough space, such an error may happen.
+
+**Solution**: Move `/var/lib/docker` to a separate disk of reasonable size. Also, setting `image-gc-high-threshold` to a value lower than 100 - `imagefs.available` may help.
+
+`image-gc-high-threshold` may be set as a kubelet flag in `/var/lib/kubelet/kubeadm-flags.env`. Keep in mind that its value should be higher than `image-gc-low-threshold`, whose default value is 80%. An example of the kubeadm-flags.env file:
+
+```
+KUBELET_KUBEADM_ARGS="--cgroup-driver=systemd --network-plugin=cni --pod-infra-container-image=k8s.gcr.io/pause:3.1 --kube-reserved cpu=200m,memory=256Mi --system-reserved cpu=200m,memory=512Mi --max-pods 250 --image-gc-high-threshold 80 --image-gc-low-threshold 70"
+```
+
+## Numerous Generation of `auditd` System Messages
+
+**Symptoms**: Numerous system messages are generated on the nodes and processed in Graylog; the audit log files grow and rotate quickly:
+
+```
+-rw-------. 1 root root 1528411 aug 13 10:36 audit.log
+-r--------. 1 root root 8388693 aug 13 10:35 audit.log.1
+-r--------. 1 root root 8388841 aug 13 10:34 audit.log.2
+-r--------. 1 root root 8388720 aug 13 10:32 audit.log.3
+-r--------. 1 root root 8388785 aug 13 10:30 audit.log.4
+```
+
+**Root cause**: The numerous messages are generated because new rules were added to `audit.rules`. This is due to the update of the default.yaml configuration file. The default audit settings on Linux operating systems consist of two files: auditd.conf and audit.rules. The following rules were added:
+
+```
+-w /var/lib/docker -k docker
+-w /etc/docker -k docker
+-w /usr/lib/systemd/system/docker.service -k docker
+-w /usr/lib/systemd/system/docker.socket -k docker
+-w /etc/default/docker -k docker
+-w /etc/docker/daemon.json -k docker
+-w /usr/bin/containerd -k docker
+-w /usr/sbin/runc -k dockerks
+-w /usr/bin/dockerd -k docker
+```
+
+**Solution**: The solution to this problem is to modify the configuration files of the auditd daemon.
+
+1. Modify the settings in the auditd.conf file:
+```
+max_log_file = 8 <- Generated file size in megabytes
+num_logs = 5 <- Number of generated files
+```
+2. Remove the added rules:
+```
+-w /var/lib/docker -k docker
+-w /etc/docker -k docker
+-w /usr/lib/systemd/system/docker.service -k docker
+-w /usr/lib/systemd/system/docker.socket -k docker
+-w /etc/default/docker -k docker
+-w /etc/docker/daemon.json -k docker
+-w /usr/bin/containerd -k docker
+-w /usr/sbin/runc -k dockerks
+-w /usr/bin/dockerd -k docker
+```
+
+The rules are deleted from the predefined.rules file, which is located in /etc/audit/rules.d.
+
+**After all the modifications, you need to apply the new configuration with the command** `sudo service auditd restart`.
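+
+As a rough sketch of applying and verifying the change (assuming the Docker watch rules were removed from /etc/audit/rules.d/predefined.rules):
+
+```
+# Rebuild and load the audit rules from /etc/audit/rules.d, then restart the daemon
+sudo augenrules --load
+sudo service auditd restart
+# Verify that the Docker watch rules are no longer loaded
+sudo auditctl -l | grep docker
+```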
z`EF+)r3{SiT7Gi^-vf`2DxAs3pzpR`nm!Ea*BZ3Lz6AYQ2r6G=f*Xt4p1G;5jYdzp zEh5}F;-_2Ix*Yb%&-b9U5X~Pwu|fLga#sh1fSaeE%uzHpsL$KoO~WofH+Ke*8(1@Zr0t-3O&_ut6s#_xh7^^=q zf4^+Ld+uD&4qs#a2JOvduF9;w_8Ybfzkn5hnCVXmC>STSWcqCSJQ~cqUxpHbKA{_h zF#aC>zw3oqSpwLwa^%Um!0)fSH-b?O?;neG*YuyaLD)|TDj;TAx*16z#WA`h;!J4VH~x=2%C4 zmzDK#_+|Q=e^%%n2`U`OL9qlg`d2|T^yD^yFI1oN{=Sdj=1da=?|E2`8TQY6eh;ij zq2RxMXRW{0QmBK1gGkEIch(@4bNiZtu2O`F_@BcItXG5Jncq}9wBy|s{6Yzync-NO z_3ras2l!MVugkbc*7VNYYqD+x*v{yHc>lX+e><9p*l#}1PKkK;i0*0da`N&W2g_Yy zGI*Wte@%1uwrRjy^cG+a5xJ`t{xzspE2~8EKPCYftL~+s){qlgw}U^!M*}L+A|%T5 z*`I;@=dt#{OhBA9P-eL6SF<{((4vt4An2b>qY8m>x}apd^z`nV&}jVx*rfk#7W@fb z0kS&PDMQvFNS~X9fg#_^DeGYUY4P{CCwu`6Nt!`L34d4c?0Nsp%+6SfGTpx|1g`@& zmJrw)998zPwxZ^IE?(`02x|_w)lUIr4-Q6OOL%4jmdbcwn%Z8IqWU=?DZoVwOp41kqR zkU2nxt!C~HBC;HoAR^caUY-tsLc>4|Oc7+3&}Qc?_2=N?{I8*DPSp64?t*W~?+Fka z40k5XtQ&q$vGDohuV7v6jN_vS*n1T^)6 zghBT#!r?p%(f0V~0GJd7QHm-ypMN#(dV8%4)O}Ch*VZHBa}0Yi7YoCnwm+XcS{l&c znurrJ`*aR?eWpnPmH<=jY7Zf?>S?uwCk!Uy!_$4Y_ZTV()|)lSU6~8gGdD2*=LPqG z&`Kvi96l)kxr-qQ#bC&+&JTj+c^#E7a8SB0oDl~aC^jTPsA-l!uDN}`?U@-0LQ2R> zOR!2%CHF}VoYew&dKsj$1q1nN+6kJ5~T26Vfct1BM&Su2#?S9gIsAq?o#=w!1tFg zh@jy=Vh{#aTkJtWX#&G(+W&HGgCXt#SC*dhTKP;_4$2$>lR+??m=4;1*8Xwp3Z!QW z03ub#H_v(b`^JLF6z=DTtCOpJ;xUXZJb3k4F)>e|VDj!+G<&-7nL{_Lnvdo^1S^Sf zj@zWC-$B}^O3El?6l;KA4;joq2(m|20`IVhk`Oh%5nxMXAlQi)@c4U32^oy&z{t?@;iXq! zP&ze2)n%sx~JrJcKjr~B^``)QQb1xwO}y(g6EWhX$b)iU2%!( zphlW}K>259|M$WTHYpJ_u;@}Ui?IKDbG&orAj828cpMM2-uV9BhyS@Z-tEmY5D!aK zLIxaH{^yJT{0W@cn*^Y#LWutlz4*J4fI$)}69(et|E(UN=L9xHH_*&U=`2FA%sUnh!ixCzF+bszYhH3rU+g2UZg?H(#Xtgx2RCm(w=_BY0@z#IgZ5MSG=8vV<*AI~ zTd1nWT`ztzdW`(+K1P3njevl_YLZK!_8?-8B-c9iL-%hmjEmW|T=fY^)(zqKvH<}B zT1CY(n24mhyjX%*92R+-8r2`O>+WF+@+gr*i}+{0xPO`e)TVvQ+j(+S$s7&Z!8|L#;36}-xL779Cx?^{Ta7u9 zX%|P86efjmPHk(f)&S2?d|guw2R7k4{Lu9krg+EeYS@MPB!tux%NC23mSl#5W{x3R z)KxZ{g92vZ`!3ZLixd%wi#-%059~!gxgEOJSs`OqY!ZuP-S2ec`w5+GTG@}$zOtg{ zcxv<$>-z=TngpNi@I|KWzMP_>P&rhS;l1HXVj@&|sMTASX^k@ex3?*=@wJ26lsU|A z>hatww=(0e9-NEf!DaVUi=ugp;ceci-Wr@yrAcW#L8-Q_|hVf{)O zZBLaj0twq8coT2$0;&4|ZBK@eUSmUDx3^_9Yl^nue6SUPn~$6b6&q?Ad(9>nu5p4e z3+c18$W~^G48~Hbl6%%prY|kf2*7C5peXidfJwn=!!z?NV*b`E1Iso;BOiQ@wnm9M zaxK>ve}DuHA5Ilq?TL7eBI)&c@IE{`v;|hbCu`kL6tDp=L0p-}AGx zJCO0yk)CL=!WSpKj}!mICwC&NrHtOD)ls5B51WDxnsHFhrJ<&Q3S?~x)vptI?A_@? 
z)zKP`~OOaZm(VuK7_8OH5T_6 zxAMN0LoX-ooP+;Tmt=W>c0_G~SIV||C`-7-wqiHbH=#SO1#e#a)RSXVyqNFjWj5&~ zkd;j#4370g#7F*Oahm$ABLuDx_Q%iigs^r7`aOs3_5;+n?hF;%4DVx9gb3l?nM@`P zsfnxG0RCVM_Yplo!y3KU;_1wyv2OF7->f6ISdq zoV2)16n%gx2Hk1>>_5MCaPZWf5C;7GRu3N@hC2An6LH%&o3@Y?CX_CD(jK*m<;9fB zU8pGm*u!+;yFRS3hU{WYw@kj zt6|-7Y0N!GPslxQfGzvTIH~xhJ}0flY7q6Qt4iI0D&c{AIR7qmscKdZZG6oc^l>PB#S!v^0jWPLs#Y>YG=D z+t3;dTtsjE-4u%&U2>jSXGj!7+9$=829ty)xY84` zu1Wa^OWD6bnc!$sB zlZT_<@kR{OezvT_0^eD-sY~w`!i!32s};edU=9?Ut{@u-XG~Z9U2bRFYw#2klpC?~ zaRP86|3lK8`IKRNr?+c)aPEY#qCS2yD@euud5G@GFrooIGOH*#@xT7E@EF^o%PiQs zqk53~t*BZ!qnSesrF|&uuR>DAw7O!2_bftYsxz1N9}CMhdD!HV5&;GlRE5RY!-zrE zR7bkAFCd!2L^jnLjsU*R2tx@A%?&Hw?!OD+DnTiuK|>*e#r<;$2Miawuy~UP&u{<;M^wR1HG9R`TNWmm zp$SL2P5w0J)GR)tKmvbQK!0Y@p>L{NFVLt7#kxPQN8l>;( zy|Jitk~uGMa&pNk7Kb4wo701nsJQl)Cm#3lhm^(e3ZZ0r%HOE(UfC?9cCc<`U1SGi ziBRc-QgJyP|8!0a1|4*oWvW^0(@zQn=sE<{XLSdAB+wz4kbX|$JCYiEh!f2fxy7Bm z%iOVe{;jeZzA^xb)gt-{6T&CHXV4ZtL6u7-D5frgjo6T$+9Yj=K`Bu{qoJGQ;+ifn zylNq%hCid?b2{tn`hm6w&P<4!O3D<_1L*JI@bfPSS=TWMk%yFb!>bv7cTqW-LzZYR z|K9$5KDpTI#(;YkYk~pxgT#DJY3Ui1`k^rBL*^lAqF-tL`^hX#9(XyWqg=~^|J{-N z1VNd<$+Ux0@qarh8e)NWpU(_(a{uk2|E<Y6wMuN6%x=NH&&CrSIO`mjGE{SBiMp0!GD_E{`zdwI zG#`fy#a$YI;Nn-yohy;ys&~U<5^*l}lULCC@`;=#owh<@_={Zm_DVFY;$;W<_sFkB z(9j4IiDl4W^*-BCp;9e%c;^OFC%2DaC<=Uf0nqYrnz!XsX`xZ*?yyX(tN?=*6Ja+r zFbHn(!;UKTf2C(?q(OIDUn#!hP%mRRu?K&4xXf;Qs7-H)cR{TER0#ioa@G^UH8j`6kCF{Efrox zH7mN#ZRsMFttL1c7A3A=1oPyG6rBqBN1xyXg}JhFuCXzx8I)||_px-CgyHtMy6sN} zwp)0)om8@!j&ZO%WKR4%s`u{nNqcrldm`K6-OaR>`6Da4l1Z(Wt4^T6>0Newbsr!5 zUfglfc>}XQN5BThaplT)#i;gxTQ$N}{9&o`r?OZhc{VH5;Cb}ZUAEv7bU6!~!${C( z$JkoKjD(i$hO+JH7XvCSzaqN%GI zau~iCb{M&M4ol9*1*?xg9P6E#oOW2~RoQ8VF{bIATRDn%F8C{Y3L__F=G0F`FG;=q z6c0)_Vg!>Y**eEs3_)WqLkKrL!KQI;-dQbmG;@@i5)bM`cE0kmPT@q>1yRWOTxB+Y zTlYlr+F;p#Dx5f(o4!Q<2+f7c|5P^k;&da&sM5Y<|6X_A*$4OWw zs`VVG|2}z?z2DO%cDv)bQP6~bLRE(SRwg4m`f*~lgq-z>$`fufWkWuJ!l6BFE?8i# zl}F_}-kI;}KC9vGW2V`hTB9jWbB7-JREo-&$UKHgApx5apY=em^{89gXH6t)Tu*i= ztXc!MOKdOWbo`gAf4{stJFS zLH>_>U77}H2?ApOukH9Ru8fKgHq;bZY@c=!c2zmzjgVJwUc1@(3UUYg={!VL?8uQ4 z{S*7XYa&?Oy;JA0p9~i9-s&@yu=&r_OJ|$1xG8y^KfTqn%rKDhi+Vk7XS^-mgr>|Uxvw~HJdGb0xlYM&) z6Ii>V=%Y$ZuT=`TEuOFD_Kwvgsgw3enuNwlDOVlVD3_=%*&j}SE~E>A6`SB}SVe-C z273wwB_I)jZk7)<3M)>NNbbQaNZ9vcx4hc9)-AC%{;+?0&`o^qAYF51(BOV@=$KUA zZhad~uSoDx?!j};s3{8?X^LlM@~ z!)S}6AIR+`KcU@pNbW?tO2GBgWz^>*dr-pw#VS6B@XT$Vz)kggjT$$sTebb`uP4#R zpXpV%SU+!2>s%fi>s}%mo47D$E^|X`A)rcWZItY!A)qRXJVjmV?i^Z`kF|CiB#&#po=a+}@;l7ZpvoEJnFIPhzFXyk1oh8eh z;u6y9*{Q5rd1)2aQ!1sR(cQ-PY_YPza+eeKYachpVI=RI%H9aDP110(xFkm{q>}xdYTdA59 z`X#iDOnfrX{&d4m9x1tAs^5HqJKRy%M_>M$WxGbVF1(P<)PA8yZ~#LD|HP2m3N9WQ ztE|f-$(lult*iZ%!{L~U^NSlY$+vwKeDcEaq0zC(uAB_G_Rx$#&ZA=2RVyZ#NLp1g zW@h{*{x>8--&a1_A&hcv8ylOLMELqHY08ll4A!TUmt4`((gIRqME4Kfa3k3viv7T+ zCnAnYwK@(O22IU@xT77^8MgTu6+CKWaj~sP61%bMM_MF>uCtZ8G1T#9FDRVmngTzT%vTwPtB;Mvd*!&_T*OuQDVm$mY#mIik~zga`F!6 zv$86YQ(Wb-^_COl?tHYrghuLT@jSULAtIuqW;deJvW%Sp5~4gLwWn z|JOPy1)R59o)$*m$m1PC);(S6m=+P}t@U<~kC*Bme7q?j_f@@K3hV9DkA4vxG@ak@ zbhffHuc6!Ka`!5PH3TCoKOd_OH?ycndSjz{$HiYw(76+Je+h%euZMC@?MVE>1MJ`W z$A`Mzut)E6cj&`r<_ZJQd-R@U+VYwfwfRrR%?ln2hQn!S5W~eJVp#PY(jB#8nb7p> zN1N0toW?z4VPq}k`j&E9pim`wxCyrshpRkc^1A)4m39C@SS)f9i@JkU8>*CQ98n4* zF4K_^zJsV)o<-=%Pqx*$z}A2B3@W$Yq^gn&G%cobx-*HVj2#4RG=rE z?S`|Y4?tph7f{f6n%@l@jJrU2Pe);ZGI^iH>C7BL`p^aGLpxVIhGyQmX(JTN(og}j zAU(DnTtcV|Lh4d9ot-NSw>Z@DotQ%m$*+9~^a+OZo9NY#PtQ|OCJFir)kc|Fn@eLj z6_4yOE`mSce;O8@h^30#dC!oZEoeauA~zZD}9Z6g6V<%lV2xa#_;*d z$IqJ_1tyfH!)dA;Nj*8P!sJ*Qn(a9xWgKFAIa-)_w%(C86Ox=%rr08>XP3>tqI0lQ zq8L(w{6wc_=61WK<)EaYyUey~{b!2@>GnpUZSQ(geV_Pg9OpqnLM7kMOBAc&WoPZ} 
zqmOWXAH9bzK^B}mL|vFocYLT8T`JMC_!|!l(AOe|B=KEDQ{C`lDDD@&7VVdP2bAuD zAzB`*C=V|M+fW+X9xSj;0^HTkhHMH_3$7~&wc<|*hBj80#B zVmsSaoK#y6HeOhs@88^CUKM(ThCgu>v~rFev;OFC^4tqKw5-Qs)4rlFqIIQUQPdeG z3YmN3N?sov$O=)^chWPyzcQ;mUv1z>9Ehy?Fe4=U4&*h4K`zgZjX5MzYz{-oqA7mo z4m3^LOAUt*5_#w_&=oDRDGz+^K^tNw&%YPQ-^7Ojd9jfoo0Y_R^81v4$wBv|#Na#0 zm6@2453T5?>w4dA6^>87Pm|9us0KFNP+w-&kskGy)`_D8z=};Sf7{+%PUD9A=|$q= z8Wl*CF~izdO+mVDMQg+G8jj7?xNxw9q(3Q9$pDw7xQ&`l=Uf~kuTfeE|2dHI{nn&e zscfaH+NK_6x)Sh%t63J)h=|)BD6`;S)V{+wjUJpJEf_43$jnZIbtL6aRjDWJk_|Zz>$Rh^3UFnBSMZ(Z^8JFb*qbEzudiQ8B#aE6_iqKVbKWp} zZChT{-(MCuobn&j`^ao#@C?Buxboo1&^@g{v%)jgwti_2#ki-E8-DgC(>L)%861&R zjFw92+v-;}LXS_@oVUcAj7wp_Wkm0)z;zLRy_oyM0)kxD%6kR2?m$UiR0F7N)ncG2 zsxvjqN5Srbn7liLQPJK%2{L<@L2hE#8QHkmORvwC4$4#d4s1G^knQyKtM$qsk{5km zJ&eXSxh7d`w}FM*KB+u@3uA3a5z;H){VKT1-;%|x>abNV@lwV7^MTyH6_5L!Wn?me&%2!vb^P6$!jxdOLm+eTzJ+#DAklq6k?Kc-Qd zGrIYvN1XU0RO-~>bkdC1abEUWbTs|CQghog*IF*1TXIh3vvt}muaoQPZ%>$vgbl7I ziUe7&aImzr1(!l~Ve&&KPf=z@1s_RF41E{5G#&4Iq|U*2^DdAGrDj9S@q_&d|NZTz z+62SSW6^%!w6@O|woOflX&+ji1`IKz)N!=NX*>Bt|qb6c1p=}g>xhT7%MZU)c)urkB^tgEI#G8kzNBqexY0t$RSl(lCG-!MoAWBViIuKE| zowi0y=#k89D$6e&6g{~H^1&qFsv6Hncx>M$8BOD+C@OdpwzQ@P@ub>zWT@M=akjl` z#>U2MC&>`c=UQB-#vD$-9oX#iUoe(HK56H1$+am^(6a=IpItyml?-ybmH_>c4v5u^ z6L(z1H-kV0^O7|lM!=bVnoZ`MXv1Pzk2j*zH?1H3j4+usw$kv^oIhbIqs_Q3wsNUK2=`jgS@l?j zMSWP=*r%#9>6*FBA`qRFU(g?bgWYVAuA^QB1Xb#s+Oo2;6l}}VuggGoCf}ccTLq#R z^z*opBB!67j4aVIc~4%_9mpz6_3~VPum@sBmNx~O8%z`T&VbZq*HBVlma-Ti#`en>6jgWO#JqRmO7M%WZR(A z%`qt)TwDQ1m$WJHD)ANBn;OUtxwg90UI-q!47cdXFeyiP&F5BX1oP6A-fXsFZ z5_Yu|Bpr4c_yiT&`}Ar*uc3Bgae%kLC)HrP^X40T2E8 z_d`ENV33o0oIJCA0_3U2X86qkC$|%1RF-6{olDoe*)V70AZxH$#e1nI9`u!MkO4+W z3XB_!Wz$O)MAPawks!lHug5s*J-(*|efS3c`~|Ca>!ZmUN~>zMUF#?1Y60H7w~L#M z78{*adh<0f@Q*GR9AZA67CO@Um<|Em(7^4Cg4wsIRhL>*7e63TWt61ZZ&-l(BfPTj7!D~zz-|6uhQGa@S|?d2Y{4FKlsljDwyFLk=u&e!(OX{)b0egD z-4~yeE9=a92aZQE)(`a+Y@RuX%a~19rrnvDYJOhhQqVqO|k^I z!BIdVO2xEt{y_}LDeW!7D-p}BV>u#J+hv$_+^rMX)xnyV9Q{DfLlP{-=g7G}y)cgS zSRG~Ew40TBHuDW13dgZ2{`JriP%n6sXutAwU}w{T!3l__WWGuH_I^Vo4S6i%l8pOd z?*}FN4-ds76x=&IMJH=S9NE~{u^Y}jAom88??YreL-H$DkX_qacgN~agAT8Hp}T@Y z?(I)H>Y_>2UG5os-LsIJisVznT8${rE^G6c!OU}=cxM4LoI6Cw6G(;dt;3yJGBkdI z;ORZ+7=XX&80pkK1B))cC+X&9{IkpIu;YOw3*s1zKzg)x^nS!Va7SUm^+~){ZxJ_5 zH4aTN*$N5tQ)mvS9w73m3-RxOZ#qJNs(%%~Q33Hg@q5j|oeP^U!nJa=C~B#pOyova z-AzD2OCX)s36=mFAP-?vMx#fi=60<;uOJ?y>2|jLjVSTJjO1#t0??%=y(>Gne^i{q z9-Zxj3!Nf>yz>wbiY)F+VX##4W!ytq?Hx8h*Rbu6u`MkdWo~!#OSO^JEHCu)%ABoxzOg4~&l914w#$6;Ep3yw!NO1+Cw8KH z!wXS3xZY~4iY*2? 
zM0pU@lG36{eIHpH5PD|fRWtxM1<}y@p3jzIJg8N61Fi@aV6siu^{b@n>GP|yX6pu% z_NUG9n3rAH6^3`$jm1t1i}F;dnqXf1&XE5pk4cEUmt4DB_1X-1U+_#16Qb;xCR=!?n@k_60 z@Ib`wOcM!P0@tTT0ujY7aQPzD#LHEnAeBqLDK+>iWyXM?>v(FSHZU{Mm|_PP-|x(E z_A=4=r>cb&nbk&CTX+tJB6meyR*QTeo{dv#-2za>6s`gp5(g4LP6-}16GfI~oz{L+ zqN~f)_8zt}y;UJ_cFs0-Z;{v@94w#ez3mvwx9qmo>;NcVK@|3qKlwD9szyRRO}qT# zx&o#)`u#UtSojR~dcszCx?!k_&GIKK(%t$dXR$zr_C*N;AVjA@PUGm*<5gkuZA*Se z)yu4r*)D_rawDu|T!?JBx}M(6qM%@&NWl2$5*P0K!B4=#x8e z8wSzmL67>e#SegJev!+PM4+U?2fWJ{u8(_Vv}plkG*aC9_LQ3*8F;hjtKos3L7o$q z;{ZF$E}TDV=?eICb}TbA3+DgLMP%{Eg)GcE^VR1CKaTs}@By{bj0NP5u1}fE40vY>7*8Bm{mf>AS$BJ7XsIO_@-ATrx4ze12FY`l*o?bTP4DP9(>zo z_QQNrNx56ILa5@I9C^m=Fun83E-{@PJU)MHra_qwDDRTu~)I3=s?~H5kBi88?^4rYIJw>DH z)0N}FhJx-#0VAZRJPk@833$VJNKe7V@sU7)Uj+zV&_l8`uGQ3yq9nM|NfL`$&G7?G zfadecZQF7Rk{78Ntat9mR{8pTVK3iU789KPNEC_(vkgb4VO+P8VUN}n>1te4B?a>p zo|ouVOw6D=#Gg(0>*}5LBb?9lUT+u05(v=((6~N&j0%GI(h82WSM->F9nYS|6J1Xt zb9O$O6C$nw!mFxbKe6fPfhv*it444sB>8bZ$az zVss)io5Bk84;mC zmdD5QWP?0XL%lV3jfou{$r2B4HABMfhy!bbQn#L?E2yjyvL6UOEW|sJRY`${0)LAkSoI&~beD_Nbun8}^74 zLjZ2u_KfC{J<_Cqe#GmZeo4(9vzHA~lMfKN?X7UQfbqymR!S%QPU8w>Nx(l7Tm(;~ zykJ~O4UzJf#u-vKvsfX+VS)fsT7GjX7gyXE^v{} z<;w1oL91ajWMvs$YTY-p+oZ0bpqE8y&D`bJh&loziZ&ZuZN@T#gIhC;8EocI9~9Y0 z0>Cn5*NX`PsST^nz?vIsM|X8I@2ce@j{|)xE1wxstIH(;#+#igmP7hjPh{s?UaO|A z)r|dxg@$fs>=nn+h8f|*-Zp&JQWFuUTl8tO57rqQ3u=s-QW+ug&>`?AZFk#&>ndRCBqBeqb-$MDg> z$LVI+0{~%6%(^(=UiI#lxv!qhx;)=?86*x^x$a=?i5$DdP9RG2o+XIm6VY6nO!Uc2 zyj3KnO!Q6-{gKfZ9%PbZ`^;FJP+RT0#8-8RJEA*%z`+izT@kDKqWO$f?u4l?tnxZr;N;L`<+#GSUQlS+r=L!TE{gs@{dPpOvnlTa%BaBK7;qJ=bmhD|g?uOBsA&MeHeN{trbS+{3V z%4YO${>Xk8q~Vi!)FQi*F*G$r!^}KTmCO9n%)&)VEuFLAz@(;Gl_zG+wXl!BW+R%Y z{M7wK_~+^%{nm@rL#I;S`0C;{j~m(^33A0~Pp?s|zW9Fa2I@cpBSgE=e9brl+<s{BeM893A3PQ(~OxVMei``_E+KXSr-hnyTyiL-!u$q)LzET%Sz4K zCU4bB=dWK6)F~F0T&d{}t&>N4hCPeHRSRU|j+m5iY|&Wc4bzv9Qm-Qt#AWQ|dY6n> z?)=%L_UE>K(s+xZ*(z*?ZTqQ>DMMQdGvg@U7~PAudTG(-LfbOnYm|zMaiIxuZ_eZb z;5so>Bp^33@-bl8>?psh=fCGZjPOxiK$4cf6k$j0r>j-Vo-l|1pl$bAeQfEA`Iw5D z9X-dUsr+YJ2&u$xaS#SxS-4ZT?1& zh^`{`r9}o&Ta{{|HsyQCGs{BWq!62~v9I}h(P4@beiDk#B;DQc^> zJQ+JNG0_|}|MDj;V> zT}lp0C@COagLJnD44`yKcQc}bA|W8nkb+1_=Kx!}J0z8cA*37Ld))h+v!C(9;u;tCDMDK;;O9*|v`RrpTc|xfKY*2AfXk&m{C>4F-hx%TYnUf(h zzU%;UyxXdSou_#?Pe|sZ&9CWyckMvlzijM&X%WDLs zGef8&${0ndlT^Q#XHzEu5WohqdseZ*az|xEuqfm^tIu2%y9YdT^Q$Ecr9m&9Uu4y< z?68$eJUu5qKwa6j<6So{WR6G}(RG!+8(c+F(3fdK>(5s)jEoT4u{_^X5u^!m8pRcl zDzQf|C;vD_qO~`-2VF)>LKYf~{GMIlSD)feT)8{C+v6UO?zPz6mt|Ek)rscW(aY-b zPpQInnCTR&FgSG`%TJ*eXw&t976Q`Y8W9i>LgcY3v(h66ZmU|2w=~{kb(Ds`Dw--% zxxgy$wM5WVYkK#Ixi_yCA4g0c@U9dgnM9eoRgB_7_N(gLv#6eVk)j<`h2Ysyr3ZO@ zFG;KYEW~_iY>`%6_Kx05MN6?e!jxpYx@Jd_`=tFcLGoF#EsQfX&x5Lj78dKmRg;UY z?0G~vQcUbB9S4}r1=xKpD``{QZ(0oRCGv#gv%0qU1;Q$*w7Ti+{3siR8^+iBu5XDKbA%9vM}O9XGEIkyN!o4<3dnx2cOF z!6Iuu_itAAVbhWOFZ!Ovmj=B+Vp&t1c4abKjiZfIS8R)NJ=-$e+DC^bu75PVnP{W( z$em{p&BNs)2N8WU5;eFMZGU$3PK=u%Zpe^vT-j*HcbYy>n4&7g(=bR(XI|etE)PHt z-`^OF%s3+84y6%(JnetsdjkCo9ZlSIB~o2^mz`-p`YblQ@=n3m>8rZ+6_EAC0Ob_s zG!)y0zvmmDXOx$@z2VDav*X`?QaGEu*?;b8m)?I3j#g9kV*A|qesyPHn~}zPHPOU+ zp0k&Y7&TTX30vvQcJA&=Lgm*NWOYqb4?ag~zmG4UN$uJ9J3KSNU3>6(oUpIHVD9pC z{GeH#(Q_{YpQX>7^T;LF!#+yk(p9)~Mi_0Oc{bb(ZLythHe}(~^{{t8?r@dZSyR1) zW>L$3Fvxc%r~Wp{nR0kN?@D7cLESd)3E|FEH(}xUmCTP8Co*p2ht!&g>2eEp;;o@A zf9x&NK|GPn=ioT@;~NGmNd@XVvtYff-`d$M@)SN!?Qx1)2g9GXTpp3R?(6W)Uu-C< z@$m`Ubr7)HHQJfNUZf8kN2i-Ldo&)SoQqd#&rLcs1dW5PI;;DnTAf;!5JI$oI!)Rf!zy+T?naW0k*jWx%dr^Gih-Hz#t`)Qsg3Wa3NXQ=b0k<9zAMx7zZV!c`NliCS~M#mxG} z>F7X`j8MJU#a!&-5l!{;>@?{e;T5Bhb!X|dD_^@qy)?U@G*yTZttC6e6A+IaFl=H4 zXaDAwsH!RAS+Vye(+AQ-=J>&W;`_*8K6@ 
z$KLmo+?S!cCYB#j*O^9-UC({XKgG_7S2yez^jXMS4w$V<_omQ3lq*QKL|#8=x-+49 zlrR@G-hyjEDeTxHF66ROwb(*-cA*%aU*`UWmSlCiShKG9Y{u}){HgF_$FurljjOw? z{F2Hb=P|i_${c?nj`9to-a>PB@9?Z zvKXI}?TJh!#H>^K20!j}o0s|7Su(G0z%GR6X0+j0uaZ}BR`1&<#m1H& zyJoggqez0|h_yB0k)##Z69PY*WR%mwPF8*Pd9w2^Vir6=`^?&>W&oWJi;C$46!~r{9(#<;XMbvJ*QFh`rAQEMcH{qRHFb)qk+S)NY=qc%KtXz#tD|WD| zK?QGijgIEh2Chh7tX&OU+Y|2WpX2s5e>q(mnY=5OByE0nd?A}A-|O?R_h|8c=5rzc zKtW^|M<9oOXBtYpIEr>~wJ4u9qgq)%Y444PV^=)tTw`uj7nvv!w!M{wtS3@cSzq*H zL{`IV-)0mrCM>Np?0Q52u*Cv$bC6ocuSq@Y3uyl0sk(Z*0df! z?!Y9Z0O@1V(2$FWbM6F|w2xee(A#_5QCFdC3ZzV<3mgG(jQ#5m1?sq?Gr2` zN{y4jO}0P?%6ARpq}E#v7*f4@sOO?IwO5G0KBPb3N~*_yB4Y|ds&BSW+1rv8qeY;@)2Ig-`NKH||*yG}VAkF%P?z^y7ld62=K?fO>wioYi1ibJ_=UbQNgZOY|SVjM8Zo>DoTR{yL3p zH1vOz`@#4X8{xhi&Hl0EqQ%~IqCR@DUcN@ImFB~>%wFt4dYq`ZNO(EJhz`6%N~HN? zR+~i5*X5QTzxKCIGK$mV6 z)T=pNB;7^}bdl~KH?j$%Rgkc;3Fk4is$r-``aMv=XAp!szc+N*Gq}D16S1jTylquA z@Hj(}>IHO^J7e1*vXh)_?%`sfhk{AIrg=SfFl+rQUB7575UUmhIXZO-T@W!#&jsc4 zc^Oa!rnjLmN8cI~O*U=}Vb@vJzLvh(X2mo z>LWqnsn!3`;(SIH=1pkx(JoLetVO7_faG}jjpe1FINW?4W5KZ-P~?j}d?(u`&4}@- zQE9mAf!qM|Y{$$vH4rBLjrA6g5&CV=TqQ2r)imbK!4`&$DpiPDi@+5&geWwB;7V2N z2|^&Rt|DoB zBzRpB`4%Y#%bV#JSkeP0xrjBhWkhDKY@`uW+-WR zt)r9tj#qpOOfm2hz*UmgyW*rA5$!t9LN(K@1EwZm)$W4b2BJrjdtEP}8cz>y3JNeD z8e;X*wo#V#Kl%&AwPB)GCkQb`E|@=|*Koor6)XEsfa^HDT0nNc0JLwTIyxr#xiK2v z+ki2LVv^^sPC-<07~E{G#^c8P^!S`^jNFo!=HQ%hO_BEYY$^C(R2LcZKS1ZvsMYzP z3bx4o{qLjQlxIZu_?1@x2HpXKfuGwr&;uBFcIx~E8{e6;M^Pd_JBR^%?by;RY_gKP z?>LXm$gY}8j)KbB8-kcbFNniO z0J#%T-w~1rRJOS7CIwDLeZywO8O>;#W0M5aSsKVjFWD?+UmNLsH^UxXNbc~F7v?te znD_b9D$C@^uSBrb574 zU7iA3?N==UvO!&VsAFrMo# z8T>YVQ2LW6YZV=`X$v3{h7;y+XY`WaXZ|RSIglj$Tyc81r=K+Iu^4OXv?r-3-O*vz zif3cyJ9uJz#{1dK;mQH~QJHUa^`vLW#KM$n!h*qI=WJDu@}16!g|#ck?LxVS-h-Ms zTj=NsW$M-ik0=i(YeBI>iPk6`$LRY>yK~C3EG6Z0Thp7B+jqmb&87gje`yI&w^UO6 zvsmUU%Cp_sI-|MOd3>RoFYTpzF5d@n0|g^q&43b>b(~e&L|UYH-enYtbhoCM~^j3de6U zOS%P4?uX{~E*7Ua!PECChd|ezVkzzI_BBxpt%WnQLk?+Iy(|S6&d5EzA15kZbe;N+ zv+Trv@!z#Mx#wCHxY}viYQ6^5u<12w(_P?4-fslqVT$zwqY zYDM|^PjVh4&5bw^y(U7xOMziVdQ6$xxprlFi_-ma;q1LZ#9X%$-gLG^L%2YPi!(^n zC#4(!+8slX_{H{Zo=W6l{%f=J%T#%=!5Uw+rm_L1o&Uvh@{cNpNel(ubeQ1ZR}#V+ zOh7$?$W=4Pzchfq8^!Oy;Nc#)bpdSL-(E$Fc@@{akbh$c{2wOr03>-UF>FVF+h7do z&uiRp3Eh9N@corlt^~&ID~iVX&cDhO|NXCkR`4p1n;o-%|0Y-i3V0nG>v1IiVE{z{ zYui^7HS(bU(DMyIN#|hSn)J6r1E#bHyoxrkSl~bO+<_C!r?)WRhx+^Wgk#w60437j z*J}m@c!T-;MYiIJGFj;6pXoY{Kfobnh!}|* zIBcK3ws_(LK-uJ`?NT_IyvU@4(Va4VyDDj*_<9$J9>(QWEhcaqH{l%pAn?CuJW8;|k_4g*z~jj^H7f0byDz<43^fe!1pIvq0ACe@QKbOPd)cPvY)6Y| zzV}BD015Y6VYTl_4HQTrUh5Bel(33Mn*o-bZ6F8H2HdE7;MJv=$ft&;=Af;Z7_=`D zwzwZ(9@nw3Ju!3s7<1k@83gDtXXUT%IJ+zn`*FtrT_AKYc$BKAr?-99bby1QcyP@7 z&^I<_X}^sD3seuwJ3Bj9^!mV32@$0&(bIV6KqPBlVQ6}K8ohZM3@eA}&9yrU00}G8 zuP?9fn@CH)TINlua$XwWELC3`)-n13;1PmRJD9^vlMEpHEXw)(m0Jklm@v>H+(eIN zEg^;>Aa`I##eS85_PM)Vk8}(W2<_0b$!GKm@SJpPZ3k+5+Z-og;gnFA1HYaIfLXkS zs02%w0!v7`c}sXw`07kA&gSSY+ISV0+~-=uD?-TupwS6)1v&6x|_w z$Oynd7Xik^Edju&PPb$0yUr7OaVp8>Oq2xqDJ;XAfOHZ4Qp&y?1E8QmJZvfA$n1}hO zH(LkQfdkX{ud*=mZTrvrLN3N7e*gq(aXye^H36)KWa~XOK>aXeS~&|3`$*&B{7m|y zRB;xAtKHlTSPEov`brBs)f?65HB}+X2A15sNPF%& z*;E}cR+{D}DTampX(fB(VfZyDFpql7>y8q?=*2N*|I{}5u_7RJA>{Unfu>WOkpO)tlV2mzS5d{AmNpeUWs7C*BJ zmqrt6B)E*-BZk;v50GodtBi-p2L%sUOu=3HrQ^l7XfKZf$|y_+P2RkO5n~OIw~kLd zvfu4@0n)#3ot>44S0lNlB%!ukA5;y3sQiF{Hy##S783o@jj$*~ zOB9e3U6)1Wx-qp_vp`m89T{K^>#x-bXv1e^-r%f-9jE`G;pP_%gWdID4ne0e#e75M! 
zl$2U=G&dhetLI$@!J2rZ9Srkwa47H^e*K86fns(if#1(7!drl?l+(U;OQw)#wWs^` zRhdntwpa|+tMJXJ^K-D7FC?-p9c0La7SGt+eG^s~m%J-Ticw7u7Z;UaW3m$5y{vWo zU@MS7Y%Gjk%;7t1C0`;dZh^;E2z!816Qf~G<9-Mvq@n>7h&lO#YK|`v=J$$sKJIJ< zuyj#8Jpz(I&hlJ!Xchy4S}w_w!)cNS3pKA-Z7`GI<}|=-&qqk&zgOg_YHGoh=Wu#O zsicV}ufI6FhG0XKHfufJT9*|Kcc%4GwGV+m^3nMO8=sZ$6!zM+?96DCT;UO;0}&#@ z)lV5~WmHHC&E=Rz7f{S4jp(>%{#N7z)6boh3pkT!| z{^yz_K#SuM#cwV+^Q7PhWkqp@5v@wc^Y@ayfBkYHW=*Svsh`_VgAKT5P~Lw~=BDi=6yxkU zf|8UwVPzw-NHiL~@S4gl0w_xLZdC4modaTevCbUpZ}lx%I5(G5Genu+%KM6bc`&0= zI=1nfS41w}T{@qii0)~i6wCzTZWRDODX5{_o@J<%%w_A2*;Z>kcjlSJkKSNco^E$~ z-ZMT>MyS$W&%B=%t+eSgm}ZVI@cq(le!C>?A>SP2lhww%Wm~gg9af;YMDCb#;T&>A zWCb+A;^T!v9` zJhW2Ub3Km}6;B~*BbU<`5e44FK9iBTZq!8 zXg(+FE2(0Wh8G0%^xtD)dd3Q850Sf^Jla}XW4_URdR<6SeXw+R0re}wgANzmQZ$&M z6BOL>ytGe22*BfQ0riq1+S%rp*8LDo{JSjh5EgwS&b*#@9$6O;J{|nA^l&AD|M(1p&llezfPBPZ z5Y0L+&dJR!X_Q%U5hA^N9U*WdlRnIKSxWiJ)tk%e(n9k&J13yB&ik~70&w3(ypHR5 z*zc_6%a@UbvF7;Cl8%G{4qqCIw*?fZ2z1kd&r?v+|ET_n|Jj~2okyX3HXz}~@Qy?6 zfOT%lZ+Y8Rh?D}5URSp;fFBv-So>CjE@4lik3R>sI~Es{M@K+mxM;IX_O+OBdR(TY zspSAk4V>AhnT^$X+^jRE9YDy}QkrL7@&yAY>h6iJ(%`(0&c*YLDmO}9iGdD8{zUeP3llQ z$68=?$3M3ZKjTyk0689}g z$ije4>jA5FSBeM5d^G`H{E#PFQ71|p;5WaWyy?>lbo0V6VArC0bR4S3K`q{9=<17^ z(?$OMT;NTM>|~40%m9o(h(H#Ot*D3{&5LDonnQdSTW(t<(H~!O0x)!8Q4sIjXSIF) zOr-TWjaUttoRgB(=qd2C8JA{21&MWg>1Z@hsJ`T)BvcdH&h>F22{=nY16urxdxP1g zNttZ=AZyXsvs*lPp5cJIP(FMPkVLl;*8@K?a!OK{^VUjHah7_9%r~8LiBv5xR9acR zK*P&M9HYGM_UTjCj7N9?1-LK!S>tQntggg9YJT7SS5ZkTNxik0vzH?w*w66Gw2Wyn zv!YmFp(qf70#COs#HG1m4Mx0d?m4GX-Q;;5fy?oJMZGO&t)=8|(i8^sT`>o);GmT* zDzY1P+xAJ{LjY!Tk~u&(KAl4;&B-S!vIyck?{QMmVDZo)#TiS-QnA>CB2VVzfgnOCI$m7DC4#B4P97o zCyoSR>YmZsJKw8dk#km{prqVz+#&6**>asil-S%XMq3xuhCE$5%G$?0$8b{^-)U&9 zGSN`~XsSuYzB}6n#5)t6xz0`wDkzSCDy2m{uT2cfBVAHR8|1VmtDM(GynbAK&Mk-p zUeTQ;?p?*DBeHRa9}j(&55&(_RK0x#%6U>$5|dq?eAY4YxnVZ)j~Wya46V_zDr~rjN7#|nnm|LV_(avVtsYb(Edca;S?C6x z|7j7XRvoWn)y=F`1!>yFUS6JqhY+IzjcQVSU1` zUYJ;1Vxk~wGW`iTfO|6nQHIn^UwAlD$!dT`y)OCvzFQwhWJ1!)dJx&gGB$~C@ZRNX z)KD@qvVDB+?0#dArzjzC9-xme)?6$uZGC;d(kSCTVas0yifYygRmQ3A2rne`#*6Q0 zQ%QI!dRa_>-yy5lm)xU~qOTxX;B7+f(kGE_a0~`;Z<_o!dz@WDhw@%WcCsu_C1`-sxkEg->b znWCM6B8g?wHEz8zYLOR8#joPxp@Dk;`X!7DVikU$hGh#l{9jCW%kPH9#|4*uK+6Q+_fOzoPyJ;TMMHeWLCeKj#L zQJV8=gaU=7wKXw{M2jB}^=Y85Pwn{8l_xLku8f;Vx%UBOw0Bbm_N$XRlA-!Cwr%}^ zamJCz=%`wvU>hY*&EuajvldFjqoWiC5B4>sqZX3sd$>OGV`Hk4F>wfQS@`s$mbIW? zacP#DcV@?g_XJvtXbZtEF?~o9J2yXVT`+X$8ney>xDQ=+75qb;l|C3A|BU^vO>GwQ z5=22mpU$-MA_2^6jh9H&u-_M>ruO0$tD`MQ?3%|+{wws{XH#aE0L28{dXjKFSiUc* zjCbQFkjSg%#=d$5{cfS;c<1(`SH&H2xY55t#LoZ;KM`Q}+y$(FhpyX8zqi066!0iI zMy~*BM^GF#2>mWgZB`A~pv-!D0X*G-6L2TJNK2pm|0n=20?gh3M4B?Xn&n;6ArBYP`{#gCL8#WHYodi(F~WFdLOcU{rAVB2BPj_ zodUvIpC-nX^C4nI=~VaMpF8fYa!7_a{Nwfh>`)Tu#uKjk&7ufLN}<8_d1W%E#w%tXoI?h}*u!8&G2hQjhJHuJ>3|pY%Ppa^kEPTqUa-1PzpOll{p6^H^ivT!&h@B9u@wCT^`g zs>2V8ZxS3H9@aM4qrSD+m5JWW@yZrDLyB3*$NR6p^k?XM(=aghM6ZB^uMzsQ5HdSw z?rA@ zN6e=$V9XFA5=9dh9H$PqN0zs+IB?e3Irc|paKHOWqy~Vu20Qie<0=KSxU+xvaULDa z-U+?IHkD0D6J|$rOC~k9sd+e8Cn#Rv3Zp|FOa(q4En4m$cJ~W2Vlu9cy`GuX7kvJE z8%5n{sb!(EP-YINj>TPFT}b|o`4%a=qsIojA+WH3%C>&Z7?>!cfS6*d1Iz#2DmxQ< z1vBAR#10GJ*3fj)s=5Jq<`>oBWbyn8=7<{A;al$o3epO?W3Zj(1l;Xyzsbztuo1O| zUOyQkb~W=NZ=1^YD(B*Z9UXheb9R&OIwUQ%Rt{Mk1gA<)=-ur@an}x3H4z@wvLMRu#cE#L67qd1V6{ z5HR;QxXg9eO3*8P5S!#(!+JkoOW8o?2e^4;xcfP}LkIoXBjefD0&nLiSI3{LKg|~~ zvzc7;b2HZo@7?_}tsTdGrmtm_ba*^h8myA0f@OSG==+{`sf1hT8Sgs@Y~%Gp zRQ6c*RRg8hzN$~xeOC>esl1Xi8vzRhc)hps|;`1xTYO%4#7P0hy3 zln)SBv8t8eFKudrrV=RZsNz@+pbpWnypyt-#-gS-yXt|Cgy0ed1Gyqq-Un6HYSO7q zV%laviUN7{ZuJx;h8+q-0{Nu-+fB5>dL)I?-i-;Et@J~=}|26ITHOO5Z#T? 
zSud@F?AdE04hf9%PKd)RUBBtee2CDt=!AS6MfrYS_%>QW3}HOTAwCn$QgQTo7ir=} zeD8K^(PqIJBni0|`cd`rQlf89uKF;^y(-mVst+;O-`($XoxQ`Ix7C470Lj3Kt|Q~- z=#heZ65f)4j(FureP6RkNnrG$lTO_g({5C!9#tDPS4Ipt!mG~{+{T$43>%IXJ&D9g z+9MhoK1A>1P~N%lTi4c;qey8;nzgvJS;xW#cj%UG0wN&`l4!^ zmKREnRSs6iJMCZ3#Y5!JjvbkYej8pK0V7;NK01v9(O=iR`-Ji?f*h+_6^`XJX}1&L!Q}Jb~QJ+ahWiJ69NNjD+YHY*BhU$_u-a z2ym*IgxcS3TxvyIK=grpqy9acz=m>&Bm>8bhs6b33lgdP2ud4aJF76Aug&fR5Yg*H z@Cjbys_ZJB7&mMgy@|V_Wm9C?7K~WddFu(@PS+(}iQ}-X3)U%%M8YTBA&l60>yg!~ zOlqpBrzaglR1>da?w(LGU+1@*jO*#?;W(`^A{>i?)haLp4C(xQrb5LM$wvM8u}!rr z`tb)b%jkv#$c{cr>iMwOShjV$O~p&ty6@+vxUd{eYPyX1I26uMn8{M2x9PSp5Jl|Q3UDk9_`Im7XtLdOttZf-OLD@pzR^_y8>wxeoJ&z9 zo&|KZw}23jyE3^g$rA=b3h7xgp|Z$;C1KkXA8)3m{nPm1!Hww^KtIh z)z)DL>jr!@Q37K<(dq?KC#mHP4sSv(g-6X)%JCXKNBN$(t$X%~Au_OLAIarS32fP} zIG#g7g%k7=XnZ{*U~AYr?Hhq%IeXqGTI`y8>kDP)9&rr|Z5i=2g&DX)BH z^q?GBEx9<0g(I8=w8zwSpYIVzL9z)(}uzy_zwk=j)h1TeapcL-Y7e0J0tcOHX%wVa#exId(!R<*l`Ox&9g z`XHv^gD}OCufpDyMG?!5gS~*pvxJ5_h%uj~3!moZv!f;H54?uUep~aZYI9NLB^%va zBr|hlg6~B_mQZ3%T7^6lBbE4bt3QvPan2o_BkA5!U%;v9>=)V=AVEdmha%Q%?0(Hd z+pg*Jj*uX(hvMP$E`=wlaYA+m0xYUfoEQ}6Dm$%%E$mjJ_*3#jM@*4p9LB5$`wsY(2KUgTxI=jg5R)6hIb2IqpU^nL6+#<&j9*>{92!Wf z(`4WVXY}g?Y!%W&!$#D!t|8p8XA>-%JXa-W?`Q+b=o>gmNTiB|g~cE^#jB^fX%pX4 zg(-#dFUi1FVYS00qudDi<*h{4Pa+5_V%f)+5k8k4qwPOk1>Rp%Sx@ESZAavBM5P>= zvj_qL(J6q(BY~LN3djoGSF2&U{*D=%)spCZw}2Tsz52~29##-=*iEK|Yv~4$XzgML>5zVHrInI^tQ(tBRHXcJ9xt|7`5&dtZgvz zb!n71rA$93?CX_5dUtmh1h`dIK_1cRK;_(f={;Nl{9GPW1e-R4Zyy3iBb`Ai``RL} zGZuyi382BYEuj@zpustXwh&Ly;9gZL2$+bxEB|URi3gAX%LeHub1x7?qHyh|p$Hix zy`v`c9VD!iLH4bAWiDJCX$az>+@YU7G{7Ln)~W4$L#vS4{A1GPONeI3tUFio3#^Un z!5QA9NWv#<&@l4B`*Nv*2+(OCFOZbVvadgL^8)k+;62w=)l7QWcfq8J);n1YOz{ff#pqqlZjvTX z`HIzXL#n}ltC_-G{EM(ysc_va!Po9Y{!YF;x4+!cL}1WqMkX;AVX*-3b1w{IT-~LU z%hHj4?2hl77M}j)Jz(!n&1~>jcwAujci#uV-U37MHpb+4W$gdG*&@<`A&~ae{+}}A zf7+-s!A75HDH!u_`}05T!aWwKqk6Q?!k=#WA2#K`4fyW}{C5QYFGk?v(lyc>VFjc7 R5|_X~d1)1?f=5qZ{2z6PL*f7c literal 0 HcmV?d00001 diff --git a/documentation/images/mini-ha.png b/documentation/images/mini-ha.png new file mode 100644 index 0000000000000000000000000000000000000000..5393b8d483158c27236d84b4e5011aae3fb4ab9c GIT binary patch literal 44766 zcmdSB`9D zVy_T`P62=0z#q6gQ)I)L@S>xmX`rp=w0yn?lfnHvAVO0c46u*r;l<|i{exI4a5VUifp@KghZFe1nyRI7eh~n{l8IBB30vpW1LQfl^siUcb06**_SW<{qDljwf z*_X{>fHy3|i{lSn4byuE?7 zzZcRy2xYN65ZA+xv3xTdAeRGRTF@Z+4mzH`2!A~b8(ke6F3pkb>ErJYw+ZEwLbUmQ z9(tr;9fZGbAc0^PU_~O>aCHy{Otw7=7RIq>3sC|g)xjQyquC*a2zn5YLl3s#2ZLpL zTLyvgEwpX9K74O;m>!XS);55?EVM&H&KN#S9=JSk@EFkO_5 zfs;PCz@KT!$5Oq8I27C4lka1W12=IB!3KKh`uSq?3_?To5gvLWpoNHwT^Q3bEgBZcy7@oNYJ_Kwr8fLAl>tKdqgpu@pwDr6~JOv?EdO~v?&&$gO z3=GZ)1%KcZ3n?`44zcD~hFJRQf$vx^UCR(|2*V6%!w>Xkd6R4n%q%_3f++fQD>FI? 
zhT}Toklr?4Lb|}x*AqrE7wQWw$#`vsCtBcVZOOLOp`(TRNPT*!tq&M3fEW~DXQdMy z<^eab2}Qx^`g9v}e*v1{=Py9eNjNMxho%j~kb-$$m>>hEAf7fCfepa1d}vgiV4X=AoQ~cMPmZ}h5oo81f0eJ=7Oe$!OZpHY$hA13Yz1C3t)Sr9UZ`D zj2@0r2odK5^D_vb`GhiRGEv0!>w*r3A5p1xt6 zU>Jf3gai{}DNF+!e;qxnJrzw8vOeFMBOOj`jAM+gZL8U({_9PRb6G+U;PBLnWFr*E(C%Mk>_^=)ZP zaCafy2VqWiFt8umFh(4RG?NpgmbEa)^zS zPbilQCx_{C$TTZ20ysm*lZk^2hRZ@CgxYj{3iur01bi>rfGQ+6)89P2dm~Y(u0t2=u5{NVC8&7LCR6 zV0kcn_5AE;C>#@E7HV!Q04^U5L)ltk2*E;aU!pb!L$hNC`g?h^5XcZ}pr@xjf`!5m zLcBw)wC(+fK({UEBy+MhQCl012)4Ac!+If*o`F2y0F+~BAkx}8#FJ^^&n97Dd<#z+ z$I?-NM|zo~^eAK&+7so=5I5OmQN!6*kulrBL}R|mnj zH+NvOnBJ@)rkSmf%V%kGyd3cUR<E(PL?SK7{%Ct9^c9CD;(7c)FErcBjKHz=vB%k2LRW_b z^3Vi~g)NE_%*3(CEPjvy28Yqdk#Jtnz1U{x5HyW#>qj@h;&6CvTZ|pv-_sr(N6|5s zSQ6SI2xW&y!BH3r&KqlBj==y!ClEM7yuT%m3(iK{IGRy}0_ax$mR`aDA2KpnpRLCs zLz+i~S%&%2y&OR}Me7MDXdgZu3*!YL9YaZB^gyRz4BlQBYa{fB%pFgpTKn5t2G|B* zLvd8LCjl3XqOpU+tk}UgA;Q4YoaW(22tf$wa9cbVD7GaH&Sp_e1L_2fj9w zqJs-@aB`wyaMpZ%U5daP%*Z#uU!UaXj{#vai0#9H(;V#VXiRelU5b?j8NueT5e%F) z59JVm^+8~Q^|-+XIIJFthts#SqF}heR1Yf}0jn2gW3I!q3>7#bJ+Yzw982O-7$I2W z0zGW7{sBBBLBJN$>Aplin3cUJ%gzgl3x#1x0Vo>M3r0X`lO27n?akqiei*D12$2>r ztfRjTA&hAmfFc1jVF6}p;p3s}Z7;CU<#18Dbo)>gfk(!Au>AE22$UBkkV&&8Q+am6 zkU)VI!b8^>M z%_1>~Y@og;I61L6jlvlq9yo7|F@PiG~Vc!5q21 zz~xz5_`!X2%&|_^Y@7uHPYPpM;DWSCG#?61ha}X4JfL>4BR|02!Q7vsi=uk@g*fR6 zcoqcUi_yNpNDL)_9!f@8`w-yZR0~}c4y{8Jdf4&(&0sntG?L;J!qA0zSo+aCtT|8& z;aNFY2kH5nhmgJW`95|QK~Qie(}Qfdes)YW8>44IK;oT(%m_YKC@(TU$jhG)%AJ4jPVsRo&1Pwa)1X}+lh>aoI0IPK+sUZ2#yyv6dgdpdm_T{ zAv9ofXgH5W_3*c$vgvjLx*x-VK`^tU5zVn+9%w$#%MVNRw+*2%xnw;bT>${{XmgAi zSJ%Kh5E&Zm82|-dj&&%{p3BCf>@b#gdj3cQu0e>uHB3+F?ZXIzS>uCHJS!R>ZV(*G zv~{#**b(jc7VI#Xy*=N+j07`h_yrSn{46LuTPnsH=1Zia?LE;X0ukxt%g|v2(}ZCb ze6}ZyW#ucR`@!w~yv_X09eE)<8kQP>rckI{G>;i-LG^{ejSt+1Lc>sj3V4TFBEtxN zL3A)dbAPxOc(XNtpgMdhqAaID%YYyJ^fz0AWBBonAek~xYA8?)RTYARa7@V{O7E!oE(Ytl>F^{Uoy#H+9tkrV{0R-Q}#>=X$hD4fU8W>or%jvLky2n*G zIz8G+>b~peJXpEkCLmgUwdk#nw@X$ni@Q;ldXJmd6Y={ijM=yYIg}doD)FR4ffMfK zl>@I6&)(P*VZyieyR=bE=JZ`tH@EcRSe(h9`LcYcJNT*RIrsV^oad%Sl%A>wTImKY2Q9s27n=-VRX~9>{Z$sd@RbTaPIo-+oxdwLzC{?tCcxh?r z=WC}FFGv3Q-3JT)LP5@cj#KX0ecKtR(-pDtrL`7rd|MHeyF<_u0Asi8*{XdwYIK5h zQWjiKm3oz=V^;WJ0zW%=)&B;!x3c7Ywdzl-=|H_twheTD6S2bT87CYLcj?Ec=fAJl z#cu$*T8!$?xyrpfHs;&v#CSY-v{q77y>F-J3P_EFZ^WRwMhgO{>6&-(%~7nv7KC-H(>YSk2}(s;P-e zyj9NpB_P`7&LQ6*+Z_+8E<=^1o`&^5zibNvqY-~)m?UBYEXkflKobLGSz1Z@h4R=XW#s)L^yc-hF zkodQXA;sqjw%D20)s7Rs!sqtuwt~~;C5L0lw{G3iY$$=3;S)`m9a0t0C_9udUc9I@ zJ3AXQy5Vws{GQ)G#}l7CF@H|IBnxg^M-=UDkKLKGyGY0B=5Bqu)YA6-ed#BQh^l8e z9C~W;Z)frXxQM@JM&*hRZ}>R!C7ZW&h3pzIO^Fv@{$~t#Fvgz_BNgnw$B=-=5Tge} zGZ+=+&rS;0k&PjL6>TX7etF~N3Vng(0=;~{vqB`H=yt{<<$t*c(>*}nE)ADxLN;OA zBQ|_?i@q@AJmhF(cAJ{Y9eapf8t|j2nC4Sr(y=8E6s2Nn%D{b(`5$*gu>nr%*x~*E zdzAVwb@mV+o{+zC;>_~Gwty9_7yI`FO*1cpvlIUDLV%-6ZlZCjq$?hePvRmXB1-o6 z1im}~n`Ks_SV;g|2{VP>pyF{Bn70mtVPjDlD_XB&@5th zsBNRjZQ8V{d;FzV?f%$96Q^HgnDkjSy?Pa&Zgd165^`6vPPbV&QLidh@ObB6xyvM* z+R<;9=`9Inzj$HoqLBS2>%@Lq>5XRYJ#Mf1b!_aFPq8M(2H`(GJ`L<<=hlO7n!=rJ zGhg2gWGA{^*715KmZ;XW`r9@2*bOh2T=g&esLkrN9ft;hE_6y-CBrbA_fwUmP3Cze z5BhkI*9AV?%6t(evhvturC?mM=)uHU$s9eu?mewJ#5geQ>Y+)IzuUQ6UMcfz&&31K zu(tAV?{E@Xqiz34ZoColwfn@(&)Po=B=6TLjLP3~DL;PiT)BFEd=BwygP<>5_0L>q z?V|%98`$Nk6{X!Tbio4qkzg~L?A649s%xzoKD^Pjl7FeJ(U7oyhr!{qH^-06j9uas zRd3WPIUbA*SkP}$ZC+X5ae0pq2e8!t@Zn&?s^8xI4 z@%K$z!IXK&>tYlR-f*iHdXx#@`Q58GlQyFGx=jwgW_)S8Rm$x(+zO+gPA+m?rB(5H zL;Z^v+Py2z6;K}iQ(msyRej{%jhvh-#pXx%=H9rGWGfXH7xyVnWacd75L}{mz8bq_ z>U47~fUW^4ahq8AfSRhR>aUjHH+4s3hJW@7GCmIuu5#D!J6g6@@etJ^pX%XsyV2;U z%k0NcPIF zlNYwi{y3wWeo;Bb;E}<}!GAuCm?3^GnVC(>3fDz^tA`@A4eecEo?-fE!~ 
zIN67M>rv~TZo;Wu){I>E{WB3bv*e+$6P%J(CI0l6cJEN3pF(-1+?VfDU%T(Xj~`_z z%`_eyfAJt}lK%0@nXD#}RzLRb#Py^i)OJZ|8%n{=#%?M6S6WeC;#0?HU56V=PMlTp z>bA?hzS~DCJ>+NWDFx0?oQBS!YL$lvmrNiGxRi$LPhd;?n=VVO3Mo)~iQO5#N5PPP zF*hT^>EycY-Z|g`W?u3!NGo+V0u9}YI0(7V9<}~En!5~rvSO&e9~HUVch6tGIjRewnzDXrEgxP_qw3rvX0ur@ z?Tg&1%`k@eU4Gw!W`=Pyq$eVcrKwiE!h`yQ>66tj^koz19ACz*8i+ zRyPYTttYv&yyl~UmOnAgEIqKG^CIPT2udp9cHX&HyTO{CikDY)Ki?U-f4iRVMMwAW z_OTk5FNxA~qg`<@o~_KaYPT z3&C3I7DT$=1i;Bv2l;CEJnJ;WhoWDq%~Hxwoc-r-V40CCh6rS$AXsC5Pin-a5VcB{XM= zU&N@W27iQgn5=0XS+x5EDdU>1aEjHMJ$`aKS4M&LVc})wNwN# zC&%{r-D&yoFe7p?3|^gefFkRCsU2JfH1T^m2)ntr6Cdr0irOj0e=#Me;gIxZL9=QM zR`K)wk!*6V_2BrKjjCtY%NAarA34s+2>p0wO>#9Mexla*PvvE1nm#+fRar`2bM4EY0KGOqPA4oZ%wq^HyEI-j z3j&0MD_yJ9GaL^%QadxPDOp}71D;B?kL8GL<}0bG5{4cx|h;F-XGNh@7LKb&YOXOoll{nMvF`Z6&& zneE?K10_7S$hk2tg5F@Arj1L^0N`DGf9H#p+RG9`~E znG9=6H?q5Kv5xa2sfLt(h*o(2a*3*K17#_r#5F- zjky>`N;=(4b;o{|0kS%M%hb~|>(Jt_Pj&@Pi28;GSaf_1=XQL^&vTC2*%A<(Z!2=I zy@+kkvu8XxEqfy82b7Efv6gHAK_Ef*H>4Uad$f=K;@tugxVD3nnZ45F34ElQB8*Lx zYh#bL0dv%sF8+oGl<4*Qi}+>dfrJLdLyyMWZ+Fm0SPbtA z|M`>=wN^vx!7>*9Z^|v>Q0J4~l5EXRD~071P{4-4R#K>~q06vaNm2CtguHa{%o(u1 z_svbQ^Q}Y1;Ztw0F~i^F8}3H_iGZ*17}hmj8Z_QL%675A-@f~@7$ti|TN+Y1Q-0D^ z3WBp6a2m>sK-Y&C}QtI!1^D#X6(Yq?aa)&2b06K#a1;}Z@POt; z4CJnq`BPzJ!KmD#BC~WyJZ4>v9hr<-y>b_1nk6!sCmIwP7PDo4J%fem@mnB4Jf;?N zYK;_YMd9-pMYClluEirm5%AjLCqF|C#1yxb%SG7VPmdGpv{$Va|~;^6yKquo(a zSn*%G$KIU15KtoV%4==oH%PVqm9ChI^Ea7P`ULEMtCIQrvLbo;21DR_WNplgT|l~~ zN?KY?;uU+&aX0iQ1(N+PIRfWb22>)0lToT173C*dzW1#~{mBUV{-lpKkf-h01^hyc zaChRr{Q~fOqUx(1*3cjn@*m$MB7jzMJ^h8sznzT%NMIVG6VO-wdm6Z)P63Gg+fCI# zXv_cWnAQWLCGc?eMHT3bTcU~X5f$a!UMFjlg(+B>0jpW5Y)mE5#q)}5nfHZR=T7&k z5I^2RR~d74?&zV>_8j8&k=ESU^E;4sIiw6lmvrgnkqvN4^|<6+T1(#=HxIwBu6i)t z;S9kXKy_qGMm!pz4!ur0*!ij@Sa;y_=cMVWATGDCCOJMid21?{SoS`0VcKza;-%IA z021=+n%yF1894mM7RKyyt|k)+l;WfN>d%lYco*s_hSD`auDiE(8#N(*FDc#EJNw3s z7sVcr@lN?~r!H*QPwB_6ybJIJX7E+Avb-z*5{=tk&YoyCla_NnCAmE7CuHEG)91#v zLt`eJ7C7BG*1M3OspcY^e|WTwOiHK>9zE&2XRLf1!sqmK9&lm&FKxK=h>}uaDm+TnTP0Jm5B&D@quNJm3|93G!E-CISKwYt7T5@Xe6{70&HZ29Ed0TMq650t>vmJ zD=Ys>{wwq+&S3iQ5p|1Dv`&;9&XgI-h^(ofe&=87y6@-e^*dxZ-C;v%s=zg!7<8P8 zRU_vl+21*M__jlVdeznrvn92v3jtd84%ejwnRU3xZH+##X!@Hh%tO)9A&?GStt}{w zewaM)E7Nr@Wct&C*gX*VQ|>e&hjv)ZYMfF&x1?cDtfNYpUd-3&kRmoo z>G_MQ(41rH7W4-c}B9EO`%ncK~63FN}Jx96XdJXn>;E`v+lrGrLO}=Z05o=rD4S zI{@5xq?Uai<<_vz{@5j(0jvUmQ5H=xkc0(N!bEQ}B1B4i--+HVU&s@Gzhvs;lb3qrTgAf-qeJE8<^6p} zAs+%nH3|>nMRf;Q%~!@s_NcxXgz%3WhEF?;di_vb3AJ+ z{Lb@#SfZ$?A|R%t_Pv{c$NoPLGzL6PDLKyXe^Au_*Jqxttpmy29%be0JzD{{nt^fzz*Mu&% zv>iJ|af|!*Wub)Nsi*SpNmp(HJ5QM(E=|b; z?%>RsGlP?W6j6iN6J*d}QYkArMVIY4_(S@y2Cf15Ozz%yP%PS^UQDmwFw$FfZ@)qLZp;fS89b?UzEJN|&qo@mf0dVy0-qxf*H zd>id>_D9UV1&Lr!_|M)zfMDfY=Yi{h%;jSE@0sM%;Se|+{yMO-33!8sqaE{W6$~kQ z+@5_qkA9D)<-UEE1e`3S{Od%^ZMvdf?1dB|SIR0$`=J=6sCKck-Up9A`{2*wZwSZE zPPLL=XG%k)$cM+LUecIpDj)?!ykwt?UA4!M4)-+Q`VVb))0J>(`&(3a2+`qP(jjxaWow7yZavIYPUKM;*&&91&=eT`(c7ZX}nC30K zADOTD4^$IiX~f+<&2IU2d~O!-`5CiSHzN7Sbve4zAW;DK+zYlqqx}A$eU+Gx@l^!J z`^uY+JYIc6?4UE59VUIxc!^XJli3%$Lc~5So(d7`yo#v1&A#2`YSgn|-`zV`Q$D~N z?I?J-t3Ej8JvkvgUD4%k)=nsgeskppu%QF@^Fl_-j%D{hu`?bvvIh=WR%1))=JpPq z>Qg=t!xph13~EY1#0r)d{SkmAxxYyE3AO(fyh(SZ)loo|?JWiZN*t-t=k=QgnOwe0 zWlP0`Ot+%;SQ~u!$1dn55Jq!mz0^=5bLg(j!HJypj3 zKA&0eWIPD1V4!WwH@DK!0t5M3et&I5+ujlK_tZ@&|a?3!PK4_fRqL=-+VD#cx zrP0^Go3pJ)^6>aeA%y+xD(jTOpGE9C0^|>}R5JevHm)%*e7P_3()Lnu$U7oLF)P(H zJ_&8x3nS!+bNg?$Tn=fv%~Ee#a2}xLWc_A@EpMi}vdu(OWJ1@r(Pe~cBPpWLI1j2z zc~4UEmqVrLQHXqZkDLMYyrL|RkZ2Oi`<=A~)C@2PvZdHZb<;$XBfr@Hz|0WYrk(gT&->Gsyc~4&hioCX@OHNcG z)X{WYuxr=h+!fE`(^7mPe&KXF_;t#+3kLSvQb|qi*4xK^;gq9ieafQlT94>pmY_PX 
z24MZk(bGFXGC>ftv$J!#OfDVai^QnpcFN2`uxn99DRb`eF%lG#%tgDiO!W&+OVE%? zna^bk$UD|RlRpANY7g1*^Cl<>x|ouJw;4Dl4D{KINNVvqy__d0iYShardRIH1;}l8 zG{~0$&%mH)l8eztkhB?;%0YV-9Vh`Y&@ech(UDIZpr&A!LVcDR zD5%70{I zmHX*Xc@6wV0Hn%)>W8*+Ta1t1*Y)eBEJcZN8;al=RC5y%YuJ?e(mmOHz$xl^apJi% zCzkH1u90~>pa)bTN`r>1!+t!Kml7u%ZrCkdQjouYbP0eMpup(XH^nEQ01!$!{O+z_ z)=FC_XysU^IqeGt>~zBX@4i1*+#?s-uV?2{mnI(pHfqA`0NDaeP-(p=1Tp!;%_yjP z3c)&v<{oTJS8*JF(h>D&*o5Gn=?JG!N%cg+DSO)RXL^MSIBJvuriEtux7pul`@u;w5dB=f>1|MIyUGF@*S)Lt6-3d8|Y{+D~ z*Wx|?62CjNE?&LLaS8=oy(Y#TP*#DDBvq3<8nr;xX_Dp~J>@z2`Nje7IrS9Xx|R+N z1tl*44Rda1YF-yfGG^TeymC&q#^oc`XTQbOF6sWfnyFNR$!MA#U=f~+l^;x9B|F!B zKfvpI&~V?}#a^IKE+ENO_Ir2tz|`5n1=6*)kb5>KxK%frx<#4dC%eOKGs5lwAHUU* ze-qeNTRtez>Ef)duK;ET|2#(FH8btn&kM5rggr0$AdXk9I6#?cQv!>M@r6>`VxnnP zH~ag>x$c_qBiCxrHMVLk2zR~#-rDE=<8V;n4A8bIK2WCEb@8tE*PaS8&;u_;Jzt~M z9Pm>PhT#0Yu=0yKDD3N~xD_9cE@|7q>LZV>JGI8ELcYS>KTp4WFx{dx08|#?+EPAn6CmA(O`wqFI9!%`)J^WLy!)wu zPQx}~u~TJvHgWVIsJl0UE{VgY*&<1+p3G%_yvnUq+u}k@Gi+a*bSWjpY2RVzyS~XZ z$K%@JGY$ZQ*Vig;-*#h!uW`Dp`mitH5szvCH`fG^$1is!_vJQ zp|LsR@mFlE*r+^foev;Lz28p<4Qur9(;RL)90)RrecyT=6<;%2C*2|c6Glz`YARx6 zp?h=9y1+Z)R=HpMYTXS5-*&7`h4O(nQN-iJ0B^2dSthx0uUKv$C?7w}^qK$lW02ap zxf~Q*#B*ywlJM}`8IbQsXkTnCJYVT&WI;OkwY-1AaH`t6^HaXyaFmP&>nb2(k=WqM zn2#)C=Vqjl0Fee*|A!-nsnzL%%7yg`hGMyY0LaZ6dN5c`DiQQ)Xrrh5$J6Nm4|K}740s)j;nog1TpeMW}6ioS@34{xhr8WF7Cpp zCf5S+Jy+a%=iRmG$0%0kBT#J>kEBW%Dc)%(tW@>uRM0;HJTUx^{BR96R#Ac95@mDm+EA%+t5p+%xb5_p zu$=U6^cJU+&GSE?+V1IGpyEH;n0*okRMYXUu3`_fTUra3vzrzrf3|$co0hsbU3frC zC0s2?dnK)mRwpfz@S0xpwg>PB#Gg37f>sf7e&+XHK_5aSDWqrzGLT*@J_ZWtFjV6D z-7U3|e-78>JWP{2KPwu8-Ys(t+)P$xNsmfT#io z5*$}WJ^`AOd*bewWbs*uS}I0?g8g=xE-_KA%r8(xI3~?qn0e>_dWd*c4+l7)SNj-- zcC=waG|X4WwCC8E>CD~Gcp=lG*$A=lv7^IRx% zBLDac40fnw);@~><*mW#c1QO!HB*UQpJEPL#{hTSe+2l*3e|_U2&^}Vo=&3MOh;H3 zgnyFuuFFXW#iotCk;m7XjDL@@zNUQL-47}b>PPZUE>p^Yc$p#`TVH)Ba&f+&P&E*L)-mpjs6;vvE^(z5D8j=3OyWf^qKqI|FC_iP|W>CH~QwJLvSY zYsTJ|m~|An`>(2EfxI@kpf@MDt>wxAXFQ0}=L#S7N!$lbZ4K!@UN&o%Mz)n}_J{@i zDftbvgW8nv=2F2_bQZ~In({rd{igS^3(#DRWt*9gMl^Is8g4nI z?~IH5+%n|D{%NZ4`P3z`16$wAevbu3@_BM+e4Dsv_XdR5v$bIh)15?_?3I=*jW#HO zJ*6nTDTO!P48@a3$zh}0FB7%&rFSVB@D3M0H+^$z?{C71mP!m%-)h^m?D?Zli*43& z+pksgD@*c5xrz_qm0-SawS)XA=G(m)OBg3fiY3(nS~$$H(rr7RKS{AsuSL5NpFvxKc7d&7@ju4&Qc{L- z)}LFKWWHCfxIQ0NeNxSxEoz9qz5eBU5ZEV8KeSw7D1wp!K>t|e!}Qn10Mzay90o*Z z;9xQnKmNWt8-SDvzyZG8?*CvHAQM!Q)TLr)A;h&>tbEkn&hGL^axs6zPCEYdZg6mo zcEir+BBHjNn}1*bcK_4*pwoi)6q3J;l_;{_mw9m?B-;!8t<;GMelviqE)j@)d} z^OO`@BS~j4>fQg$IRdU+@Hv^8ehC2UJIzNImjJt|(l)4@OHU*rWTln0b<8u(w>eE> z_fLV<=Pn8VsVcCv_FH7tuDgC$ht0N1<{Y*Av1uc4z`Ursi@VJBn3(jfjoLJ~Lt{1l z!hWTG4k%gP#N z`p#XfI0e<(K6*fTUd4}R+Zm0i$Smb}z!^6_&dz2@m^*@+UDoK!j)h$^!lpuF^BTYu zE$%bOHm|z|bR~%O^x^@g#27$j{2SR&T?8O&bM1liKD*?XG9A#qv#1XWwPAorIiVS~ zV}ryK5-2Z301Wp*6l$JOG!jBpWR9i-NN@O|G4)W_PgGQf*oKW8p~7jYf1mN}SXt9N zh2L;C>N+GM+jf+9>h4U}eW1**8adaR@fq+R!T7r`{#IC@ij@cBoo{cME!whfV}ne; zVje820eLklFV$xM;Ld2UHpnhtuh_KB=F*Z_WO(Q501^aRmM~;rk^SgnDhaAXLe2`r z(^v^8V_3CzixSi>#ZH^BEZMizFdIb@tNHa&Dh%+2#B&LxvsOB?r>j6B-*Y=^tpP^q z;6dk;>lM_&?x#a3q;>t~S*Y*AuptA{l99W{!b664+BCyB+^W+=;pQ=bQ%htIznyw( zP$n-g;R5PbPz}5SP=>A-ol8$I(Tk=5(f|A-2KD9ALHpZNCJdnGET|_@&Oz^sUUZ_H z5C;&1>Qehlq~~8P=gL{)@s}U3KlrMkOBDne>nN0(iz9>nmK(h@a^6-rGsaFIxMp@E zZdnxH&(=LUA(b6AIS7%-MVrn9zTH;`fZE|eX$Y@B%@&0kJpqoq%8d{sYZjQ7F7<-h zWSIgi@fo< zho!7HSs&q03FEFr^FY&P3%!pP(ZHR&1ckTHpzM?YjQ4(t#OJM`Ly)RGKT`JI5;S9O z7YQFOTJP}C=uyd!kXEi_x0rHLmSNitRy`QKhFcXK|4v`XWy zC_y>O^n}>(gySyF%`<1~K6}kWKZjNgK9Xm6ff5lt*?5oynv3@Vc315WUXj-F#iVE)l!*s;7K0 z#`)yil2Ng<>2V!>g&L<pcCrRr z{=uLqf8hJ4u*3QXMG|_m^tdExUiGCmVL%-=rt0uR(V+2oHyaR2cK2ms?`HnF=Ckw2 
zXm(^AUcR{T<*{R;K`~cqbc1AXK8RYs_i)H!svshYopmv62^VbN`WScu{l zEndX>{Bi`oi5RZBONJy;Z9H6ZQ_HWQG~(B3X8Ol!|H6!f+tIF7A<3r}^nBkI2W~%H zP%P?V^0+iKVz-GuGV|>8qojlc#-f*U(pk3?pbMs>M<0`zJh6TBM^apzR}9I>dg02{ zqnoZ?QyA@md)3)PmO^3ekTP1)A^XFGl%P1nX?k0q%Kl-gewV8UW zqi9P+wal~2i?@qvHdKeE1~no}dJ32`4A(8?atP3=a!9EKenR7#(l*OEozwfHAK$I# zPnTVQ-QTq?e+rBAE5PG$lGZ;bRu^Vm>ilMlRZuBoHeD%jzBh5`*$X70=h)7o3>DAN z&{ZP#sp^SZoRSk61_4>1>%M{SdhTm_^&S0%4WFj9xJPX#J9I{KTZW}qY;y@)z)BZp z%e`=JOETQNIe1@k>#T?W`m*WNw^Oq^?s(F}EV@%MN( zAhAo{l|27>L*8rQ<6!oY+vVccO{5K?T>0i8@_6`^-N(6+?CvQ6EtS7O2jDga^9``7 zB`(fy&WZf=j6c`Val$QfAs7i*X~`U4d%4N$QHo7Mmm|AIihhfc-xT2#2AZ~wXw4m& z99Nv8&e$c|7iKIPU8$x9jxJ)0TuiE|Sh)imMeH$8)hiwZ>oo6mxAPT$ANpxs#rH>2 zURC6bF=8xJOU%9-mfd^WQX&**8p}4i+natr!s6blX~n#)aBp*gs&92tlw)DW(=~&w zlAzZsFLdF~#lz|rZ+BGz9lNmgd=|4QeHVF_l^dnl$>nyvabMG$8hLQ^vR@dJRZm9*EWpj z3{e6WraDV-?58gJp8^)>@g_%NjG@-rwN-NZf-(tbrBkH&*`bU9z$vXA46MJb(oz}B zId&`4a>`^lQAwYFtyOa~*mDy_zBDH7@r4OTe)8MTuOg4wGVk$CfQ0{i3IZhwK*u)z zVm?tt@?K82sTlIXCT%gA;M!P_hri#MB zkp1yLfYHm%T{Rh#Z~pXt#Pv^4Xl>#GLhkAT(8z&<`a`H(D#ve+oJk8oHgfkrWDO{3 zI+tDs4@mS@2@;_WiK`q$9aKkR-$?a>uNH{0OP>T&dp3UJJ5~XS4%67o&EN%i6Q&Wri-SRAa zYe<{#(|aYLN4)~b!yLP@%t;H;-@yg_-NA{_vp;fu`#2P*y854nfeZQkI}SciV@(#R zP%#sv+&V*ppc0-97^lHWsBnt}{ks}>Tpi9@DP8F#eYP^Sy`IasL%tl5+!++V$ysDw5~$0GLG3}h z+@7~%M*$<>Qs{c{*sZq?C%UetuM}z;6eOG-WsXU&QVoE%bvpHoT=3r-jyE2qvStc1OihLp6B|=bK70e=6=24qx+LaTb7t zGk5JqWWLNLZ`vuM>Sppu`9pg32IyAMdL5ziRWTPI)K{B zX@Z$(CQfYB=3SCYomJzcv2Ve>|`!;JJMuA%F9& zsJKSSntY7Y)k{T12kerQlLs-Ql+WpxxDRnd|HLg8oo;KtFghAehI+lWULcR6cOube zi&hMoA6?L>6o}@XJ)2PaAWqXdo8CHEX2>5tTvI&4icrSJOdSnNb2C*NF_B?d(RJc?DMbl6M zkSfm_K1yA^kR$D}kbg3?MrQ_0FN=(4)74;Rnw!|H9(tKMNbKr@dVXf51tO)_lg z0KE?b&_f*{XD~FX`dlOy4g7mD(|7t??>jkH_|)KFXv|D*tPbxZ@z=HXDv+ztKph5Y z0!rXPaF*HdQR6Bz({b!b_nq6r6>Ot-&+Cz$mlpY_2SfK3X8dUT*;i8$vAHm)TJKXnEwH#Td=__3N_^oce(`%u0=m z9BGXj&%0d+lWsqsDEs@4yJ_S|B%mEWDi+5(z(6;=l6Lx;QRboP1qknWMvU#rJUf5r zbf13ei^*>{4;WMvY(aJmb?3*xl;JZHtZJVHL*&!n^SrsI9|7N~oD?`A;!^(r;68jg zH!U@l$9?kq&#BTc02R>C-_n|mt3%FN<&2~uYmsp8 z(;Y=|_}@3v^Hgs zFBmKwD+V2YSMd$rsr&k;=7Q&DWycMBfm?o1R=0S4k7@6fb)!>%cBU@=;dDLPZ<`X; zI@&ohgk|52NCD6L<#y-fgVLp?h-Oku{$^(wrnmdi3kxW_!9v}XmBJT0mh=Cm1XaR- z23WWHJ7({f^PlDHyzUL)g<=X7VM{axRNI5<$kv-dlVjl5i~7si^zsEfsGI%z?NhIw z;+4{+(=OO=?UEMd%Jt9lIj=AIn@I2?=B`~)^P)kVm86cz?r4H$g*nh7h8sYK(4!X) zP?MzRJE&KP0ot?eY9kiG(eY0_)Q`P`}{nwk-Di`_6H4k%NtwyfhXZ9vPwwu_--8I2_Uwk=^(aVv&$GkmU=R=O(He-&5y(@kB!_tnjgJjUm~DBJ`Zi4)of3$37Mv^p{X|6>o4N#f$- zrGQd6ysJEc4RBS~!xysH9lJodKgx4s@8lopu0(*xGC(`i=3Z;o9BA8O00@wykukVS zR{``^4>pNQZ5C7I^k=*7&+jMvJ|P%x-u256K!)QlPe!oX^Acv^pr4Zu%aG-r0CLjE zJ7YCRi2}e-j#T2R3QB#Wd9Tv8rW*u!YJbvjEQ|-VS#qtk(Ui?d+pRb#uZ0P|xolj) z)@TQW$3~Z$8`70MB$EZYXeQLc_3cQk(M%F}d@RKt#2@wX+CMdtH=znIAgY{s>HQ3j|bDU zZ^w9t?*+0eTa$bL|76FB3i^0P6%QVB8GxP+1|4bV5T~C22JDp)!SHJer%-mC&#Lu1 zs9(a#PIC~Q4oC&e__@jtC%2oggsiLMHL9blNUK}8XUJZwD zJH&4J%1JLsoMGI%9MOR~06jK63woIcN?&TmW%aHpV1!}o>Vf527k*NDeC|L0F93y! 
zL4imksdlGGeBS|xry~$63^IgTcAy4C%%rc=Dm2sOIGU`x!EZC+M6FqOzI-%wm6hR+ ziX2d8X%wSxemF7ka;g0d(0%p6ovuvcmD>3MEf{!Qfeot3yp)LMZzq7kgcet&TemuB ztjp7tX(^0K!7e=Tc)aS(uC`d@;1)P>QXyrkCcpVct)eji854CYrC$K_uVv_RP*Ox+ zk{3~V>+Xx&ePRYC3l5*+I}3o1_KIlt#L{8|g+;x<%>QG;F#E>PA=Q-y*=gc?XAMnmQ z!#K`n-#6=C_gdGw*7b{(K!H(Snmb@2BjG^CDlA*In$DT&S1Ga1L%hg zfce3>P@6Dw4o2>TGZo+WAUs)z{4xcu<@Y(3t>b?34=FIJoG+Y7*+t?Y_QySk_ZIb{ zaw+!o-WT6PUOsN#DK1$9fD;F!&NLsWr8oJxq&fNUwC49h$q+#!HFx9ZBH-2FiN+q< z4K_?1mFjhz$TX@?Fxl9`#)pa7W&qc%#uu6yRHzY(CM?YC7 zs2uKe_n^skvM{C@P3S-dKhsVo_uUZsov(|Wni^fsqk{qO;a%M!jvQio7pNm;&^wZ)q zGMIpr`0VYNm%+<2jWUB+Ps9&cWK2Lo>TLg@*Im;eq;8+48T*z%!DdO%H*Y;-*W8k4 zu;e2g)>+v{b=m=*+b==t(;DTvv|ytMQ4qKkoi5k2mfKh|g@c4F!@7+0UZyxFf!mB>KDuPD@y_yIX05- zzqJ;~qJYmphgxVe{sbPVw}yL>8~@5$VAuUB1@F_mBq3l-_@Aa3*tVc^EzqiO8vr;% z@Gn6-EU~!|2l)?|+J1Y$xbS*APr=u}(jov4m%>=hjKg?|mYc=q{r~`f?kA)K8V%IO`%ggWl}`nN|Azn#drAk`QyQ0i z2p6LRZOaWP{fj6MyGjuZXf-Y2LANphiM)42}lqkN9dpYp;`awUWa<{H=B2(7DI4Gh3EkA#mdUsIcWNK znf=e>pMj^IGw(|vd7)TqrN#})y_D3{yp$3qf7O)#yw(!fYc10vOO$E828<)Udr-^% zSN9={!lWZ9O*Xtt@BlzfjE~2^1lan|DqmNaviq;^t&)KbyPvGJm%f56PGUrQlrRF> z-g4^#*Lr^Df9_UEBpG_kG)DnHhIavk0Bh-P%Q4`a($2FdNYCC2eTyW0+DcmW2Wf@1 z4DNve8UXHgch1$nyEwOl?Rg)#f)-Sq;(&Jk@CcAd`u66kTh$b${}WdJ{q{bIt{#sF zCc>r`z~|RP>C0JCVbYcsP=nhzgk?~Gx&2}earfV21-vTROliaS+Yh_*S+hD}HWm3{ zTEQA7!Ki33Knc)S&0(cCcLPA`0Ujg$HIjfBHOM!~<(#uSQ^9o%lbpgRM>Uoc;@UM9 zM*1OSJ`I#qW5*nm^u$n?Nik9yN`1f~_x?T4aV^&I8y+OeQP$5IeZE1U>AbqIDMN^@rO3T1BYc;u$E1e6jY|C-OKy_>&H^ zvB{QW7Y2b*N;Uf9Gb-5dZH-|TqN1X%fptCc`k;A4mjOoLWqL7`%Efg!-x?9*r-GR6 z{f4Gwk-#>KZXUJC;k)pd-%WY*K`a_mgAgw67NcGQ_wf?N>NH_-OAFuFGh|Sy;AAF^ z--6{mKpvZ-k23$zKge$Z+~+nIV5}5DFh;~DGod`JLef`ngG%+9)t`Cpz1p6w%#mv} zV?+KbPCic*R3)u6=0~@9=DgMm2=_8zBQeP9? z?&c5_2E|~G#kaB|fT}yz<^w7kPBg#RO86s{mijM#|CCDM~Q zV)M)nw-i`xHc6NkQA@2rKv?75( zvmETLgMuXY3z+ivm6gs|$hba$r4Uq<@XWf9Rb_>IXGfhJ0kMogCngUF4D;cE*-VK0 zX!9KltdR(Sn2H9GYZ|l2>Iaq#B3{gTO_O|u)A^-UPY~WRVecEv_R~eDGpaUK0FwFu zOdsok=LO0aTFvGH@R5YStk59YE3ON+Fxe|SUfftrE_MVlYzTZXS>USGVzqhHmWU1PhI|kBxlTy{U&qf0 z2-~5qmN0V|5J-e-09`nn%DT%{MMY%@S&Y*rR1V|`L-0HvNf|jnqUccPxBPRkac7U` zg&T5%9*D1BIFdls6KruXho5VSbhN{J*zIsjgPt>oMOHMdAFi&XF}w(iOda(P>SxF= z|H18At)CAo@KoL*FqNp-T`;1@Z02cD(fSY-#faO+cRF5mc|)-^KRq`&E^IK z*oAtnp*7s>Qh?+ckI5Lj9<-6xM6epxpAEutRKKfBjwdTtZ<;B0)y=t+(|&?iAM?lg zgINaz_)q>m&xVL!rW{w>uHNiT2#8s!m*9RO)SAM8Wpau#NXNaW6Z|gg8)-*ngG03% zeJ%D2D&$cuiL_`w{bg%}nv7s4xyPiPY^~2cG?WlGPdQ-lK{f7*W4re0ZqvuJI|1!j z3Os?q_*Si(4jfx+-nt6^gOSwZq20jl7x&4@R5MU@IeGr)-H}7p7$GPQx6Ycu@f5te zf&>0vU*MqqvSoM+d~^-F&fuSLzr*?x-&|a#X~*$++GFyH(p_S;FA6wR(Ox6x>+0qa zBU_VN>BY8Cb({h^T_5b9tr}&}b!Fn&Nzk;GhmJGWPpa4g4cc;gNvh?n`K`RIT4NWP zS)42*Tet-mZV|H(#D#X1cjMPE9+X3u3nfbMbp;twmiIxHUL`Y*xZu8DeOq-^??tp0 z&yb)Yw5zfUS+jd!JM~zQabhW${2b66mo|N9us!;smMbLu37!^a!_$PE*v=4aZw& z5k@tpC|yDN&2QS>V77*)zIW`OFLORUHtD|8r`efVs%>_q4-9K)ek3!@@1j!`p|kPp zzC_2W*zq#g2X<93xC+(t{aMAWGs>~elGT?&12+xk)6Ih`ddBlfTT7mUBni@{oD8~D znVtG7$5JA?4kxfNAvtpgqZe}kXYRj@78F4TC}MjOWEE=N(<|AY-d7*qwcjx1uY zKZ^Q)hZ8#BF{k!`fl%zPK=gN{`D^#{mnqG^Q#4f}kdDJ^GVFNyBSczkYT$C8lz}RX zeMv?@vk)j_AVNe5N$CM)BC?G5(0s2um00>G&nL(3pE{;pXM)dUbr@?yTVCbIJb3K2 zl20!BmR!^0c3K2}U}Ip*ydJTjfKn|c=Ao73p>at+4o2+yrd;FE;0ZtLc{79i<%USE z(f-%fs_76=Zs;9~s~a$;*wu*(IIQrl(h#^5(7C%Uc=W|`w|=AF+~4w$VV=7A{P$oc zIXW31Jt$qw73Uc~!v>`bAu44A1u)Oui+PsRlIExIa_u5Sxl43h+~3Y07;Y%+D-I!O ze?e*YeSKM2Jbn{xa%`o|O#3)UZ@T`2?yJmvS*-?z{73rc&ZSJo5|eRZWS4UKX{{2) z1yo7c@&Gr~#Il*}Z@j-h^klmBpdL|AQ@j0wMYfZmJooe_@o zAnT_2C-%XAnc=u!x)rvUsudkc49H#1IFG~{RLnDvORw$OZL=OPFmPYz?&Q~6PLDH; zm3yi4@Q+3)9A;gu?#fz@(P&*gI!N4^e<@c=Sa8LGx1O!P?RcbQnN5_8a(EcCEyG$i 
zCF4!zVl}jS6Cl(wdCB}Vz<1?!LqhnLhR8gp9OhnoDBrJ*;Z;6?3mqU{l5%vqw-`Ff zezaLYWIbhII)1Dgmr(*lL>Y8^n*{~Mah<@S3M2{yh3$Tx`|yhKyst*56q@JsJhd6e z60%j~3ty5U!Drnz2I~ZTxLKaEvq;>Dz41KncE; z%e>s$u<4+cw85N~y;ksrm^Q+r9L-~$^0d(xYA5!#!8-(7FEx2b34Yb;V8*0|Omm)V zv~mcF9C(;YWST1E8XJ;-I-+d7mp*J9PxQf*;~DKXKHSG#l&BrCIc(VY-i+{}yvy7d z8Hn{2>ulzJTy{G8c(jFjD|G;u5cjIIG-yVtI_kPG*b$THT3(@DfZ`-+nuuprow0q} zpk4ywZBz-JVCl%KK;aXNN;5T9kP?D@c1+&#%gMjHwk6ae74v$d*BbR7nI_d5_SO!A$5DzO$Jr6c@rWJ zKoJ;Wa0NyXU8MuyE>IG^RA)P1G`ABmdV5-lOAHF5HcmUOh!BYlRS|8rXH z&6jgMe{+RacIvL0$nC-aN*MPm50E%nZRcxnUjcx7FtGucn#sBzIM3X=r9r9G>DhE? zSlkX4KR3eE2G+`i z-scG@SHi@ih#3K#wB{mVJ6qt3Lza}-2skG()lZKZ1MfW1NF zPbqxr-(o}AvK3Nvm(QcrTVoT#W}I#Fq@+F>xb=Boha&C7X!8L~w#wtwYOGw;X=9M~ z9b7ViPgD=uuI-8d|3cU6clS|Z`%T_#D@p5tqsC{oC%l4CD0ESyfYs!cPJzeiuc+dW zYLBwt-J%b9dU!NVmg#5!nVfPdht_%7F?+53ivB^ulwm$)GTPm76WoIQT4m+5ldxhiDD^R&bu0O9 zxdU;4Xkb5hMSTzreOj=`X4~AkyEVnKS|j#jW$nMzdW2n#Lj!Yh0EA9nUgGx8+svyl z5-H%23j@T=q^%)%AWFbI2W&#k5*`^|SziF00G07D#9+2_?!r|2{DVxc-7v0{oE!&` zV%ER7)n5Sq;l&+;w)sDccSVrQUVt0{r{JejyBI`O`{ zxjd@yy&XVMYl_5NFMWwY%+6x4ad+kOG+p+nuw#3BBAX>9qK`nY;ocsP0^ zzytG?Hh0$|#C}^F)gOKJbVjhq$`0<2qzHRMyQdibgVQ-lgY@9EX}3I%8tPT07;Unz z&O8j`!RvaTPE|RsOT)O+>wp{!f$8FznzmjXuMcY{xY?uPuWj~$vcMow&?xg!78Hov zO#!=T<2?_cVweJc#mmQ|%%{2iXj`q3fFxRJA@dvNpLev@m;a1CbBek5s$6QfQ~HYH zSaf%INaH z7jCgudzcGI;WI4)S6dV?g2|JcF{u%y{iEC&<=rb#6aekM#6(0Cbouip9 z&KRVUv6Yf>m6ElF-r3%6WlfwmjO`ZaKXm5lTLBw4rowwryIpq1*#U^(C1MMx1mu%7 z_`IoZW7%c?F)y7*WP7V7X8>Gkn-_su;}?Tj(bwPg6qg|VKVo{$kQYTbKI`u85Pg7E z$YA6#n1L0jMDTB1(9QHS;HwKAMvbc*c?Zk*-tHQhe(wz4@S2+T0}8kP*-GOf=8bvy z-{*77?ICn&VS@Q;F}#N@)yiUk{Zql9`MCq#%MpNBDNNQ^KT~{?Db#d@Mq1IBTSi-i zXQtuQJ>9nMBj$rO+DWj8*(FZ|%vuGtJl}tOfBkGt5v$!>Krx#6Dlq3RDT{emBGSQk zm(RM%=YGD)rR2%BS`^vsZ({cD>Rq!y74$Ox?mXwNVk@ZiQQQ{)lNz$$&Nd$Tw< zE^_zBGX~NpVafXF!0<$_=p=V0xBW9I?t4iVlm`T(2Fn16wbGw2E^7C>2cV-P(1XL_ zBR;U@o}4!2mpGM(f>i-o-+o0_!BUzfO+AVBbEV)1s?I?fBsQ3^oDx=R2R>9`nf>29 ze1On>kjM(70p_2pT@lH%L-X{!BR`E{^eXAVpNtYol_%^feHYX$xcKpF+@dkNDgtrd zp{u1#Lg;Z78*}H`6?cGTjeGDwl>k}+SVdkYRrwfA7)T$qp6fe}ptpYxRxwVw3la3b za>mo$?p60Nc+jW+pw&%!0@QofMttw8OQ2Gw73ex8AN??GvPDjIZw2S|lrpAsV1;xs@A7@lUtF_YlKf30j*sZvYe?EFs#6FQ3SBe@eZHwo$TjFQMJhcta67y( zKPcz586de!A`^0+Y=ZIQtGK5zkS)$P0VhojpywoU4}^ry>-;iuW^|n*P|BUzHTHM* zH;^Peo9p%%NxSkFcAq4cZF{T0fL9V=266;5jEtvM;lq)wwTxxpk1C(7hRE|)rql>b zUY-*n+>Jm$S}b!clJ-0G+pq6kLmy)ekn9Gj=BP7?G;AS9Z&|u8Emz(t$1rZQq15g| zmjfk2b%lKWCOyfFU!}ZVYh=79d@VR&PY^-nQloJ@CgNL4c_{*Nwuh%Jfbz`dI0lwQ zn=4=5GRYneg_uVic=TyyM$`oZaw1l07Om~+Dmsry49bLezZ!qf4We^AM|8s{$>m54}+ql{EcektR3T>ajgcmIYVMTE5YzV&>WEf9+!y=P?^fLsjvPXd_p^W!JHj=Gb9jgXT4YE^&Qe`1lC!12q_3H*RMxvsW zy4`Lh&Mbsv&+c5_kTl(QB-n-O6?WaOhr9n5q{On37;A+RTHZ6l-tQQ(CTl@uj-spH z??;?1=U!_~+YCbr1Qw0!)ReU(PD-BdW1{hquvLb#7st`8l+Q6o-P_ljtRt}E#->cl z#9GUj(UuWY4IE)`ADwl2A9)+zlWOPp_xf`@G)42WgV0&~48$dF?m-BiI^}6w5j0-K zr;JysX_H4?vRZCrbev%&EX_M>U1OQY$GHg+BOQ56M9+^m*kupu7kB--iUaW~L^C5Y8H8oN(tChhyzk)a1JFc-pAp z6|O%Dz}zQ(F9jtIRv&Y`xmLi)9f&EadVlJ(JkUk$RVV#O9c z<>Qen7yM~B)9I_Lt}x6`tH*gb7`NdsWV86u!z_I?BC+m1Hr)BPnYqxqzksj+>EN_V zIZO!qs)Onjw#BtsN@wQ6EH{N$wDu`ob`ipp%fiZxm};9-xP)Qdo}mAPYar54sHkHk z+$Qy=`B=_xShfk)+OZo&E$3&xo-&~9Z9irHHIKX0CzixXG?ef3i=DgOhCZ7`=d6(8 z*xfJuW2*Z^p;4#r6z$2k;ms`>=;(_TC!M9O8;6?@4;9+ld9j1?dUFN2@LOrmlB3Q) zKGKKY$1^&$55%(ZJi8h9>GG{Z)Q)gmA8+HxHJd_cr1?Aqg)cLD`0&keVB#c1-G91x zd_1(HitABb*7oJor_wW=mSzUiJpK)|(G~n=WMC-u-Yq_K)G{;}BEEaq8|CAg3i$kn zcXR%PH6Lp6)^mjnFP4d}*XFi8*S`wMOy6TvP)Kmd#gO^XE=sG7-_^R%rHu_ZvB_dz z>SlKDu8{4=G;!dXUhzlBxh_0QVDQ%|UR$WWUWarz3O>?xH6!s#NK(ttY~2+>bN}I4 zT0`GPHmu+ag=R^FXOvvou*Euo-sKhydltb%(tdU^`u+0s!7s`~)SKV?Xh4gyv?jFNtnc} 
zNzxkOz})XQTUXHuR)#MVDf7kp;d}xZ3q{`K9Sp>ZkQy+6JnAc~nrO-woj<2N(ehop zpD+ciAosd$v!W#wqDKNz>9Lmk0{MqLgY}WJ5{~(Bo10(8Ufp{5e0T;)E6E}rz8Hkf zxC6uwG^)tK0Rl>yEk<9(C6Z=y!QEC`$7h|X|eZSJYy!T+93?Qq{J0yL&p0J5k zX9lSo3f~KS`u;Qyo%`5-Xv1YXFjX#0&2l&Rpq(JIW|fdZBf5q|1%+%emtQ3@0~f(7|*$j2E$NEfAMk=s3o z-_S0hW6XrH7?-;=T64X!Dr7xG5M>W5L#%IP)ehZ?d%`m^JX9#8TS0gfM8oq~bd1mB zk#=09WddEu)P0W2fVSCmy_Q+3FP3vz>YZGI?kK;WBmB~F&1s3qs(wjqY#p)XIghRP z&f~|LKbFLo|ob#6#>rT=y4bHqFla}^)wHTvBehWWPamT3P+_wvV zuD--)naZH2(=cElb!Ee&QY`(a zdH&2^xTn)qE!R4J;Grj0%>AlmM!SNV~}K`2KN>{GrCUN$dULfb5fH3VsI_voayY1XFRu z!5Itc=sJX^1v%tJx4bf`Cfio>rMn}{=KQc6 z_ufD}%3hedhA^RrKPx;J-oBL@dN^Q(mRe`B-5WGrcWSREahP|t5Vjx|*vt!WI)b+L z&WyZwLcJinDEGGgUd>FCLeJok*^}JCH_1+?`NDGi#^*xq1B&iY$76pQd5ITk?8Z^a zBZA2IfHbvs!{8zNtNyQfn(o9N8>fk?%8e~qxtOJKp66}iEeul}R z_F)dvj^es$UXS>6;z?$w>HNE zX!X<|W~a4eTSIQ$+#i09Ju`?M`{FqqilmZ40YpvOr~SjvWiIqGbC2_KjVsuhherZ= z(x7>yRDcxIabNHD%t0A{d&PG)4MirFoaV*jU)VUJO0VFuY0MP3M?RXrcKSw9NF5q8 z!}d#X#5rRdpPwQhk0l4$BQ(HIT3$wTmSp8>YPPw$nytB-x2WVw13Hp>P`*>prXZ+t ztNK3WY>tny4m!m417`y}Q^Yi~oMzh|L0=7Pt4wOO!zz1Z1kE2|QI>q{EN1JtS&J(^ z0s+^S?2~|C1sv{2f(JaRl`jSyjm1}Ux+4O0zV4a4L6twb4E*?2j&hY4{!?9)D0GD* zG?SbXJ4h&dpJHqiDmMyAYGd>Uag)r^WQer^UR#TmJLU~E-es}G=*Srd2#PIu73zF@2wBr??Rp|5lYG*4E z`3uC6FW0!u?%9{VzuLS;)jCSGy=yAcF1D79UiM^tSt1azoE}%}eWr2d{J^4!U8PuwUTL!>06h8IoLBOVbyg>bRUFf$}o!xjzkV2L^Y*lkl*&$qb_rNHKF*~Sg8%0aCu~{-5^5p+V?ctz=^q3> zhhXpV(f(rqgTd^F0Y=|krr5Oqf8AY@{5itob>`=@OYg>s38=hPj3P?sbs0-gtxU{m zf3h}9;aXq(>_@?W!Xt;7pI=aL5Ub zgS>LJg;KWiyp&Ne&C%Lsm*?7{ooB7Q4d)!2h4h@Ux!Pzs@`L$#%D{KJf1C$lJ#JN| zr7MFhHRtbW=o2y{)$1dqx=r`qm_m+@Kj8K$VPUquSItNN*_FIhT>f#5j-d$RJIokY z-^P@9aM?F5F=rkXSrjXN9RAU|l=bJOA)2~se;71jV4iWxaq+?SQ4h|O@r04=P=ceN z#DO!m?{pS1a5jQEL19Ssv2dwd>H%yVI_{=ri|67+3|cCT8WGmaAqVM3R%(3o=RJb) zec>~818Ln5$Nk^Qu^Ii(-sv!Xu%GOzv-fS(6i4rk=c3+!VOz~jzd*S}NI27*bBN!6 ziFFt)RbVzCB;dC|dCEagzc6Bk(|8A02QAl#sf4eplWBngTY0S7R4Ma7d4v0x`4x zGi4}XBYx|f_Gp%4j`~~5i7;rqRHN03JsGqjAM2eeg?Q3WWrhPmdqT7_9kmEIT zA@^N<286k9vMG#%pv_{G;sm8t7#4}%TgfMrwAtsAOh&0V3{AZi=>v`XPgEh2^{9h> zDRj`GM0jsaiozdwbZ738dWhQ>@?*K&w0#x2tbpE^QJhiKD3yQyKUGWfB6+vV)!_{oa zmOk5uyd-s;n={IPBqD>6E?htQDy|Vswv>|Q4NCLtIg;lGScxFQI7mJ5S+qK3Nac5x z;%Fhn7kvC+i{Q$XHLim$ueoh$d=NiFnM($)DL68DkgDTsLU)OSntNen1NohA1DH|4 zGX1MRt)^_^LW{NgB#p^Y1E(GQDB`=P7(8eKL3e4n#C$_il6k~QJ{knw4_lC zFFx>pK7S2Q`%OyLKZ06OkHT@?7}4==tdX@)p{abN+R3dyA)Ujq`?u}sA#_^3Kb)&> zKyPzRM&B&<=5Pkp{dVB3R#m<*KMk9nJV^@Gt>;C?65D38vitVLi*#m=#t4$kem<`i z+3hKAG1M^YwGH#S-U#aYnf7yjpGL%6GRq`g$Gq_HG&J%)Y+%AvaZ( z%J!Ct=?q=x{n6jAO)T7RqSoDTKYxPTpLl6o#Z-rr={=vXG~Gi8Pm)z77WZh1LQBsj z)lGGvKrmrS>UP|`d!Dwq^o+m4^iILF>a91#Rex1ikJG6d7Sd>uHhc5;ryREj;k9ZT zr8f=g^Q5`8A*@!02_&;zdIH+IFU^RYriwxZKHlTC6@k~YEO=RLf zQZZSYwz@QeFf$+VRseg_r0xiA7th>XoT4}0UKEO0ZCW(% zQ!Z<&dAE!~|1uUV*1mbBLLpfnb#6N#ZBIe{EWYdzDn_-=cBv%qX?#7y;r8Rs zwku_pdv`kJ7c|eJpMyL@KNNr75#DvV4Cu0oi-A#E<;3Qa0k@1`;kM`Fm@C-`)h|Xo zHc;8iw?+1oI|~K_roQXW&u{@+sI@6oJwhrXPQN;40DYWxIZCX>rthgbTCtCEHMX;@Hp8T>q320qd-5|E3GM>`JJ@L6vEcZ19+zuUI6vX z*PzI~NkY2o>=BDCC&|B6U2C%=RW{LBKe=%lmfwdnqNEk$X1i_5>-XZ|S%YHNWcx;W z6cpOJ#_GBV>olGYVpb)F-koFBw)G3@6`d`WW?${+lN6<9%+uu&oK9SQmSyCnC^~>_ zY$_VXqpvT6-@y||{wza0&3Svbe%ySH49s2xV1`LLI`8HSYKu>GyJ5bVfDT<>yj-JD zGTGO!DddL<348S>{Or2%!VYqP0aJ-vC(8zc#CEw;ym3vn_JLQC zfHqm@sNJmk((1Y_fpfn&^4UAO$KP`74@YAjrtjaJF!?`t_y~4ri3!jUKYxx-NC;dR zq4E$_c6Q7n>TMp_w80BubrDe&_HKETz9PeR=IlH@G~+F zpa8W}v@5>%-u#d+HQWAMMk1ZZ>1ha?sD17N1Z`3LEHX?_;;5rqn?>ZIZA!c{GwGA+ zNR=Hvxs>g}N=d{YXGG|D3{zLi9eRwqXl7M8LODE*KDGRY*wp%wc3~&|BKnDL zP2kz~3sc4TkH{oG7vF(0Y%$Yx5ZHc?avuR?7h&meI-*9Q-s*#+J4EOyfpU${L7WDy 
zXx1_T0o~hl?GxhtAi+yPfTsYJ^-VNRH+FZS9LvfopL#)BtvXp(?(22+pvCw)SnIF}-LQjZA4kv)8XL zNJlg6cWM1 z97mS=28sO4(QY#ZzLq=_Uq9jLWN?#9xYixU;5|tFdB&!`U!d{;AqoaFFlg6E0=z7J z=n8N1VDw8pCt%%2zj|@&pcv6w=M>ue<}@rZbtRQ{f@7{@1ju=N*}c?J zxQ{XeF=GKQzRxsK@8#4~8yuQ{0D0&b80GX9_~2Nymx;QLpX~Tx-m0|pCBEwUMB}jIuZz8SK9?JkoKrY@J`9+=68>xR``+l!J5t==|9Cr>YL0N7;ObIA(f7Oo{rl zL(W5$ZMDJ|kWU1S6Vhr&-byJb%rGxfBKVB-_s8nF&WT~CV_pa3NqUY4Kt9=49vGpy z=$m_%iWe&00SfmT>?jQ28eRj}W6Q8%{pN7H_*s=D}!U07Cx z`+B+I&9C;D^ZUqEWI%$Q@t4QWvIFNBIerf&+DX0oDrwuJ#p`y!0NfJF4H#LjlF)Ai-eLyi}=PdZ$6xwh~{p8D;}F24lp8TM_L z5{wIQN}tsGk$oE2&F{;8*L-~5@b2eY-Gbc&1#Z&AVH0Nm#f*^NvieAjl7I#yO(4@~ z0S5%If`bJhhwA+B#Oh|x@Z;g~@r{;V=zkdps%09O=dNoFi(|NaEbwj9IZqPg=qa~s z{IYYnZo9R`V|OE0rY%wn%h5X!nG^6-^zf$4Kmrv6XoB~OwRN!zySlU4Dxx{@jPU1`*-aY>>N((v<&vwRE4X#j z;Y|dO^U6I6j+9Qu`9PMsvxZ3)B@Or@+L1F9JuATxgWt03n&)eQQ6-j+0lUenaXmpN z`#mAyI(A3zcVJpl>dA(~X9n!!9RXQoXW3!%u}`cJ3w)9|50LEM3bVw4LrhdRiY;82 z0o~tieouIK6;d$XIY%u?J4T=Tavu|!S2Y4ex5aqTN{W+-Wf)Ag(KiyQvbVz3K( z#YKYClO&m$hk(ucIO~jCcuLuEh|C}-1A7TL_kKYp7f6;Q$At&WAnZV#?XmRPCH+qFk@G*uo)^^qY<y^+O(Odo^i1T*ZZhpQgr`EL44{v#d1pC>2VPuI&TyfC%o#beQzDfFr)+_@#P zx4^N5;<4%Oj4y<)o$Fa&$q|jj=>TVwX2d+_SI>p6!%?OSi9$13KhpKBVZfKQ=bSzw z8<{z(CL37$N!E{`pecCemONgsQ=Cy3GM1;GuDmv2x;~z#S9N0~T5gh8AA@zTK}WS- zTVHrA#K2 zkJU3|PgP|@;G#~IXYFO5Ky!YP`Nxa?&g-)u#K~p-I(6mBXNpG|9u9T6c^4-1C8Y$i z>6(p2(wGZeDjyCZS@|9x4ZrS+s4ctE)33p;r;(PQndTRf`V=)bwcg7WdDRZ~?BoVk zHXpE@YRyQLuPq|=fb^aFVAr^y{C7-ot;&;0PN8bMQbsbhxQz_$tLFS+TX++LVVq9pc)4hZz4lcXR8VaB{Vh*eJqs>z~}L--+`n zCSmb1sLWEHrZ2EMs@6f|V)fq^G{7IgD)GjWZ@Cv-Fr6P*s4+f0G8c24%R#*QrMNjF z3HCmj>{F!CDU0HDd!;eHbXwB=*10v=>P+|T@b21u2ojbXd&0>_x~(?Rn*-3vF8dOz z^&MaQ-um{d{N<&iL3KUxI^z8+Uwi!im1PEWe65t?g`EDr)ZBh1l}+feUQu|veH=bL zHHPK4-Qc6G1>Tc3PyIRldeX?vOWy@b!SbSb<bOW^ZlE`m||%`Nt_`X*pJnqTH(OHl>rBD17(SRyBLsWA5-F zd!M~oO*QY)FQouc9mRJZJV$hW!QP8>aFobn4j$L;6||kSB>(2Tfj}ac$&fCo8yl%) z!v#<|>qQZ%T05$=HT~*`&Y`{Gbe86rK4R$7^?QUSgBJm^J+1xApY z&%C*@y#8l}g6a2X7+r_F?2G48*EXei%yR`QoaCK5==F=88H$->Cf;#}c~_eP{dxZ)>|4r87l zw=qLnr|`eYEtIUg^;?ScD^4J*OcoN4cPx$9S zJ2|rN7Qq4<;j)H!4Y;yJjhHBIKH~yUce`cIo@id5IIr*vt-9XTg}@!A?TwjES|4>- z2YZl?$7#4GL>=rgE%73px6jy~1`8j~FK>RmK&yJgjrXOFw`Xtum6*c9!sjvpVY1Hb z&Npc1H}YB+iz;Qw^XjTO>FhVl4NIGQYi85D?z0C_ma0eQ7Inl^DM#059!)>;*t{n& zZ==v|4kpVAiba}IK4>ax8X$>26a9?-+O^H{)9H1ZBMbkp{+|ndyee)nDj!S-1gF@$ zlMnYCT#w@rQYf*@y+;Ksp>q0?i%;<2Yew=bJ<4uPBupn}0x>+dL6yGy@l=+$SXhS# zRk(EPk$a_$;_#DEA%z`SS-AP=nEQ!L zBg0Fy4#F&u!_sTc{^@Kh4i~1AOExt)^hKJtCcT0fE4(W9*X?d-rY>(Fu8NLoerO2g z$dJ%UnkmZ(u3j`UqHUbAm7*95gmQ-fvx`kMbuNUTLZp@Xd*-#Sd#9J(0Wnp9Q_iA$ zBGF;KM$oqb-kQ6?L7BwVE46Qs4qtm1CS?53oSQv7669}>6C(A`-+PEOCLmL*H!iTt z^u*^A1(^NDvo?ZH*}fBcf+e#`B2lTLuTf%M`V+Aj_vJ9g(eEG1`z)7&Nkj|ZmMZlT z-D%=vzO7BA3sMzDpo&+@G2JYfk&^zpczznQk4*zFMfzem31?dLHaq(u{e90P(vI)> zDhu1uJ96pw4PWMXejx<#ydeQvk&bSSe!aoX_?vyc6L zRovP)mHC184Z=Q9&cQ34=uBaXCrT2Y+E?gV+m5N6I9#S-Q&kK@lUVOlqN95;j&SPT z=vXt{VxHqwJZ$&5$|S^6BdlF`&%`;vfwKv_$RjO2MaHP%m)5QX`dXa|Ql z-(n)L-Uy+}8aRz__R}`#B6?ixv_12vp<}zif}DV^BVGg zvB=$7jl{$1zvvxvdwR&VR-ne_o+g_E6~-Lx`x6$@N?KB2oR>FSb{6^pi} zQ42Cm*^E|>rr=wU)r^eZ^MCrRASx<$?XVm(rWY+>Hu%`P5qQ%zb0o?I@(p)hK6?7| zB5=O|%k{ObEKTj_TUoE`C(>Mh?Q?%8;Wxo&)ezmCt%kkr92ueD7wT9^MlEH^-wT#@ zh}4c{5yoQ(=J93O{Mv^V@-p6djRf!F(^$2`aZs^N;XVy-e9L=rLg5a!7$Ixqpen3* zv1=R2__MiaB;W1~xc%x^KNVdcTk$T8d_!;S_C|RBbW~LyQ|+rzLb%Br(i4erHeM(YfOM(2z$3^fD1bh2UR-gp2NxqAfG7(zTDT z#jjcck+T{u_QD~T>f4l44h-b{PXS-DV*-@k z-Gy$^d4D{hDxp&zSsrDxmV&3m7KFbsh#=b)&G_UYPwCA&EEm#`$WaDT3s0oOM+S<5>D5FY2podZC? 
zQ4{08_mD0!i!EzuzJ1R~6^p{SDiqE+G$c%<2LM9=?I=G; ziA&ozcM56{)CFi#Cza{NJ8De18O*6G+ZIbUT?oe45P>mrAsBActB&RTjGsmCnWue7 zGC}6#a}=S1QAX!M6Q&0MRoyA4?VI!A6brf5mIjch%%>+FUOkJHm$%QiS=SpGmGz7* zsAyat!v6=CdVT&8J^9?b$g8qua8X+E=n(?Ing73WDI!7?{;s2pS9|~&AB>Y4v5N_% zV+!Ds^~U%b3`3joJOD;6WM}p?*;%`8d|O?sA$xJJU(usXWDA!k*e-dH8el_Sq&ysk z*W1yn=3s_rg}#(aOoZglpX!YC#&RlCm@ii=3EcU&erM9qQv99C^#|$8(g2v--VD81 zenti(9(3-7+tB>W;BFFWPPvlNC(cNu!il4_aijpan$MD`gSu{wMbuiTl*B$oWRuGe z4Mi;~Wv32Z-jty?-SEs)R@J6&c9WZ`XkoaTv1>hWji2ltc}jOmFLCLR} zQ^TLqmn#0LBM@LyF)}jltbfokeE05ML&(dUhR#^+%gser_uXB`?F=Y`cf9x6RuWww z`$*94%OtoYo|^njWp$cs-R3;)NM3TiyVoy7=FPh63XDOCMLP|4`YK+D*g{oxwT`n_ z4VKyU6WeTt5vWkhyu!WHm2XsyUWCi_oH+kezjD9!Z`$YSm}-3hYH~xw6ga6}z&k6qD?f?HfCyLZRo!71ZY=6Pn4L(5@o^sR*K7ZaE7#iPeXqUN| zu?PDVouM>_g@($0k>!@kj0CVR5B?1_w6-BC-r6WXbl4Nw#DwW6zSMv1VwrDr>f~-`BfezTbL0?*HKS z<9N&%XU;iu&g;5f&sP&iMW7 z|9=o&`{U-0igh5&CLdWD)^V@8BTIf86T5j7w7{)atZ}8VeHYabjTNJ*tk;1;SD`!rh*y6A@h@+;0gdHz-|>CP z{rzT}naYkGNf?*#0*R0S^!f50AOyOscfENN57w>|L68l-8L-eUNe{hCoqf_Bx@dPv zxTDSRDw(}*6TE2@$9hM03*@z;3Dj22&p%UecY)v|aim`3 zbI3728^009fGOav(lhtxJW_}Tj3D=05o<1NbIhM%QUtWf+6&OpRWGgq+~&I**dcv# zZc(++Q9?BFb<71dGo+xlH20w%XEN)ljp zXv!28%#D&(wF>0D>fl_zxl)wIIyv_nW3jKBOYmLPK@S?4JVw~^6Z{dCAY;S_gNS3h zRsK5yy%F&Qq_z+6Z+!%zpM|_wm#&!+P=^qKy zuy2Q;RJEkDX6j2i$MgVDNcs5fPy4o+cyR)^>wq?2ugTDpdhz@FTcr@#0EGG}+mgDk#R3xXV0XsLuF0T(YvE zhn|(NE4n4J@SJi?!?A<2!GII@KFBAu4#Miz@cCa=xvfs@dxOUr2%#AA?&1P@Z&HLN zjK-h>R`XD8IH9__daZ{;I0kZQ^B&6>y*+7Y*w>S%C8&@Absq>NQnO+YNGsRe$(sBr z-kvd|aop^A1ySCrA?3Upl&tM)tM(hAE8-N6iBBq|&2YrIk=IV<0tdC9AkV4`lvyW# zS_mT=zj&RWX?{D~`Hd>940C^=Np=D3MFr$p9~lKk3U#qKQ~R&`zEC^6%tKALW_ zU6!7;$J*LD)H(cjPHW~;*0HekrYX&^FGZu@f6;eapG-Vcyf~6rS1HK%BlWK3t?YZL zsi`*}cl&oiX8$=Dr9<_AOT&}iM-SKoC7y*W2C`PbLZCrjgM&5Ws}RXp{;9tuw> zoAxbW#swr{bB@F9rS*+_Sr(0Yno%iz?egWzgJTE7y#p)a8kRZb8XYQnJ|!@9g@?P@Mf(nan9J5@6wu=pdV6K=DJRlR)2-0*oB34n&S7O@ z(ZN6y9S)NC5l!m?|>B!2sL z)gwN~@-VvRx0RO-B4zXgn)*c|obR*Sa80?Ot7bhnYt>5|ypYBmIc$mtu$5xI>>-zp z8Ii+RC~r4#(tHAiJnu|QOsDY>@}@ToShl-8zr+$lD;w(G3J=O=u4C1*}f0#aVX+@6Jk~CNedVSiP)#i)7Usg2d=rD=&J59%>j8fGr+Pdvf5=$ zZxGNExM1&4*RhJ`70SWbQ{#g;h0@KdZnkc3zs=rZePDY0xekF4Yi4PA?zE^y7;K{C zgCfL$dV%krIZ@JFj*gDq5=?f>-=UH(2mROhM|(;Jx^}RL+qtYeOUs*5!OhBkfNiK!dMaPr{4O-RC{f)t zP?S*<%IfhmLB^~1B;HDNc;(x8yw&H3uj*7oS7o@tiR!7I#Ql=J*BeY}RXDN=q{33| zf`@|`avI(8IZE<258CeTOUz_7uP}%>b)O=YGJcLn-`xK$BNI)dvf+`J*&X$1iMR^& zWtt>5n6B6{ZE&t^^RurT2Ws@aNtzGkeNAt+%l>MMuim`$X}mQSPwZ5~u(Y)o?eNr) z@r|x7BmHgf;CYG~T)A3Rt~R|f6AJuec^K72S5{_9{18c1@@6qV=v{)zyMv7+m)WFyK z_qE=ZZ0-_k{NOG@x4v|G{rhFx<`b%j7iKin%ibX+s zvyBA4eAUKm?E*#t+2^*X+2ZHQ^xd6PUL!#@>%7ZlKE~@<2A}-tdK@$e>$csWm}niS z_Wxw2^-!uU>>@cqYez7jioC|@5E(|sIy>UzZ}L6Sj*z6^hXgTpaZZxy#;o#(&ahtl zW9g^~fhKOelkk=jsAESLC3+@5SbdCO=}un!$YMjBtWrk9s!Hc8Cz0edP6=B*$!_jv zspGE=C*GL@Azc=J$zL#DdS+Pej1E{u_!el-f3tbKH)+#%PWw4ATUib#&@}yx&p)s6 zm+WOVr*)3ZK8(F!C?tmZ7AKe)A}>kwj#*60Y{06;7L?3l>)CdL1^4JlV(=A^ zgOB96SoCY5D@=AI>n9kJh0rUlnyLm^&g6d@sg2-J)z#0JC-JvX_WR_0?@JR>I6G+cQW%bed05PqtxwY2O4e)|W)iM(WkjuAOjpNh%mGy|1>E$i7=Cpn zhAVanpd0pyC;-L6f9}<4}IN~M=rUA7T0J@=0$yqqS#@A9mF0hd4AM|?4kQeKqll3-yVU_ zA{U}i177={I1xQYXLSgt5Kbq!unsVslM;l~+!-H;Z}qL*nf8T16pn_p1CF~_YR{UR zi>GkaU2NHjs7rFzWk~LY_;G&EaG>N9kkj0exf>SotmcQE7j4AsgissU?O*lV10gbO zB((g7p{0cduN7Z3h=hOKR8f-i&{ot^VcWuKhe^|-VZT9S`tk3^51WU^HL+lrDmXKE z_wOYlWQ?hbkTK5k=ly3%ts})ulL8X^-^MGgba*_P!zm-;@7Ez~!n~ZONW1F(BN0o& z;~z1p1;T&t6eUQD*}6{$Z^Hj?O{Bv%!Gt3?N@vN literal 0 HcmV?d00001 diff --git a/examples/cluster.yaml/allinone-cluster.yaml b/examples/cluster.yaml/allinone-cluster.yaml new file mode 100644 index 000000000..e75f90688 --- /dev/null +++ 
b/examples/cluster.yaml/allinone-cluster.yaml @@ -0,0 +1,11 @@ +node_defaults: + keyfile: "/home/username/.ssh/id_rsa" + username: "centos" + +nodes: + - name: "k8s-master" + address: "10.101.0.1" + internal_address: "192.168.0.1" + roles: ["master", "worker"] + +cluster_name: "k8s.example.com" diff --git a/examples/cluster.yaml/full-cluster.yaml b/examples/cluster.yaml/full-cluster.yaml new file mode 100644 index 000000000..769b3adb7 --- /dev/null +++ b/examples/cluster.yaml/full-cluster.yaml @@ -0,0 +1,494 @@ +node_defaults: + keyfile: "/home/username/.ssh/id_rsa" + username: "centos" + +vrrp_ips: +- hosts: + - name: k8s-lb-1 + priority: 254 + - name: k8s-lb-2 + priority: 253 + id: d8efc729e4 + interface: eth0 + ip: 192.168.0.250 + password: 11a1aabe + router_id: '250' + +nodes: + - name: "k8s-lb-1" + address: "10.101.0.1" + internal_address: "192.168.0.1" + roles: ["balancer"] + - name: "k8s-lb-2" + address: "10.101.0.2" + internal_address: "192.168.0.2" + roles: ["balancer"] + - name: "k8s-master-1" + address: "10.101.0.3" + internal_address: "192.168.0.3" + roles: ["master"] + - name: "k8s-master-2" + address: "10.101.0.4" + internal_address: "192.168.0.4" + roles: ["master"] + - name: "k8s-master-3" + address: "10.101.0.5" + internal_address: "192.168.0.5" + roles: ["master"] + - name: "k8s-worker-1" + address: "10.101.0.6" + internal_address: "192.168.0.6" + roles: ["worker"] + taints: + - "node-role.kubernetes.io/remove-example:NoSchedule-" + - "node-role.kubernetes.io/add-example=add-example:NoSchedule" + - name: "k8s-worker-2" + address: "10.101.0.7" + internal_address: "192.168.0.7" + roles: ["worker"] + labels: + netcracker-infra: infra + region: europe + - name: "k8s-worker-3" + address: "10.101.0.8" + internal_address: "192.168.0.8" + roles: ["worker"] + labels: + netcracker-infra: infra + region: asia + +cluster_name: "k8s.example.com" +public_cluster_ip: "10.101.0.1" + +services: + + kubeadm: + kubernetesVersion: v1.16.3 + controlPlaneEndpoint: 'k8s.example.com:6443' + imageRepository: artifactory.example.com:5443/k8s.gcr.io + networking: + podSubnet: 10.30.0.0/24 + serviceSubnet: 172.30.0.0/24 + apiServer: + certSANs: + - 10.101.0.1 + - 10.101.0.2 + - 10.101.0.3 + - 10.101.0.4 + - 10.101.0.5 + - 10.101.0.6 + - 10.101.0.7 + - 10.101.0.8 + - 192.168.0.1 + - 192.168.0.2 + - 192.168.0.3 + - 192.168.0.4 + - 192.168.0.5 + - 192.168.0.6 + - 192.168.0.7 + - 192.168.0.8 + - k8s-lb-1 + - k8s-lb-2 + - k8s-master-1 + - k8s-master-2 + - k8s-master-3 + - k8s-worker-1 + - k8s-worker-2 + - k8s-worker-3 + extraArgs: + enable-admission-plugins: NodeRestriction,PodNodeSelector + + cri: + dockerConfig: + ipv6: True + log-driver: json-file + log-opts: + max-size: 128m + max-file: "5" + exec-opts: + - native.cgroupdriver=systemd + insecure-registries: + - artifactory.example.com:5443 + registry-mirrors: + - https://artifactory.example.com:5443 + + resolv.conf: + search: default + nameservers: + - 1.1.1.1 + - 1.0.0.1 + - 2606:4700:4700::1111 + - 2606:4700:4700::1001 + + etc_hosts: + 1.1.1.1: + - example.com + + ntp: + # For RHEL OS use chrony: + chrony: + servers: + - 0.de.pool.ntp.org iburst + - 1.de.pool.ntp.org iburst + makestep: 5 10 + rtcsync: True + # For Debian OS use timesyncd: + timesyncd: + Time: + NTP: + - 0.de.pool.ntp.org + - 1.de.pool.ntp.org + FallbackNTP: + - 2.de.pool.ntp.org + - 3.de.pool.ntp.org + RootDistanceMaxSec: 10 + PollIntervalMinSec: 64 + PollIntervalMaxSec: 1024 + + thirdparties: + /usr/bin/kubeadm: + source: 
'https://example.com/kubernetes/v1.16.3/bin/linux/amd64/kubeadm' + sha1: e5cdfcda337a5c8d59035da9db0c2b02913271d1 + groups: + - master + - worker + /opt/cni/cni-plugins-linux.tgz: + source: 'https://example.com/cni-plugins/v0.8.3/cni-plugins-linux-amd64-v0.8.3.tgz' + sha1: f662ec5b648e114802276f8f353ad48a3376da47 + unpack: '/opt/cni/bin' + group: master + /usr/bin/calicoctl: + source: 'https://example.com/calicoctl/v3.10.1/calicoctl-linux-amd64' + + kernel_security: + # For RHEL OS use selinux: + selinux: + state: enforcing + policy: targeted + permissive: + - http_port_t + - http_cache_port_t + - commplex_main_port_t + # For Debian OS use apparmor: + apparmor: + complain: + - man_filter + disable: + - man_groff + + modprobe: + - br_netfilter + - ip_vs + - ip_vs_rr + - ip_vs_wrr + - ip_vs_sh + - ip6table_filter + - nf_conntrack_ipv6 + - nf_nat_masquerade_ipv6 + - nf_reject_ipv6 + - nf_defrag_ipv6 + + sysctl: + net.bridge.bridge-nf-call-iptables: 1 + net.ipv4.ip_forward: 1 + net.ipv4.ip_nonlocal_bind: 1 + net.bridge.bridge-nf-call-ip6tables: 1 + net.ipv6.conf.all.forwarding: 1 + net.ipv6.ip_nonlocal_bind: 1 + + loadbalancer: + haproxy: + config: + defaults: + timeout_connect: '10s' + timeout_client: '1m' + timeout_server: '1m' + timeout_tunnel: '60m' + timeout_client_fin: '1m' + maxconn: 10000 + + packages: + package_manager: + replace-repositories: true + # For RHEL OS use yum-like format: + repositories: + centos-sclo-haproxy18: + name: "CentOS SCLO rh-haproxy18" + enabled: 1 + gpgcheck: 0 + baseurl: "http://mirror.centos.org/centos/7/sclo/x86_64/rh/rh-haproxy18/" + # For Debian OS use apt-like format: + # repositories: + # - "deb [arch=amd64 trusted=yes] http://example.com/deb/ubuntu/ focal main restricted" + associations: + haproxy: + executable_name: '/bin/haproxy' + package_name: 'haproxy' + service_name: 'haproxy' + config_location: '/etc/haproxy/haproxy.cfg' + docker: + package_name: + - 'docker-ce-19.03*' + - 'docker-ce-cli-19.03*' + install: + include: + - ethtool + - ebtables + - socat + - curl + - openssl + - unzip + - policycoreutils-python + +rbac: + account_defaults: + namespace: kube-system + configs: + - apiVersion: v1 + kind: ServiceAccount + metadata: {} + + accounts: + - name: superadmin + role: cluster-admin + - name: superadmin2 + role: cluster-admin + namespace: kube-system + + psp: + pod-security: enabled + oob-policies: + default: enabled + host-network: enabled + anyuid: enabled + custom-policies: + psp-list: + - apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: most-restricted-psp + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + spec: + privileged: false + # Allow core volume types. 
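          # Illustrative note, not part of this manifest: this custom policy has essentially
          # the same spec as examples/most_restricted_psp.yaml shipped later in this patch.
          # A workload admitted under it has to run as non-root, keep its group IDs within
          # 1-65535, disable privilege escalation and use a read-only root filesystem,
          # e.g. a securityContext along these lines (example values only):
          #   securityContext:
          #     runAsUser: 1000
          #     runAsGroup: 1000
          #     allowPrivilegeEscalation: false
          #     readOnlyRootFilesystem: true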
+ hostPID: false + hostIPC: false + hostNetwork: false + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: true + runAsUser: + rule: 'MustRunAsNonRoot' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + runAsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + allowPrivilegeEscalation: false + seLinux: + rule: 'RunAsAny' + requiredDropCapabilities: + - ALL + roles-list: + - apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: most-restricted-psp-cr + rules: + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - most-restricted-psp + bindings-list: + - kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: most-restricted-psp-crb + roleRef: + kind: ClusterRole + name: most-restricted-psp-cr + apiGroup: rbac.authorization.k8s.io + subjects: + - kind: ServiceAccount + # it is possible to bind to non-existing SA in non-existing namespace + name: sa-name + namespace: sa-namespace + + + +plugin_defaults: + installation: + registry: artifactory.example.com:5443 + +plugins: + + calico: + install: true + version: v3.10.1 + installation: + priority: 0 + mode: ipip + crossSubnet: true + natOutgoing: true + mtu: 1440 + typha: + enabled: false + replicas: 2 + image: calico/typha:v3.10.1 + nodeSelector: + beta.kubernetes.io/os: linux + cni: + image: calico/cni:v3.10.1 + ipam: + assign_ipv4: true + assign_ipv6: false + ipv4_pools: + - 192.168.0.0/24 + - default-ipv4-ippool + ipv6_pools: + - default-ipv6-ippool + type: calico-ipam + node: + image: calico/node:v3.10.1 + kube-controllers: + image: calico/kube-controllers:v3.10.1 + nodeSelector: + beta.kubernetes.io/os: linux + flexvol: + image: calico/pod2daemon-flexvol:v3.10.1 + + flannel: + install: false + installation: + priority: 0 + image: quay.io/coreos/flannel:v0.11.0-amd64 + + nginx-ingress-controller: + install: true + installation: + priority: 1 + controller: + image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.26.1 + ssl: + enableSslPassthrough: false + default-certificate: + paths: + cert: /path/to/cert + key: /path/to/key + nodeSelector: + kubernetes.io/os: linux + + haproxy-ingress-controller: + install: false + installation: + priority: 1 + controller: + image: haproxytech/kubernetes-ingress:1.2.7 + nodeSelector: + kubernetes.io/os: linux + backend: + image: k8s.gcr.io/defaultbackend:1.0 + nodeSelector: + kubernetes.io/os: linux + + kubernetes-dashboard: + install: true + installation: + priority: 2 + hostname: 'dashboard.k8s.example.com' + dashboard: + image: kubernetesui/dashboard:v2.0.0-rc2 + nodeSelector: + beta.kubernetes.io/os: linux + metrics-scraper: + image: kubernetesui/metrics-scraper:v1.0.2 + nodeSelector: + beta.kubernetes.io/os: linux + ingress: + metadata: + name: kubernetes-dashboard + namespace: kubernetes-dashboard + annotations: + nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + nginx.ingress.kubernetes.io/ssl-redirect: "true" + nginx.ingress.kubernetes.io/rewrite-target: / + nginx.ingress.kubernetes.io/secure-backends: "true" + nginx.ingress.kubernetes.io/ssl-passthrough: "true" + spec: + tls: + - hosts: + - '{{ plugins["kubernetes-dashboard"].hostname }}' + rules: + - host: '{{ plugins["kubernetes-dashboard"].hostname }}' + http: + paths: + - path: / + backend: + serviceName: kubernetes-dashboard + 
servicePort: 443 + + sock-shop: + install: true + installation: + procedures: + - template: templates/plugins/sock-shop.yaml.j2 + expect: + pods: + - carts + - carts-db + - catalogue + - catalogue-db + - front-end + - orders + - orders-db + - payment + - queue-master + - rabbitmq + - shipping + - user + - user-db + - template: templates/plugins/sock-shop-ingress.yaml.j2 + carts-db: + image: mongo + carts: + image: weaveworksdemos/carts:0.4.8 + catalogue-db: + image: weaveworksdemos/catalogue-db:0.3.0 + catalogue: + image: weaveworksdemos/catalogue:0.3.5 + front-end: + image: weaveworksdemos/front-end:0.3.12 + orders-db: + image: mongo + orders: + image: weaveworksdemos/orders:0.4.7 + payment: + image: weaveworksdemos/payment:0.4.3 + queue-master: + image: weaveworksdemos/queue-master:0.3.1 + rabbitmq: + image: rabbitmq:3.6.8 + shipping: + image: weaveworksdemos/shipping:0.4.8 + user-db: + image: weaveworksdemos/user-db:0.4.0 + user: + image: weaveworksdemos/user:0.4.7 diff --git a/examples/cluster.yaml/miniha-cluster.yaml b/examples/cluster.yaml/miniha-cluster.yaml new file mode 100644 index 000000000..ebcf4fd69 --- /dev/null +++ b/examples/cluster.yaml/miniha-cluster.yaml @@ -0,0 +1,22 @@ +node_defaults: + keyfile: "/home/username/.ssh/id_rsa" + username: "centos" + +vrrp_ips: +- 192.168.0.250 + +nodes: + - name: "k8s-master-1" + address: "10.101.0.1" + internal_address: "192.168.0.1" + roles: ["balancer", "master", "worker"] + - name: "k8s-master-2" + address: "10.101.0.2" + internal_address: "192.168.0.2" + roles: ["balancer", "master", "worker"] + - name: "k8s-master-3" + address: "10.101.0.3" + internal_address: "192.168.0.3" + roles: ["balancer", "master", "worker"] + +cluster_name: "k8s.example.com" diff --git a/examples/cluster.yaml/minimal-cluster.yaml b/examples/cluster.yaml/minimal-cluster.yaml new file mode 100644 index 000000000..88e013f30 --- /dev/null +++ b/examples/cluster.yaml/minimal-cluster.yaml @@ -0,0 +1,35 @@ +node_defaults: + keyfile: "/home/username/.ssh/id_rsa" + username: "centos" + +nodes: + - name: "k8s-lb" + address: "10.101.0.1" + internal_address: "192.168.0.1" + roles: ["balancer"] + - name: "k8s-master-1" + address: "10.101.0.2" + internal_address: "192.168.0.2" + roles: ["master"] + - name: "k8s-master-2" + address: "10.101.0.3" + internal_address: "192.168.0.3" + roles: ["master"] + - name: "k8s-master-3" + address: "10.101.0.4" + internal_address: "192.168.0.4" + roles: ["master"] + - name: "k8s-worker-1" + address: "10.101.0.5" + internal_address: "192.168.0.5" + roles: ["worker"] + - name: "k8s-worker-2" + address: "10.101.0.6" + internal_address: "192.168.0.6" + roles: ["worker"] + - name: "k8s-worker-3" + address: "10.101.0.7" + internal_address: "192.168.0.7" + roles: ["worker"] + +cluster_name: "k8s.example.com" diff --git a/examples/cluster.yaml/typical-cluster.yaml b/examples/cluster.yaml/typical-cluster.yaml new file mode 100644 index 000000000..6ee8ade54 --- /dev/null +++ b/examples/cluster.yaml/typical-cluster.yaml @@ -0,0 +1,104 @@ +node_defaults: + keyfile: "/home/username/.ssh/id_rsa" + username: "centos" + +vrrp_ips: +- 192.168.0.250 + +nodes: + - name: "k8s-lb-1" + address: "10.101.0.1" + internal_address: "192.168.0.1" + roles: ["balancer"] + - name: "k8s-lb-2" + address: "10.101.0.2" + internal_address: "192.168.0.2" + roles: ["balancer"] + - name: "k8s-master-1" + address: "10.101.0.3" + internal_address: "192.168.0.3" + roles: ["master"] + - name: "k8s-master-2" + address: "10.101.0.4" + internal_address: "192.168.0.4" + roles: 
["master"] + - name: "k8s-master-3" + address: "10.101.0.5" + internal_address: "192.168.0.5" + roles: ["master"] + - name: "k8s-worker-1" + address: "10.101.0.6" + internal_address: "192.168.0.6" + roles: ["worker"] + - name: "k8s-worker-2" + address: "10.101.0.7" + internal_address: "192.168.0.7" + roles: ["worker"] + - name: "k8s-worker-3" + address: "10.101.0.8" + internal_address: "192.168.0.8" + roles: ["worker"] + +cluster_name: "k8s.example.com" + +services: + resolv.conf: + search: default + nameservers: + - 1.1.1.1 + - 1.0.0.1 + - 2606:4700:4700::1111 + - 2606:4700:4700::1001 + + ntp: + # For RHEL OS use chrony: + chrony: + servers: + - 0.de.pool.ntp.org iburst + - 1.de.pool.ntp.org iburst + makestep: 5 10 + rtcsync: True + # For Debian OS use timesyncd: + timesyncd: + Time: + NTP: + - 0.de.pool.ntp.org + - 1.de.pool.ntp.org + FallbackNTP: + - 2.de.pool.ntp.org + - 3.de.pool.ntp.org + RootDistanceMaxSec: 10 + PollIntervalMinSec: 64 + PollIntervalMaxSec: 1024 + + packages: + package_manager: + replace-repositories: true + # For RHEL OS use yum-like format: + repositories: + centos-sclo-haproxy18: + name: "CentOS SCLO rh-haproxy18" + enabled: 1 + gpgcheck: 0 + baseurl: "http://mirror.centos.org/centos/7/sclo/x86_64/rh/rh-haproxy18/" + # For Debian OS use apt-like format: + # repositories: + # - "deb [arch=amd64 trusted=yes] http://example.com/deb/ubuntu/ focal main restricted" + install: + include: + - ethtool + - ebtables + - socat + - curl + - openssl + - unzip + - policycoreutils-python + +rbac: + accounts: + - name: superadmin + role: cluster-admin + +plugins: + kubernetes-dashboard: + install: true diff --git a/examples/most_restricted_psp.yaml b/examples/most_restricted_psp.yaml new file mode 100644 index 000000000..cc8984171 --- /dev/null +++ b/examples/most_restricted_psp.yaml @@ -0,0 +1,54 @@ +# This PSP is not for deployment, but just for reference to how "most restricted" PSP looks like +# so that when in future we will add new PSPs we can modify this PSP step-by-step +# until suitable restricted access if found +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: default + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' +spec: + privileged: false + # Allow core volume types. + hostPID: false + hostIPC: false + hostNetwork: false + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + # Assume that persistentVolumes set up by the cluster admin are safe to use. + - 'persistentVolumeClaim' + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: true + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + runAsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + seLinux: + rule: 'RunAsAny' + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. 
+ requiredDropCapabilities: + - ALL \ No newline at end of file diff --git a/examples/procedure.yaml/full-backup.yaml b/examples/procedure.yaml/full-backup.yaml new file mode 100644 index 000000000..d7c5a0d27 --- /dev/null +++ b/examples/procedure.yaml/full-backup.yaml @@ -0,0 +1,18 @@ +backup_location: /home/centos + +backup_plan: + etcd: + source_node: master-1 + certificates: + cert: /etc/kubernetes/pki/etcd/server.crt + key: /etc/kubernetes/pki/etcd/server.key + cacert: /etc/kubernetes/pki/etcd/ca.crt + nodes: + /etc/resolv.conf: True + /root: True + /etc/hosts: False + kubernetes: + namespaced_resources: + namespaces: all + resources: all + nonnamespaced_resources: all diff --git a/examples/procedure.yaml/full-reboot.yaml b/examples/procedure.yaml/full-reboot.yaml new file mode 100644 index 000000000..72db9ace1 --- /dev/null +++ b/examples/procedure.yaml/full-reboot.yaml @@ -0,0 +1,6 @@ +graceful_reboot: True + +nodes: + - name: master-1 + - name: master-2 + - name: master-3 diff --git a/examples/procedure.yaml/full-restore.yaml b/examples/procedure.yaml/full-restore.yaml new file mode 100644 index 000000000..65f0018be --- /dev/null +++ b/examples/procedure.yaml/full-restore.yaml @@ -0,0 +1,23 @@ +backup_location: /home/centos/backup.tar.gz + +restore_plan: + etcd: + image: k8s.gcr.io/etcd:3.3.15-0 + certificates: + cert: /etc/kubernetes/pki/etcd/server.crt + key: /etc/kubernetes/pki/etcd/server.key + cacert: /etc/kubernetes/pki/etcd/ca.crt + peer_cert: /etc/kubernetes/pki/etcd/peer.crt + peer_key: /etc/kubernetes/pki/etcd/peer.key + peer_cacert: /etc/kubernetes/pki/etcd/ca.crt + thirdparties: + /usr/bin/kubeadm: + source: https://storage.googleapis.com/kubernetes-release/release/v1.18.8/bin/linux/amd64/kubeadm + /usr/bin/kubelet: + source: https://storage.googleapis.com/kubernetes-release/release/v1.18.8/bin/linux/amd64/kubelet + /usr/bin/kubectl: + source: https://storage.googleapis.com/kubernetes-release/release/v1.18.8/bin/linux/amd64/kubectl + /opt/cni/cni-plugins-linux.tgz: + source: https://github.com/containernetworking/plugins/releases/download/v0.8.6/cni-plugins-linux-amd64-v0.8.6.tgz + /usr/bin/calicoctl: + source: https://github.com/projectcalico/calicoctl/releases/download/v3.14.1/calicoctl-linux-amd64 diff --git a/examples/procedure.yaml/minimal-backup.yaml b/examples/procedure.yaml/minimal-backup.yaml new file mode 100644 index 000000000..cffbca90b --- /dev/null +++ b/examples/procedure.yaml/minimal-backup.yaml @@ -0,0 +1,5 @@ +backup_plan: + nodes: + /etc/resolv.conf: True + /root: True + /etc/hosts: False diff --git a/examples/procedure.yaml/minimal-restore.yaml b/examples/procedure.yaml/minimal-restore.yaml new file mode 100644 index 000000000..f2abf4239 --- /dev/null +++ b/examples/procedure.yaml/minimal-restore.yaml @@ -0,0 +1 @@ +backup_location: /home/centos/backup.tar.gz diff --git a/kubetool/__init__.py b/kubetool/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/kubetool/__main__.py b/kubetool/__main__.py new file mode 100755 index 000000000..c93b31a65 --- /dev/null +++ b/kubetool/__main__.py @@ -0,0 +1,189 @@ +#!/usr/bin/env python3 + +import sys +from collections import OrderedDict + +# Don't remove this line. The idna encoding +# is used by getaddrinfo when dealing with unicode hostnames, +# and in some cases, there appears to be a race condition +# where threads will get a LookupError on getaddrinfo() saying +# that the encoding doesn't exist. 
Using the idna encoding before +# running any kubetools code (and any threads it may create) ensures that +# the encodings.idna is imported and registered in the codecs registry, +# which will stop the LookupErrors from happening. +# See: https://bugs.python.org/issue29288 +u''.encode('idna') + +# This redirect required for fixing Fabric2 problem: +# In Kubetool stdout messages writes only to stdout - no stderr messaging at all, +# but Fabric2 writes to stderr if hide=false used and remote console has stderr messages. +sys.stderr = sys.stdout + +release_version = 'non-release version' + +procedures = OrderedDict({ + 'install': { + 'description': "Install a cluster from scratch", + 'group': 'installation' + }, + 'upgrade': { + 'description': "Automatically upgrade the entire Kubernetes cluster to a new version", + 'group': 'maintenance' + }, + 'backup': { + 'description': "Backup Kubernetes resources and nodes content to backup file", + 'group': 'maintenance' + }, + 'restore': { + 'description': "Restore Kubernetes resources and nodes content from backup file", + 'group': 'maintenance' + }, + 'add_node': { + 'description': "Add new nodes to an existing cluster", + 'group': 'maintenance' + }, + 'remove_node': { + 'description': "Remove existing nodes from cluster", + 'group': 'maintenance' + }, + 'manage_psp': { + 'description': "Manage PSP on Kubernetes cluster", + 'group': 'maintenance' + }, + 'cert_renew': { + 'description': "Renew certificates on Kubernetes cluster", + 'group': 'maintenance' + }, + 'reboot': { + 'description': "Reboot Kubernetes nodes", + 'group': 'maintenance' + }, + 'check_iaas': { + 'description': "Check environment for compliance with IAAS requirements", + 'group': 'checks' + }, + 'check_paas': { + 'description': "Check environment for compliance with PAAS requirements", + 'group': 'checks' + }, + 'version': { + 'description': "Print current release version", + 'group': 'other' + }, + 'do': { + 'description': "Execute shell command on cluster nodes", + 'group': 'other' + }, + 'selftest': { + 'description': "Test internal imports and resources presence", + 'group': 'other' + }, + 'migrate_cri': { + 'description': "Migrate from Docker to Containerd", + 'group': 'maintenance' + }, +}) + + +def main(): + + arguments = sys.argv[1:] + + if len(arguments) > 0: + if arguments[0] == 'selftest': + return selftest() + elif arguments[0] == 'version': + print('Kubetools %s' % release_version) + return + + if len(arguments) < 1 or arguments[0] not in procedures.keys(): + descriptions_print_list = [] + max_module_name_size = len(max(procedures.keys(), key=len)) + + items_description_by_groups = {} + + for module_name, module in procedures.items(): + if items_description_by_groups.get(module['group']) is None: + items_description_by_groups[module['group']] = [] + items_description_by_groups[module['group']].append(' %s%s %s' % (module_name, ' ' * (max_module_name_size - len(module_name)), module['description'])) + + previous_group = None + for group, descriptions in items_description_by_groups.items(): + if group != previous_group: + descriptions_print_list.append('\n%s:' % group.upper()) + previous_group = group + for description in descriptions: + descriptions_print_list.append(' ' + description) + return print('''The following procedures available: +%s + +Usage: kubetools +''' % '\n'.join(descriptions_print_list)) + + result = import_procedure(arguments[0]).main(arguments[1:]) + if result is not None: + from kubetool.testsuite import TestSuite + if isinstance(result, TestSuite) 
and result.is_any_test_failed(): + sys.exit(1) + + +def import_procedure(name): + module_name = 'kubetool.procedures.%s' % name + return __import__(module_name, fromlist=['object']) + + +def selftest(): + + print("Running selftest") + + import time + + time_start = int(round(time.time() * 1000)) + + from collections import OrderedDict + import types + + for procedure, procedure_details in procedures.items(): + print("\nImporting %s..." % procedure) + + if procedure in ['version', 'selftest']: + continue + + module = import_procedure(procedure) + imports = [] + + for attr in dir(module): + if isinstance(getattr(module, attr), types.ModuleType): + imports.append(attr) + + print("%s has %s imports" % (procedure, len(imports))) + + if "main" not in dir(module): + raise Exception("No main method in %s" % procedure) + if procedure != "do": + if "tasks" not in dir(module): + raise Exception("Tasks tree is not presented in %s" % procedure) + if not isinstance(module.tasks, OrderedDict): + raise Exception("Tasks are not ordered in %s" % procedure) + if not module.tasks: + raise Exception("Tasks are empty in %s" % procedure) + + print("%s OK" % procedure) + + del module + del sys.modules['kubetool.procedures.%s' % procedure] + + print("\nTrying fake cluster...") + + from kubetool import demo + + demo.new_cluster(demo.generate_inventory(**demo.FULLHA)) + + print("Finished") + + time_end = int(round(time.time() * 1000)) + print("\nElapsed: %sms\n" % (time_end-time_start)) + + +if __name__ == '__main__': + main() diff --git a/kubetool/apparmor.py b/kubetool/apparmor.py new file mode 100644 index 000000000..2d9a3ed7a --- /dev/null +++ b/kubetool/apparmor.py @@ -0,0 +1,114 @@ +import json + +from kubetool import system + + +def verify_inventory(inventory, cluster): + expected_states = ['enforce', 'complain', 'disable'] + for state in inventory['services']['kernel_security'].get('apparmor', {}).keys(): + if state not in expected_states: + raise Exception('Unknown apparmor mode found in configfile. Expected %s, but \'%s\' found.' + % (expected_states, state)) + return inventory + + +def get_status(group): + log = group.cluster.log + result = group.sudo("apparmor_status --json") + parsed_result = {} + for connection, node_result in result.items(): + log.verbose('Parsing status for %s...' 
% connection.host) + parsed_result[connection] = parse_status(node_result.stdout) + print_status(log, parsed_result) + return parsed_result + + +def parse_status(result_stdout): + result = {} + parsed_data = json.loads(result_stdout) + for profile_name, profile_state in parsed_data['profiles'].items(): + result[profile_state] = profile_name + return result + + +def print_status(log, parsed_result): + res = "AppArmor Status:" + for state in parsed_result.keys(): + res += "\n Profiles in %s mode:" % state + for profile in parsed_result[state]: + res += "\n - %s" % profile + log.verbose(res) + + +def is_state_valid(group, expected_profiles): + log = group.cluster.log + + log.verbose('Verifying Apparmor modes...') + + parsed_result = get_status(group) + valid = True + + for connection, status in parsed_result.items(): + for state, profiles in expected_profiles.items(): + if not profiles: + continue + if state == 'disable': + for profile in profiles: + for remote_profiles in status.values(): + if profile in remote_profiles: + valid = False + log.verbose('Mode %s is enabled on remote host %s' % (state, connection.host)) + break + else: + if not status.get(state): + valid = False + log.verbose('Mode %s is not presented on remote host %s' % (state, connection.host)) + break + for profile in profiles: + if convert_profile(profile) not in status[state]: + valid = False + log.verbose('Profile %s is not enabled in %s mode on remote host %s' % (profile, state, connection.host)) + break + + return valid, parsed_result + + +def convert_profile(profile): + profile = profile.replace('/', '.') + if profile[0] == '.': + profile = profile[1:] + return profile + + +def configure_apparmor(group, expected_profiles): + cmd = '' + for profile in expected_profiles.get('enforce', []): + profile = convert_profile(profile) + cmd += 'sudo rm -f /etc/apparmor.d/disable/%s; sudo rm -f /etc/apparmor.d/force-complain/%s; ' % (profile, profile) + for profile in expected_profiles.get('complain', []): + profile = convert_profile(profile) + cmd += 'sudo rm -f /etc/apparmor.d/disable/%s; sudo ln -s /etc/apparmor.d/%s /etc/apparmor.d/force-complain/; ' % (profile, profile) + for profile in expected_profiles.get('disable', []): + profile = convert_profile(profile) + cmd += 'sudo rm -f /etc/apparmor.d/force-complain/%s; sudo ln -s /etc/apparmor.d/%s /etc/apparmor.d/disable/; ' % (profile, profile) + cmd += 'sudo systemctl reload apparmor.service && sudo apparmor_status' + return group.sudo(cmd) + + +def setup_apparmor(group): + log = group.cluster.log + + if system.get_os_family(group.cluster) != 'debian': + log.debug("Skipped - Apparmor is supported only on Ubuntu/Debian") + return + + expected_profiles = group.cluster.inventory['services']['kernel_security'].get('apparmor', {}) + valid, parsed_result = is_state_valid(group, expected_profiles) + + if valid: + log.debug("Skipped - Apparmor already correctly configured") + return + + log.debug(configure_apparmor(group, expected_profiles)) + group.cluster.schedule_cumulative_point(system.reboot_nodes) + group.cluster.schedule_cumulative_point(system.verify_system) diff --git a/kubetool/apt.py b/kubetool/apt.py new file mode 100644 index 000000000..d6b666944 --- /dev/null +++ b/kubetool/apt.py @@ -0,0 +1,83 @@ +import io + +DEBIAN_HEADERS = 'DEBIAN_FRONTEND=noninteractive ' + + +def ls_repofiles(group): + return group.sudo('ls -la /etc/apt/sources.list.d') + + +def backup_repo(group, repo_filename="*"): + if not 
group.cluster.inventory['services']['packages']['package_manager']['replace-repositories']: + group.cluster.log.debug("Skipped - repos replacement disabled in configuration") + return + # all files in directory will be renamed: xxx.repo -> xxx.repo.bak + # if there already any files with ".bak" extension, they should not be renamed to ".bak.bak"! + return group.sudo( + "find %s -type f -name '%s.list' | sudo xargs -iNAME mv -f NAME NAME.bak" % ("/etc/apt/", repo_filename)) + + +def add_repo(group, repo_data="", repo_filename="predefined"): + # if repo_data is list, then convert it to string using join + if isinstance(repo_data, list): + repo_data_str = "\n".join(repo_data) + "\n" + else: + repo_data_str = str(repo_data) + group.put(io.StringIO(repo_data_str), '%s/%s.list' % ("/etc/apt/sources.list.d/", repo_filename), sudo=True) + return group.sudo(DEBIAN_HEADERS + 'apt clean && sudo apt update') + + +def clean(group): + return group.sudo(DEBIAN_HEADERS + "apt clean") + + +def install(group, include=None, exclude=None): + if include is None: + raise Exception('You must specify included packages to install') + + if isinstance(include, list): + include = ' '.join(include) + command = DEBIAN_HEADERS + 'apt update && ' + \ + DEBIAN_HEADERS + 'sudo apt install -y %s' % include + + if exclude is not None: + if isinstance(exclude, list): + exclude = ','.join(exclude) + command += ' --exclude=%s' % exclude + + return group.sudo(command) + # apt fails to install (downgrade) package if it is already present and has higher version, + # thus we do not need additional checks here (in contrast to yum) + + +def remove(group, include=None, exclude=None): + if include is None: + raise Exception('You must specify included packages to remove') + + if isinstance(include, list): + include = ' '.join(include) + command = DEBIAN_HEADERS + 'apt purge -y %s' % include + + if exclude is not None: + if isinstance(exclude, list): + exclude = ','.join(exclude) + command += ' --exclude=%s' % exclude + + return group.sudo(command) + + +def upgrade(group, include=None, exclude=None): + if include is None: + raise Exception('You must specify included packages to upgrade') + + if isinstance(include, list): + include = ' '.join(include) + command = DEBIAN_HEADERS + 'apt update && ' + \ + DEBIAN_HEADERS + 'sudo apt upgrade -y %s' % include + + if exclude is not None: + if isinstance(exclude, list): + exclude = ','.join(exclude) + command += ' --exclude=%s' % exclude + + return group.sudo(command) diff --git a/kubetool/audit.py b/kubetool/audit.py new file mode 100644 index 000000000..5ceb03edf --- /dev/null +++ b/kubetool/audit.py @@ -0,0 +1,37 @@ +""" +This module works with audit on remote systems. +Using this module you can apply audit rules. +""" + +import io + +from kubetool import system +from kubetool.core import utils +from kubetool.core.group import NodeGroup, NodeGroupResult + + +def apply_audit_rules(group: NodeGroup) -> NodeGroupResult or None: + """ + Generates and applies audit rules to the group. 
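    For reference, a minimal illustrative inventory fragment consumed by this function
    (the rule value below is an example, not a shipped default):

        services:
          audit:
            rules:
              - '-w /var/lib/docker -k docker'

    The listed rules are joined, uploaded to /etc/audit/rules.d/predefined.rules on the
    group's nodes, and auditd is restarted; on non-RHEL OS families the step is skipped.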
+ """ + + log = group.cluster.log + + # TODO: fix this - currently audit preinstalled only on Centos/RHEL, but not presented on Ubuntu/Debian + if system.get_os_family(group.cluster) not in ['rhel', 'rhel8']: + log.debug('Skipped - audit not supported on debian os family') + return + + rules = group.cluster.inventory['services'].get('audit', {}).get('rules') + if not rules: + log.debug('Skipped - no audit rules in inventory') + return + + log.debug('Applying audit rules...') + rules_content = " \n".join(rules) + + utils.dump_file(group.cluster, rules_content, 'predefined.rules') + group.put(io.StringIO(rules_content), '/etc/audit/rules.d/predefined.rules', + sudo=True, backup=True) + + return group.sudo('service auditd restart') diff --git a/kubetool/core/__init__.py b/kubetool/core/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/kubetool/core/cluster.py b/kubetool/core/cluster.py new file mode 100755 index 000000000..d91ea6a5c --- /dev/null +++ b/kubetool/core/cluster.py @@ -0,0 +1,285 @@ +#!/usr/bin/env python3 +import re +from copy import deepcopy +from typing import Dict, List + +import yaml + +from kubetool.core import log +from kubetool.core.connections import ConnectionPool, Connections +from kubetool.core.environment import Environment +from kubetool.core.group import NodeGroup + +jinja_query_regex = re.compile("{{ .* }}", re.M) + + +class KubernetesCluster(Environment): + + def __init__(self, inventory, context, procedure_inventory=None, gather_facts=False): + + self.supported_roles = [ + "balancer", + "master", + "worker" + ] + + self.roles = [] + self.ips = { + "all": [] + } + self.nodes: Dict[str, NodeGroup] = {} + + self.context = context + self.context['runtime_vars'] = {} + + with open(utils.get_resource_absolute_path('resources/configurations/globals.yaml', + script_relative=True), 'r') as stream: + self._globals = yaml.safe_load(stream) + + with open(utils.get_resource_absolute_path('resources/configurations/defaults.yaml', + script_relative=True), 'r') as stream: + self._defaults = yaml.safe_load(stream) + + if isinstance(inventory, dict): + self.raw_inventory = deepcopy(inventory) + else: + with open(inventory, 'r') as stream: + self.raw_inventory = yaml.safe_load(stream) + + self._log = log.init_log_from_context_args(self) + + self.procedure_inventory = {} + if procedure_inventory is not None: + if isinstance(procedure_inventory, dict): + self.procedure_inventory = deepcopy(procedure_inventory) + else: + with open(procedure_inventory, 'r') as stream: + self.procedure_inventory = yaml.safe_load(stream) + + self._inventory = {} + self._connection_pool = ConnectionPool(self) + + if gather_facts: + self.gather_facts('before') + + self._inventory = defaults.enrich_inventory(self, self.raw_inventory) + + @property + def inventory(self) -> dict: + return self._inventory + + @property + def globals(self) -> dict: + return self._globals + + @property + def defaults(self) -> dict: + return self._defaults + + @property + def log(self) -> log.EnhancedLogger: + return self._log.logger + + def make_group(self, ips: List[str] or List[NodeGroup]) -> NodeGroup: + connections: Connections = {} + for ip in ips: + if isinstance(ip, NodeGroup): + ip = list(ip.nodes.keys())[0] + connections[ip] = self._connection_pool.get_connection(ip) + return NodeGroup(connections, self) + + def get_addresses_from_node_names(self, node_names: List[str]) -> dict: + result = {} + for node in self.inventory["nodes"]: + for requested_node_name in node_names: + if requested_node_name 
== node['name']: + result[node['name']] = { + 'address': node.get('address'), + 'internal_address': node.get('internal_address'), + 'connect_to': node.get('connect_to') + } + return result + + def make_group_from_nodes(self, node_names: List[str]) -> NodeGroup: + addresses = self.get_addresses_from_node_names(node_names) + ips = [] + for item in list(addresses.values()): + ips.append(item['connect_to']) + return self.make_group(ips) + + def create_group_from_groups_nodes_names(self, groups_names: List[str], nodes_names: List[str]) -> NodeGroup: + common_group = None + + if nodes_names: + common_group = self.make_group_from_nodes(nodes_names) + + if groups_names: + for group in groups_names: + + if group not in self.roles: + self.log.verbose('Group \'%s\' is requested for usage, but this group is not exists.' % group) + continue + + if common_group is None: + common_group = self.nodes[group] + else: + common_group = common_group.include_group(self.nodes[group]) + + return common_group + + def schedule_cumulative_point(self, point_method): + return flow.schedule_cumulative_point(self, point_method) + + def is_task_completed(self, task_path) -> bool: + return flow.is_task_completed(self, task_path) + + def get_final_inventory(self): + return utils.get_final_inventory(self) + + def get_facts_enrichment_fns(self): + return [ + "kubetool.kubernetes.add_node_enrichment", + "kubetool.kubernetes.remove_node_enrichment", + "kubetool.core.defaults.append_controlplain", + "kubetool.core.defaults.compile_inventory", + "kubetool.core.defaults.calculate_node_names", + "kubetool.core.defaults.apply_defaults", + "kubetool.core.defaults.calculate_nodegroups" + ] + + def gather_facts(self, step) -> None: + self.log.debug('Gathering facts started...') + + if step == 'before': + t_cluster = deepcopy(self) + defaults.enrich_inventory(t_cluster, t_cluster.raw_inventory, make_dumps=False, custom_fns=self.get_facts_enrichment_fns()) + + for node in t_cluster.nodes['all'].get_ordered_members_list(provide_node_configs=True): + t_cluster.context['nodes'][node['connect_to']] = { + "name": node['name'], + "roles": node['roles'], + "online": False + } + + system.whoami(t_cluster.nodes['all']) + self.log.verbose('Whoami check finished') + system.detect_active_interface(t_cluster.nodes['all'].get_online_nodes()) + self.log.verbose('Interface check finished') + system.detect_os_family(t_cluster, suppress_exceptions=True) + self.log.verbose('OS family check finished') + self.context = t_cluster.context + elif step == 'after': + self.remove_invalid_cri_config(self.inventory) + if not system.is_multiple_os_detected(self): + self.cache_package_versions() + self.log.verbose('Package versions detection finished') + else: + self.log.verbose('Package versions detection cancelled - cluster in multiple OS state') + + self.log.debug('Gathering facts finished!') + + def get_associations_for_os(self, os_family): + package_associations = self.inventory['services']['packages']['associations'] + active_os_family = system.get_os_family(self) + if active_os_family != os_family: + package_associations = package_associations[os_family] + + return package_associations + + def get_os_family_for_node(self, host): + node_context = self.context['nodes'].get(host) + if not node_context or not node_context.get('os', {}).get('family'): + raise Exception('Node %s do not contain necessary context data' % host) + return node_context['os']['family'] + + def get_associations_for_node(self, host): + node_os_family = self.get_os_family_for_node(host) + 
return self.get_associations_for_os(node_os_family) + + def cache_package_versions(self): + detected_packages = packages.detect_installed_packages_version_groups(self.nodes['all'].get_unchanged_nodes().get_online_nodes()) + if self.inventory['services']['packages']['associations'].get('debian'): + del self.inventory['services']['packages']['associations']['debian'] + if self.inventory['services']['packages']['associations'].get('rhel'): + del self.inventory['services']['packages']['associations']['rhel'] + if self.inventory['services']['packages']['associations'].get('rhel8'): + del self.inventory['services']['packages']['associations']['rhel8'] + for association_name, associated_params in self.inventory['services']['packages']['associations'].items(): + associated_packages = associated_params.get('package_name', []) + packages_list = [] + final_packages_list = [] + if isinstance(associated_packages, str): + packages_list.append(associated_packages) + else: + packages_list = packages_list + associated_packages + for package in packages_list: + detected_package_versions = list(detected_packages[package].keys()) + for version in detected_package_versions: + if "not installed" in version: + # if not installed somewhere - just skip + final_packages_list.append(package) + continue + if len(detected_package_versions) == 1: + final_packages_list.append(detected_package_versions[0]) + else: + # if detected multiple versions, then such broken package should be skipped + final_packages_list.append(package) + # if non-multiple value, then convert to simple string + if len(final_packages_list) == 1: + final_packages_list = final_packages_list[0] + associated_params['package_name'] = final_packages_list + # packages from direct installation section + if self.inventory['services']['packages']['install']: + final_packages_list = [] + for package in self.inventory['services']['packages']['install']['include']: + detected_package_versions = list(detected_packages[package].keys()) + for version in detected_package_versions: + if "not installed" in version: + # if not installed somewhere - just skip + final_packages_list.append(package) + continue + if len(detected_package_versions) == 1: + final_packages_list.append(detected_package_versions[0]) + else: + # if detected multiple versions, then such broken package should be skipped + final_packages_list.append(package) + self.inventory['services']['packages']['install']['include'] = final_packages_list + return detected_packages + + def finish(self): + self.gather_facts('after') + # TODO: rewrite the following lines to deenrichment functions like enrichment mechanism + from kubetool.procedures import remove_node + prepared_inventory = remove_node.remove_node_finalize_inventory(self, self.inventory) + prepared_inventory = defaults.prepare_for_dump(prepared_inventory, copy=False) + prepared_inventory = self.escape_jinja_characters_for_inventory(prepared_inventory) + utils.dump_file(self, yaml.dump(prepared_inventory), "cluster_finalized.yaml") + + def escape_jinja_characters_for_inventory(self, obj): + if isinstance(obj, dict): + for key, value in obj.items(): + obj[key] = self.escape_jinja_characters_for_inventory(value) + elif isinstance(obj, list): + for key, value in enumerate(obj): + obj[key] = self.escape_jinja_characters_for_inventory(value) + elif isinstance(obj, str): + obj = self.escape_jinja_character(obj) + return obj + + def escape_jinja_character(self, value): + if '{{' in value and '}}' in value and re.search(jinja_query_regex, value): + matches 
= re.findall(jinja_query_regex, value) + for match in matches: + # TODO: rewrite to correct way of match replacement: now it can lead to "{raw}{raw}xxx.." bug + value = value.replace(match, '{% raw %}'+match+'{% endraw %}') + return value + + def remove_invalid_cri_config(self, inventory): + if inventory['services']['cri']['containerRuntime'] == 'docker': + if inventory['services']['cri'].get('containerdConfig'): + del inventory['services']['cri']['containerdConfig'] + elif inventory['services']['cri'].get('dockerConfig'): + del inventory['services']['cri']['dockerConfig'] + +from kubetool import system, packages +from kubetool.core import defaults, flow, utils diff --git a/kubetool/core/connections.py b/kubetool/core/connections.py new file mode 100644 index 000000000..910f07174 --- /dev/null +++ b/kubetool/core/connections.py @@ -0,0 +1,76 @@ +from typing import Dict + +import fabric + +from kubetool.core.environment import Environment + + +Connections = Dict[str, fabric.connection.Connection] + + +class ConnectionPool: + def __init__(self, env: Environment): + self._env = env + self._connections: Connections = {} + self._gateways: Connections = {} + + def get_connection(self, ip: str) -> fabric.connection.Connection: + conn = self._connections.get(ip) + if conn is None: + for node in self._env.inventory['nodes']: + if node.get('address') == ip or node.get('internal_address') == ip or node.get('connect_to') == ip: + conn = self._create_connection(ip, node) + + if conn is None: + raise Exception("Failed to find suitable node to connect to by address %s" % ip) + + self._connections[ip] = conn + + return conn + + def _create_connection_from_details(self, ip: str, conn_details: dict, gateway=None, inline_ssh_env=True): + return fabric.connection.Connection( + host=ip, + user=conn_details.get('username', self._env.globals['connection']['defaults']['username']), + gateway=gateway, + port=conn_details.get('connection_port', self._env.globals['connection']['defaults']['port']), + connect_timeout=conn_details.get('connection_timeout', + self._env.globals['connection']['defaults']['timeout']), + connect_kwargs={ + "key_filename": conn_details['keyfile'] + }, + inline_ssh_env=inline_ssh_env + ) + + def _create_connection(self, ip: str, node: dict) -> fabric.connection.Connection: + if node.get('keyfile') is None: + raise Exception('There is no keyfile specified in configfile for node \'%s\'' % node['name']) + + gateway = None + if 'gateway' in node: + gateway = self._get_gateway_node_connection(node['gateway']) + + return self._create_connection_from_details(ip, node, gateway=gateway) + + def _get_gateway_node_connection(self, name: str) -> fabric.connection.Connection: + gateway_conn = self._gateways.get(name) + + if gateway_conn is None: + for gateway in self._env.inventory.get('gateway_nodes', []): + if gateway.get('name') == name: + if gateway.get('address') is None: + raise Exception('There is no address specified in configfile for gateway \'%s\'' % name) + if gateway.get('keyfile') is None: + raise Exception('There is no keyfile specified in configfile for gateway \'%s\'' % name) + + # todo since we have no workaround for gateway connections currently, + # probably we need different default connection timeout + gateway_conn = self._create_connection_from_details(gateway["address"], gateway, + inline_ssh_env=False) + + if gateway_conn is None: + raise Exception('Requested gateway \'%s\' is not found in configfile' % name) + + self._gateways[name] = gateway_conn + + return gateway_conn diff 
--git a/kubetool/core/defaults.py b/kubetool/core/defaults.py new file mode 100755 index 000000000..71adbb2c7 --- /dev/null +++ b/kubetool/core/defaults.py @@ -0,0 +1,455 @@ +#!/usr/bin/env python3 + +import re +from importlib import import_module +from copy import deepcopy + +import yaml + +from kubetool import jinja +from kubetool.core import utils +from kubetool.core.yaml_merger import default_merger + +DEFAULT_ENRICHMENT_FNS = [ + "kubetool.kubernetes.add_node_enrichment", + "kubetool.kubernetes.remove_node_enrichment", + "kubetool.core.defaults.append_controlplain", + "kubetool.kubernetes.enrich_upgrade_inventory", + "kubetool.plugins.enrich_upgrade_inventory", + "kubetool.packages.enrich_inventory_associations", + "kubetool.system.enrich_upgrade_inventory", + "kubetool.core.defaults.compile_inventory", + "kubetool.psp.manage_psp_enrichment", + "kubetool.thirdparties.enrich_inventory_apply_upgrade_defaults", + "kubetool.procedures.migrate_cri.enrich_inventory", + "kubetool.core.defaults.apply_registry", + "kubetool.core.defaults.calculate_node_names", + "kubetool.core.defaults.verify_node_names", + "kubetool.core.defaults.apply_defaults", + "kubetool.keepalived.enrich_inventory_apply_defaults", + "kubetool.haproxy.enrich_inventory", + "kubetool.kubernetes.enrich_inventory", + "kubetool.psp.enrich_inventory", + "kubetool.kubernetes_accounts.enrich_inventory", + "kubetool.plugins.calico.enrich_inventory", + "kubetool.plugins.nginx_ingress.cert_renew_enrichment", + "kubetool.plugins.nginx_ingress.verify_inventory", + "kubetool.plugins.nginx_ingress.enrich_inventory", + "kubetool.core.defaults.calculate_nodegroups", + "kubetool.keepalived.enrich_inventory_calculate_nodegroup", + "kubetool.thirdparties.enrich_inventory_apply_defaults", + "kubetool.system.verify_inventory", + "kubetool.system.enrich_inventory", + "kubetool.selinux.verify_inventory", + "kubetool.apparmor.verify_inventory", + "kubetool.plugins.enrich_inventory", + "kubetool.plugins.verify_inventory", + "kubetool.coredns.enrich_add_hosts_config", + "kubetool.k8s_certs.renew_verify", + "kubetool.cri.enrich_inventory" +] + +supported_defaults = { + 'rbac': { + 'account_defaults': 'accounts' + }, + 'node_defaults': 'nodes', + 'plugin_defaults': 'plugins', +} + +invalid_node_name_regex = re.compile("[^a-z-.\\d]", re.M) +escaped_expression_regex = re.compile('({%[\\s*|]raw[\\s*|]%}.*?{%[\\s*|]endraw[\\s*|]%})', re.M) + + +def apply_defaults(inventory, cluster): + recursive_apply_defaults(supported_defaults, inventory) + + for i, node in enumerate(inventory["nodes"]): + + node_name = node.get("name") + if node_name is None: + raise Exception('Some nodes from inventory are unnamed') + + if re.findall(invalid_node_name_regex, node_name): + raise Exception('Node name \"%s\" contains invalid characters. 
A DNS-1123 subdomain must consist of lower ' + 'case alphanumeric characters, \'-\' or \'.\'' % node_name) + + address = node.get('connect_to') + if address is None: + address = node.get('address') + if address is None: + address = node.get('internal_address') + if address is None: + raise Exception('Node %s do not have any address' % node_name) + + # we have definitely know how to connect + cluster.inventory["nodes"][i]["connect_to"] = address + cluster.inventory["nodes"][i]["connection"] = cluster.make_group([address]) + + if not cluster.context["nodes"].get(address): + cluster.context["nodes"][address] = {} + + if not node.get("roles"): + raise Exception('There are no roles defined for the node %s' % node_name) + + if address not in cluster.ips["all"]: + cluster.ips['all'].append(address) + + for role in node.get("roles"): + if role not in cluster.supported_roles: + raise Exception('An unknown role defined for the node %s' % node_name) + if role not in cluster.roles: + cluster.roles.append(role) + cluster.ips[role] = [] + if address not in cluster.ips[role]: + cluster.ips[role].append(address) + + return inventory + + +def apply_registry(inventory, cluster): + + if not inventory.get('registry', {}).get('address'): + cluster.log.verbose('Unified registry is not used') + return inventory + + if inventory['registry'].get('docker_port'): + full_registry_address = "%s:%s" % (inventory['registry']['address'], inventory['registry']['docker_port']) + else: + full_registry_address = inventory['registry']['address'] + + protocol = 'http' + if inventory['registry'].get('ssl', False): + protocol = 'https' + + # Patch kubeadm imageRepository + if not inventory['services']['kubeadm'].get('imageRepository'): + inventory['services']['kubeadm']["imageRepository"] = full_registry_address + if inventory['registry'].get('webserver', False): + # it is necessary to search in example.com:XXXX/k8s.gcr.io because images from other hubs located in + # directory with the hub name + inventory['services']['kubeadm']["imageRepository"] += "/k8s.gcr.io" + + # it is necessary to convert URIs from quay.io/xxx:v1 to example.com:XXXX/quay.io/xxx:v1 + if inventory.get('plugin_defaults') is None: + inventory['plugin_defaults'] = {} + if inventory['plugin_defaults'].get('installation') is None: + inventory['plugin_defaults']['installation'] = {} + if not inventory['plugin_defaults']['installation'].get('registry'): + inventory['plugin_defaults']['installation']['registry'] = full_registry_address + + cri_impl = inventory['services']['cri']['containerRuntime'] + if cri_impl == "docker": + if not inventory['registry'].get('ssl', False): + if inventory['services']['cri']['dockerConfig'].get("insecure-registries") is None: + inventory['services']['cri']['dockerConfig']["insecure-registries"] = [] + insecure_registries = inventory['services']['cri']['dockerConfig']["insecure-registries"] + insecure_registries.append(full_registry_address) + inventory['services']['cri']['dockerConfig']["insecure-registries"] = list(set(insecure_registries)) + + if inventory['services']['cri']['dockerConfig'].get("registry-mirrors") is None: + inventory['services']['cri']['dockerConfig']["registry-mirrors"] = [] + registry_mirrors = inventory['services']['cri']['dockerConfig']["registry-mirrors"] + registry_mirrors.append(f"{protocol}://{full_registry_address}") + inventory['services']['cri']['dockerConfig']["registry-mirrors"] = list(set(registry_mirrors)) + elif cri_impl == "containerd": + registry_section = 
f'plugins."io.containerd.grpc.v1.cri".registry.mirrors."{full_registry_address}"' + if not inventory['services']['cri']['containerdConfig'].get(registry_section): + inventory['services']['cri']['containerdConfig'][registry_section] = { + 'endpoint': ["%s://%s" % (protocol, full_registry_address)] + } + if not inventory['services']['cri']['containerdConfig'].get('plugins."io.containerd.grpc.v1.cri"'): + inventory['services']['cri']['containerdConfig']['plugins."io.containerd.grpc.v1.cri"'] = {} + if not inventory['services']['cri']['containerdConfig']['plugins."io.containerd.grpc.v1.cri"'].get('sandbox_image'): + inventory['services']['cri']['containerdConfig']['plugins."io.containerd.grpc.v1.cri"']['sandbox_image'] = \ + f"{inventory['services']['kubeadm']['imageRepository']}/pause:3.2" + + if inventory['registry'].get('webserver', False) and inventory['services'].get('thirdparties', []): + for destination, config in inventory['services']['thirdparties'].items(): + + if isinstance(config, str): + new_source = inventory['services']['thirdparties'][destination] + elif config.get('source') is not None: + new_source = inventory['services']['thirdparties'][destination]['source'] + else: + continue + + for binary in ['kubeadm', 'kubelet', 'kubectl']: + if destination == '/usr/bin/' + binary: + new_source = new_source.replace('https://storage.googleapis.com/kubernetes-release/release', + '%s://%s/kubernetes/%s' + % (protocol, inventory['registry']['address'], binary)) + + if '/usr/bin/calicoctl' == destination: + new_source = new_source.replace('https://github.com/projectcalico/calicoctl/releases/download', + '%s://%s/projectcalico/calicoctl' + % (protocol, inventory['registry']['address'])) + + if '/usr/bin/crictl.tar.gz' == destination: + new_source = new_source.replace('https://github.com/kubernetes-sigs/cri-tools/releases/download', + '%s://%s/kubernetes-sigs/cri-tools' + % (protocol, inventory['registry']['address'])) + if isinstance(config, str): + inventory['services']['thirdparties'][destination] = new_source + else: + inventory['services']['thirdparties'][destination]['source'] = new_source + + return inventory + + +def append_controlplain(inventory, cluster): + + if inventory.get('control_plain', {}).get('internal') and inventory.get('control_plain', {}).get('external'): + if cluster: + cluster.log.verbose('Control plains are set manually, nothing to detect.') + return inventory + + if cluster: + cluster.log.verbose('Detecting control plains...') + + # calculate controlplain ips + internal_address = None + internal_address_source = None + external_address = None + external_address_source = None + + # vrrp_ip section is not enriched yet + # todo what if ip is an ip of some node to remove? + if inventory.get('vrrp_ips'): + for i, item in enumerate(inventory['vrrp_ips']): + if isinstance(item, str): + if internal_address is None: + internal_address = item + internal_address_source = 'vrrp_ip[%s]' % i + else: + if internal_address is None or item.get('control_endpoint', False): + internal_address = item['ip'] + internal_address_source = 'vrrp_ip[%s]' % i + if item.get('floating_ip') and (external_address is None or item.get('control_endpoint', False)): + external_address = item['floating_ip'] + external_address_source = 'vrrp_ip[%s]' % i + + if internal_address is not None and external_address is None and cluster: + cluster.log.warning('VRRP_IPs has an internal address, but do not have an external one. Your configuration may be incorrect. 
Trying to handle this problem automatically...') + + if internal_address is None or external_address is None: + for role in ['balancer', 'master']: + # nodes are not compiled to groups yet + for node in inventory['nodes']: + if role in node['roles'] and 'remove_node' not in node['roles']: + if internal_address is None or node.get('control_endpoint', False): + internal_address = node['internal_address'] + internal_address_source = role + if node.get('name'): + internal_address_source += ' \"%s\"' % node['name'] + if node.get('address') and (external_address is None or node.get('control_endpoint', False)): + external_address = node['address'] + external_address_source = role + if node.get('name'): + external_address_source += ' \"%s\"' % node['name'] + + if external_address is None: + cluster.log.warning('Failed to detect external control plain. Something may work incorrect!') + external_address = internal_address + + if cluster: + cluster.log.debug('Control plains:\n Internal: %s (%s)\n External: %s (%s)' % (internal_address, internal_address_source, external_address, external_address_source)) + + # apply controlplain ips + if not inventory.get('control_plain'): + inventory['control_plain'] = {} + + if not inventory['control_plain'].get('internal'): + inventory['control_plain']['internal'] = internal_address + + if not inventory['control_plain'].get('external'): + inventory['control_plain']['external'] = external_address + + return inventory + + +def recursive_apply_defaults(defaults, section): + for key, value in defaults.items(): + if isinstance(value, dict) and section.get(key) is not None and section[key]: + recursive_apply_defaults(value, section[key]) + # check if target section exists and not empty + elif section.get(value) is not None and section[value]: + + if isinstance(section[value], list): + for i, v in enumerate(section[value]): + # copy defaults as new dict, to avoid problems with memory links + node_config = deepcopy(section[key]) + + # update defaults with custom-defined node configs + # TODO: deepmerge required here + node_config.update(v) + + # replace old node config with merged one + section[value][i] = node_config + + else: + # deepcopy the whole section, otherwise it will break dict while replacing + section_copy = deepcopy(section[value]) + for custom_key, custom_value in section_copy.items(): + # here section['key'] refers to default, not custom value + default_value = deepcopy(section[key]) + section[value][custom_key] = default_merger.merge(default_value, custom_value) + + del section[key] + + +def calculate_node_names(inventory, cluster): + roles_iterators = {} + for i, node in enumerate(inventory['nodes']): + for role_name in ['master', 'worker', 'balancer']: + if role_name in node.get('roles', []): + # The idea is this: + # If the name is already specified, we must skip this node, + # however, we must consider that we already have a node of this type + # and increase this type iterator + # As a result, we get such an algorithm. 
For example, with the following inventory: + # + # - name: k8s-master-1, roles: ['master'] + # - roles: ['master'] + # - name: k8s-master-3, roles: ['master'] + # + # We should get the following calculation result: + # + # - name: k8s-master-1, roles: ['master'] + # - name: master-2, roles: ['master'] + # - name: k8s-master-3, roles: ['master'] + # + role_i = roles_iterators.get(role_name, 1) + roles_iterators[role_name] = role_i + 1 + if node.get('name') is None: + inventory['nodes'][i]['name'] = '%s-%s' % (role_name, role_i) + return inventory + + +def verify_node_names(inventory, cluster): + known_names = [] + for i, node in enumerate(inventory['nodes']): + if node.get('name') is None: + raise Exception('Node item %s in nodes section do not contain name' % i) + if node['name'] in known_names: + raise Exception('Node name %s is duplicated in configfile' % node['name']) + known_names.append(node['name']) + return inventory + + +def calculate_nodegroups(inventory, cluster): + for role in cluster.ips.keys(): + cluster.nodes[role] = cluster.make_group(cluster.ips[role]) + return inventory + + +def enrich_inventory(cluster, custom_inventory, apply_fns=True, make_dumps=True, custom_fns=None): + with open(utils.get_resource_absolute_path('resources/configurations/defaults.yaml', + script_relative=True), 'r') as stream: + base_inventory = yaml.safe_load(stream) + + inventory = default_merger.merge(base_inventory, custom_inventory) + + # it is necessary to temporary put half-compiled inventory to cluster inventory field + cluster._inventory = inventory + if apply_fns: + if custom_fns: + enrichment_functions = custom_fns + else: + enrichment_functions = DEFAULT_ENRICHMENT_FNS + + # run required fields calculation + for enrichment_fn in enrichment_functions: + fn_package_name, fn_method_name = enrichment_fn.rsplit('.', 1) + mod = import_module(fn_package_name) + cluster.log.verbose('Calling fn "%s"' % enrichment_fn) + inventory = getattr(mod, fn_method_name)(inventory, cluster) + + cluster.log.verbose('Enrichment finished!') + + if make_dumps: + utils.dump_file(cluster, yaml.dump(prepare_for_dump(inventory), ), "cluster.yaml") + procedure_config = cluster.context["execution_arguments"].get("procedure_config") + if procedure_config: + with open(procedure_config, 'r') as stream: + utils.dump_file(cluster, stream, "procedure.yaml") + + return inventory + + +def compile_inventory(inventory, cluster): + + # convert references in yaml to normal values + iterations = 100 + root = deepcopy(inventory) + root['globals'] = cluster.globals + + while iterations > 0: + + cluster.log.verbose('Inventory is not rendered yet...') + inventory = compile_object(cluster.log, inventory, root) + + temp_dump = yaml.dump(inventory) + + # remove golang specific + temp_dump = re.sub(escaped_expression_regex, '', temp_dump.replace('\n', '')) + + # it is necessary to carry out several iterations, + # in case we have dynamic variables that reference each other + if '{{' in temp_dump or '{%' in temp_dump: + iterations -= 1 + else: + iterations = 0 + + inventory = compile_object(cluster.log, inventory, root, ignore_jinja_escapes=False) + + merged_inventory = yaml.dump(prepare_for_dump(inventory)) + utils.dump_file(cluster, merged_inventory, "cluster_precompiled.yaml") + + return inventory + + +def compile_object(log, struct, root, ignore_jinja_escapes=True): + if isinstance(struct, list): + for i, v in enumerate(struct): + struct[i] = compile_object(log, v, root, ignore_jinja_escapes=ignore_jinja_escapes) + elif isinstance(struct, 
dict): + for k, v in struct.items(): + struct[k] = compile_object(log, v, root, ignore_jinja_escapes=ignore_jinja_escapes) + elif isinstance(struct, str) and ('{{' in struct or '{%' in struct): + struct = compile_string(log, struct, root, ignore_jinja_escapes=ignore_jinja_escapes) + return struct + + +def compile_string(log, struct, root, ignore_jinja_escapes=True): + log.verbose("Rendering \"%s\"" % struct) + + if ignore_jinja_escapes: + iterator = escaped_expression_regex.finditer(struct) + struct = re.sub(escaped_expression_regex, '', struct) + struct = jinja.new(log, root).from_string(struct).render(**root) + + for match in iterator: + span = match.span() + struct = struct[:span[0]] + match.group() + struct[span[0]:] + else: + struct = jinja.new(log, root).from_string(struct).render(**root) + + log.verbose("\tRendered as \"%s\"" % struct) + return struct + + +def prepare_for_dump(inventory, copy=True): + # preparation for dump required to remove memory links + + if copy: + dump_inventory = deepcopy(inventory) + else: + dump_inventory = inventory + + for i, node in enumerate(dump_inventory['nodes']): + if 'connection' in dump_inventory['nodes'][i]: + del dump_inventory['nodes'][i]['connection'] + + return dump_inventory + diff --git a/kubetool/core/environment.py b/kubetool/core/environment.py new file mode 100644 index 000000000..d4154857c --- /dev/null +++ b/kubetool/core/environment.py @@ -0,0 +1,18 @@ +import os +from abc import ABC, abstractmethod + + +class Environment(ABC): + @property + @abstractmethod + def inventory(self) -> dict: + pass + + @property + @abstractmethod + def globals(self) -> dict: + pass + + @staticmethod + def is_deploying_from_windows(): + return os.name == 'nt' diff --git a/kubetool/core/executor.py b/kubetool/core/executor.py new file mode 100644 index 000000000..99efbe36d --- /dev/null +++ b/kubetool/core/executor.py @@ -0,0 +1,251 @@ +import random +import time +from typing import Tuple, List, Dict, Callable, Any, Union +from contextvars import Token, ContextVar + +import fabric +import invoke + +from fabric.connection import Connection +from concurrent.futures.thread import ThreadPoolExecutor + +GRE = ContextVar('KubetoolsGlobalRemoteExecutor', default=None) + + +class RemoteExecutor: + + def __init__(self, log, lazy=True, parallel=True, ignore_failed=False, enforce_children=False, timeout=None): + self.log = log + self.lazy = lazy + self.parallel = parallel + self.ignore_failed = ignore_failed + self.enforce_children = enforce_children + self.timeout = timeout + self.connections_queue: Dict[Connection, List[Tuple]] = {} + self._last_token = -1 + self.previous_context_token = Token.MISSING + self.command_separator = ''.join(random.choice('=-_') for _ in range(32)) + self.results = [] + + def __del__(self): + pass + + def __enter__(self): + executor = self._get_active_executor() + if executor == self or not executor.enforce_children: + self.previous_context_token = GRE.set(self) + return executor + + def __exit__(self, exc_type, exc_value, tb): + if self.previous_context_token != Token.MISSING: + GRE.reset(self.previous_context_token) + if self.connections_queue: + self.flush() + + def _get_active_executor(self): + executor = GRE.get() + if executor: + return executor + else: + return self + + def _is_actions_equal(self, action1, action2): + if action1[0] not in ["sudo", "run"] or action1[0] != action2[0]: + return False + for key, value in action1[2].items(): + if value != action2[2].get(key): + return False + return True + + def reparse_results(self, 
results, batch): + batch_no_cnx: Dict[str, tuple] = {} + conns_by_host: Dict[str, Connection] = {} + for cnx, data in batch.items(): + batch_no_cnx[cnx.host] = data + conns_by_host[cnx.host] = cnx + executor = self._get_active_executor() + reparsed_results = {} + for host, result in results.items(): + conn_results = {} + action, callbacks, tokens = batch_no_cnx[host] + if isinstance(result, fabric.runners.Result) and executor.command_separator in result.stdout and executor.command_separator in result.stderr: + stderrs = result.stderr.strip().split(executor.command_separator) + raw_stdouts = result.stdout.strip().split(executor.command_separator) + stdouts = [] + exit_codes = [] + i = 0 + while i < len(raw_stdouts): + stdouts.append(raw_stdouts[i].strip()) + if i+1 < len(raw_stdouts): + exit_codes.append(int(raw_stdouts[i+1].strip())) + i += 2 + exit_codes.append(result.exited) + for i, code in enumerate(exit_codes): + token = tokens[i] + conn_results[token] = fabric.runners.Result(stdout=stdouts[i], stderr=stderrs[i], exited=code, connection=conns_by_host[host]) + else: + conn_results[tokens[0]] = result + reparsed_results[host] = conn_results + # TODO: run and collect callbacks and wait for them + return reparsed_results + + def _merge_actions(self, actions): + executor = self._get_active_executor() + + if executor.ignore_failed: + separator_symbol = ";" + else: + separator_symbol = "&&" + + separator = f" {separator_symbol} " \ + f"echo \"{executor.command_separator}\" {separator_symbol} " \ + f"echo $? {separator_symbol} " \ + f"echo \"{executor.command_separator}\" {separator_symbol} " \ + f"echo \"{executor.command_separator}\" 1>&2 {separator_symbol} " + + merged_actions = [] + + for payload in actions: + action, callback, token = payload + if merged_actions and executor._is_actions_equal(merged_actions[-1][0], action): + precommand = '' + if action[0] == 'sudo': + precommand = 'sudo ' + previous_action = merged_actions[-1][0] + merged_action_command = previous_action[1][0] + separator + precommand + action[1][0] + merged_actions[-1][0] = (previous_action[0], tuple([merged_action_command]), previous_action[2]) + merged_actions[-1][1].append(callback) + merged_actions[-1][2].append(token) + else: + merged_actions.append([action, [callback], [token]]) + + return merged_actions + + def _get_callables(self): + executor = self._get_active_executor() + callables = {} + + for connection, actions in executor.connections_queue.items(): + callables[connection] = executor._merge_actions(actions) + + i = 0 + batches = [] + + while i != -1: + batch = {} + for conn, actions in callables.items(): + if len(actions) > i: + batch[conn] = actions[i] + if not batch: + i = -1 + else: + i += 1 + batches.append(batch) + + return batches + + def queue(self, target, action: Tuple, callback: Callable = None) -> int or dict: + executor = self._get_active_executor() + executor._last_token = token = executor._last_token + 1 + + if isinstance(target, Connection): + target = [target] + if isinstance(target, dict): + target = list(target.values()) + + if not target: + executor.log.verbose('Connections list is empty, nothing to queue') + else: + for connection in target: + if not executor.connections_queue.get(connection): + executor.connections_queue[connection] = [] + executor.connections_queue[connection].append((action, callback, token)) + + if not executor.lazy: + return executor.flush() + else: + return token + + def reset_queue(self) -> None: + executor = self._get_active_executor() + 
executor.connections_queue = {} + + def get_last_results(self): + executor = self._get_active_executor() + if len(executor.results) == 0: + return None + return executor.results[-1] + + def get_last_results_str(self): + batched_results = self.get_last_results() + if not batched_results: + return + output = "" + for conn, results in batched_results.items(): + for token, result in results.items(): + if isinstance(result, invoke.exceptions.UnexpectedExit): + result = result.result + + # for now we do not know how-to print transfer result + if not isinstance(result, fabric.runners.Result): + continue + + if output != "": + output += "\n" + output += "\t%s (%s): code=%i" % (conn, token, result.exited) + if result.stdout: + output += "\n\t\tSTDOUT: %s" % result.stdout.replace("\n", "\n\t\t ") + if result.stderr: + output += "\n\t\tSTDERR: %s" % result.stderr.replace("\n", "\n\t\t ") + + return output + + def flush(self) -> dict: + executor = self._get_active_executor() + + batch_results = {} + + if not executor.connections_queue: + executor.log.verbose('Queue is empty, nothing to perform') + return batch_results + + callable_batches = executor._get_callables() + + max_workers = len(executor.connections_queue.keys()) + if not executor.parallel: + max_workers = 1 + + with ThreadPoolExecutor(max_workers=max_workers) as TPE: + for batch in callable_batches: + results = {} + futures = {} + + def safe_exec(result_map: Dict[str, Any], key: str, call: Callable[[], Any]): + try: + # sleep required to avoid thread starvation + time.sleep(0.1) + result_map[key] = call() + time.sleep(0.1) + except Exception as e: + results[key] = e + + for cxn, payload in batch.items(): + action, callbacks, tokens = payload + do_type, args, kwargs = action + executor.log.verbose('Executing %s %s with options: %s' % (do_type, args, kwargs)) + safe_exec(futures, cxn.host, lambda: TPE.submit(getattr(cxn, do_type), *args, **kwargs)) + + for host, future in futures.items(): + safe_exec(results, host, lambda: future.result(timeout=executor.timeout)) + + parsed_results = executor.reparse_results(results, batch) + for host, tokenized_results in parsed_results.items(): + if not batch_results.get(host): + batch_results[host] = {} + for token, res in tokenized_results.items(): + batch_results[host][token] = res + + executor.reset_queue() + executor.results.append(batch_results) + + return batch_results diff --git a/kubetool/core/flow.py b/kubetool/core/flow.py new file mode 100755 index 000000000..23551eaa1 --- /dev/null +++ b/kubetool/core/flow.py @@ -0,0 +1,286 @@ +#!/usr/bin/env python3 + +import argparse +import sys +import time +from copy import deepcopy + +import yaml +import importlib + +from kubetool.core import utils, cluster as c + +DEFAULT_CLUSTER_OBJ = None + + +def run(tasks, + tasks_filter, + excluded_tasks, + inventory_filepath, + context, + procedure_inventory_filepath=None, + cumulative_points=None, + print_final_message=True, + cluster_obj=None): + + time_start = time.time() + + if cumulative_points is None: + cumulative_points = {} + + if not context['execution_arguments'].get('disable_dump', True): + utils.prepare_dump_directory(context['execution_arguments'].get('dump_location'), + reset_directory=not context['execution_arguments'].get('disable_dump_cleanup', False)) + + cluster = load_inventory(inventory_filepath, context, procedure_inventory_filepath=procedure_inventory_filepath, + cluster_obj=cluster_obj) + + cluster.log.debug("Excluded tasks:") + filtered_tasks = filter_flow(tasks, tasks_filter, 
excluded_tasks) + if filtered_tasks == tasks: + cluster.log.debug("\tNo excluded tasks") + + if 'ansible_inventory_location' in cluster.context['execution_arguments']: + utils.make_ansible_inventory(cluster.context['execution_arguments']['ansible_inventory_location'], cluster) + + if cluster.context.get('execution_arguments', {}).get('without_act', False): + if cluster.context.get('inventory_regenerate_required', False) is True: + utils.recreate_final_inventory_file(cluster) + cluster.log.debug('\nFurther acting manually disabled') + return cluster + + run_flow(filtered_tasks, cluster, cumulative_points) + + if cluster.context.get('inventory_regenerate_required', False) is True: + utils.recreate_final_inventory_file(cluster) + + cluster.finish() + + time_end = time.time() + + if print_final_message: + cluster.log.info("") + cluster.log.info("SUCCESSFULLY FINISHED") + cluster.log.info("Elapsed: "+utils.get_elapsed_string(time_start, time_end)) + + return cluster + + +def create_context(execution_arguments, procedure=None): + + if isinstance(execution_arguments, argparse.Namespace): + execution_arguments = vars(execution_arguments) + + context = { + "execution_arguments": deepcopy(execution_arguments), + "proceeded_tasks": [], + "nodes": {}, + 'initial_procedure': procedure + } + + if context['execution_arguments'].get('exclude_cumulative_points_methods', '').strip() != '': + context['execution_arguments']['exclude_cumulative_points_methods'] = \ + context['execution_arguments']['exclude_cumulative_points_methods'].strip().split(",") + # print('The following cumulative points methods are marked for exclusion: [ %s ]' % + # ', '.join(context['execution_arguments']['exclude_cumulative_points_methods'])) + else: + context['execution_arguments']['exclude_cumulative_points_methods'] = [] + + return context + + +def load_inventory(inventory_filepath, context, silent=False, procedure_inventory_filepath=None, cluster_obj=None): + if not silent: + print("Loading inventory file '%s'" % inventory_filepath) + try: + if cluster_obj is None: + cluster_obj = DEFAULT_CLUSTER_OBJ + if cluster_obj is None: + cluster_obj = c.KubernetesCluster + cluster = cluster_obj(inventory_filepath, + context, + procedure_inventory=procedure_inventory_filepath, + gather_facts=True) + if not silent: + cluster.log.debug("Inventory file loaded:") + for role in cluster.roles: + cluster.log.debug(" %s %i" % (role, len(cluster.ips[role]))) + for ip in cluster.ips[role]: + cluster.log.debug(" %s" % ip) + return cluster + except yaml.YAMLError as exc: + utils.do_fail("Failed to load inventory file", exc) + except Exception as exc: + utils.do_fail("Failed to process inventory file", exc) + + +def filter_flow(tasks, tasks_filter, excluded_tasks, _task_path='', flow_changed=False): + filtered = {} + + # Remove any whitespace from the filters + tasks_filter = [task.strip() for task in tasks_filter] + excluded_tasks = [task.strip() for task in excluded_tasks] + + for task_name, task in tasks.items(): + if _task_path == '': + __task_path = task_name + else: + __task_path = _task_path + "." + task_name + + allowed = True + # if tasks_filter is not empty, somebody specified a filter argument + if tasks_filter: + allowed = False + # Check whether the iterated sub-path is within the allowed paths, i.e. whether + # system_prepare.cri is contained in the allowed path system_prepare.cri.docker + for task_path in tasks_filter: + if __task_path in task_path or task_path in __task_path: + allowed = True + # print("Allowed %s in %s" % (__task_path, task_path)) + + if allowed and (not excluded_tasks or __task_path not in excluded_tasks): + if callable(task): + filtered[task_name] = task + else: + filtered_flow = filter_flow(task, tasks_filter, excluded_tasks, __task_path, flow_changed) + if filtered_flow: + filtered[task_name] = filtered_flow + else: + print("\t%s" % __task_path) + + return filtered + + +def run_flow(tasks, cluster, cumulative_points, _task_path=''): + for task_name, task in tasks.items(): + + if _task_path == '': + __task_path = task_name + else: + __task_path = _task_path + "." + task_name + + proceed_cumulative_point(cluster, cumulative_points, __task_path) + + if callable(task): + cluster.log.info("*** TASK %s ***" % __task_path) + try: + task(cluster) + add_task_to_proceeded_list(cluster, __task_path) + except Exception as exc: + utils.do_fail("TASK FAILED %s" % __task_path, exc, + hint=cluster.globals['error_handling']['failure_message'] % (sys.argv[0], __task_path), + log=cluster.log) + else: + run_flow(task, cluster, cumulative_points, __task_path) + + +def new_parser(cli_help): + + parser = argparse.ArgumentParser(description=cli_help, + formatter_class=argparse.RawTextHelpFormatter) + + parser.add_argument('-v', '--verbose', + action='store_true', + help='enable the verbosity mode') + + parser.add_argument('-c', '--config', + default='cluster.yaml', + help='define main cluster configuration file') + + parser.add_argument('--without-act', + action='store_true', + help='prevent tasks from being executed') + + parser.add_argument('--ansible-inventory-location', + default='./ansible-inventory.ini', + help='auto-generated ansible-compatible inventory file location') + + parser.add_argument('--dump-location', + default='./dump/', + help='dump directory for intermediate files') + + parser.add_argument('--disable-dump', + action='store_true', + help='prevent dump directory creation') + + parser.add_argument('--disable-dump-cleanup', + action='store_true', + help='prevent dump directory cleaning on process launch') + + parser.add_argument('--disable-cumulative-points', + action='store_true', + help='disable cumulative points execution (use only when you understand what you are doing!)') + + parser.add_argument('--force-cumulative-points', + action='store_true', + help='force cumulative points execution - they will be executed regardless of whether it was ' + 'scheduled or not (use only when you understand what you are doing!)') + + parser.add_argument('--exclude-cumulative-points-methods', + default='', + help='comma-separated names of cumulative point methods to be excluded from execution') + + parser.add_argument('--log', + action='append', + nargs='*', + help='Logging options, can be specified multiple times') + + return parser + + +def schedule_cumulative_point(cluster, point_method): + + if cluster.context['execution_arguments'].get('disable_cumulative_points', False): + cluster.log.verbose('Method %s not scheduled - cumulative points disabled' % point_method.__qualname__) + return + + if point_method.__qualname__ in cluster.context['execution_arguments']['exclude_cumulative_points_methods']: + cluster.log.verbose('Method %s not scheduled - it is set to be excluded' % point_method.__qualname__) + return + + scheduled_points = 
cluster.context.get('scheduled_cumulative_points', []) + + if point_method not in scheduled_points: + scheduled_points.append(point_method) + cluster.context['scheduled_cumulative_points'] = scheduled_points + cluster.log.verbose('Method %s scheduled' % point_method.__qualname__) + else: + cluster.log.verbose('Method %s already scheduled' % point_method.__qualname__) + + +def proceed_cumulative_point(cluster, points_list, point_task_path): + + if cluster.context['execution_arguments'].get('disable_cumulative_points', False): + return + + scheduled_methods = cluster.context.get('scheduled_cumulative_points', []) + + for point_method_fullname, points_tasks_paths in points_list.items(): + if point_task_path in points_tasks_paths: + + if cluster.context['execution_arguments'].get('force_cumulative_points', False): + cluster.log.verbose('Method %s will be forcibly executed' % point_method_fullname) + else: + if point_method_fullname not in [x.__module__+'.'+x.__qualname__ for x in scheduled_methods]: + cluster.log.verbose('Method %s not scheduled - cumulative point call skipped' % point_method_fullname) + continue + + cluster.log.info("*** CUMULATIVE POINT %s ***" % point_method_fullname) + + mod_name, func_name = point_method_fullname.rsplit('.', 1) + mod = importlib.import_module(mod_name) + func = getattr(mod, func_name) + + call_result = cluster.nodes["all"].get_new_nodes_or_self().call(func) + cluster.context['scheduled_cumulative_points'].remove(func) + return call_result + + +def add_task_to_proceeded_list(cluster, task_path): + if not is_task_completed(cluster, task_path): + cluster.context['proceeded_tasks'].append(task_path) + utils.dump_file(cluster, "\n".join(cluster.context['proceeded_tasks'])+"\n", 'finished_tasks') + + +def is_task_completed(cluster, task_path): + return task_path in cluster.context['proceeded_tasks'] diff --git a/kubetool/core/group.py b/kubetool/core/group.py new file mode 100755 index 000000000..3ddd79396 --- /dev/null +++ b/kubetool/core/group.py @@ -0,0 +1,620 @@ +#!/usr/bin/env python3 + +import io +import os +import random +import subprocess +import time +import uuid +from datetime import datetime +from typing import Callable, Dict, List, Union, IO + +import fabric +import invoke +from invoke import UnexpectedExit + +from kubetool.core.connections import Connections +from kubetool.core.executor import RemoteExecutor + +_GenericResult = Union[Exception, fabric.runners.Result, fabric.transfer.Result] +_HostToResult = Dict[str, _GenericResult] + + +# fabric.runners.Result is not equitable OOB, let it make equitable +def _compare_fabric_results(self: fabric.runners.Result, other) -> bool: + if not isinstance(other, fabric.runners.Result): + return False + + # todo should other fields be compared? Or probably custom class should be used to store result. + return self.exited == other.exited \ + and self.stdout == other.stdout \ + and self.stderr == other.stderr + + +fabric.runners.Result.__eq__ = _compare_fabric_results +fabric.runners.Result.__ne__ = lambda self, other: not _compare_fabric_results(self, other) + + +class NodeGroupResult(fabric.group.GroupResult, Dict[fabric.connection.Connection, _GenericResult]): + def get_simple_out(self): + if len(self) != 1: + raise NotImplementedError("Simple output can be returned only for NodeGroupResult consisted of " + "exactly one node, but %s were provided." 
% list(self.keys())) + + res = list(self.values())[0] + if not isinstance(res, fabric.runners.Result): + raise NotImplementedError("It does not make sense to return simple output for result of type %s" + % type(res)) + + return res.stdout + + def __str__(self): + output = "" + for conn, result in self.items(): + + if isinstance(result, invoke.exceptions.UnexpectedExit): + result = result.result + + # for now we do not know how-to print transfer result + if not isinstance(result, fabric.runners.Result): + continue + + if output != "": + output += "\n" + output += "\t%s: code=%i" % (conn.host, result.exited) + if result.stdout: + output += "\n\t\tSTDOUT: %s" % result.stdout.replace("\n", "\n\t\t ") + if result.stderr: + output += "\n\t\tSTDERR: %s" % result.stderr.replace("\n", "\n\t\t ") + return output + + def print(self): + print(self) + + def is_any_has_code(self, code): + for conn, result in self.items(): + if str(result.exited) == str(code): + return True + return False + + def is_any_failed(self): + return self.is_any_has_code(1) + + def __eq__(self, other): + if self is other: + return True + + if not isinstance(other, NodeGroupResult): + return False + + if len(self) != len(other): + return False + + for conn, result in self.items(): + compared_result = other.get(conn) + if compared_result is None: + return False + + if not isinstance(result, fabric.runners.Result) or not isinstance(compared_result, fabric.runners.Result): + raise NotImplementedError('Currently only instances of fabric.runners.Result can be compared!') + + if result != compared_result: + return False + + return True + + def __ne__(self, other): + return not self == other + + +class NodeGroup: + + def __init__(self, connections: Connections, cluster): + from kubetool.core.cluster import KubernetesCluster + + self.cluster: KubernetesCluster = cluster + self.nodes = connections + + def _make_result(self, results: _HostToResult) -> NodeGroupResult: + group_result = NodeGroupResult() + for host, result in results.items(): + group_result[self.nodes[host]] = result + + return group_result + + def _make_result_or_fail(self, results: _HostToResult, + failure_criteria: Callable[[str, _GenericResult], bool]) -> NodeGroupResult: + failed_hosts = [host for host, result in results.items() if failure_criteria(host, result)] + group_result = self._make_result(results) + + if failed_hosts: + raise fabric.group.GroupException(group_result) + + return group_result + + def run(self, *args, **kwargs) -> NodeGroupResult: + return self.do("run", *args, **kwargs) + + def sudo(self, *args, **kwargs) -> NodeGroupResult: + return self.do("sudo", *args, **kwargs) + + def put(self, local_file: Union[io.StringIO, str], remote_file: str, **kwargs): + # pop it early, so that StringIO "put" is not affected by unexpected keyword argument + binary = kwargs.pop("binary", True) is not False + + if isinstance(local_file, io.StringIO): + self.cluster.log.verbose("Text is being transferred to remote file \"%s\" on nodes %s with options %s" + % (remote_file, list(self.nodes.keys()), kwargs)) + self._put(local_file, remote_file, **kwargs) + return + + self.cluster.log.verbose("Local file \"%s\" is being transferred to remote file \"%s\" on nodes %s with options %s" + % (local_file, remote_file, list(self.nodes.keys()), kwargs)) + + group_to_upload = self + # Fabric opens file in 'rb' mode. 
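+ # The suffix computed below is appended to 'r' when the local file is opened for upload, so transfers default to binary-safe reads and switch to text mode only for the CRLF -> LF case.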
+ open_mode = "b" + + # hashes checking for text files is currently not supported when deploying from windows + # because we need to change CRLF -> LF when transferring file + if not binary and self.cluster.is_deploying_from_windows(): + self.cluster.log.verbose("The file for transferring is marked as text. CRLF -> LF transformation is required") + # Let's open file in 'rt' mode to automatically make CRLF -> LF transformation. + open_mode = "t" + else: + self.cluster.log.verbose('File size: %s' % os.path.getsize(local_file)) + local_file_hash = self.get_local_file_sha1(local_file) + self.cluster.log.verbose('Local file hash: %s' % local_file_hash) + remote_file_hashes = self.get_remote_file_sha1(remote_file) + self.cluster.log.verbose('Remote file hashes: %s' % remote_file_hashes) + + hosts_to_upload = [] + for remote_ip, remote_file_hash in remote_file_hashes.items(): + if remote_file_hash != local_file_hash: + self.cluster.log.verbose('Local and remote hashes does not match on node \'%s\' %s %s' % (remote_ip, + local_file_hash, remote_file_hash)) + hosts_to_upload.append(remote_ip) + if not hosts_to_upload: + self.cluster.log.verbose('Local and remote hashes are equal on all nodes, no transmission required') + return + + group_to_upload = self.cluster.make_group(hosts_to_upload) + + with open(local_file, "r" + open_mode) as local_stream: + group_to_upload._put(local_stream, remote_file, **kwargs) + + def _put(self, local_stream: IO, remote_file: str, **kwargs): + hide = kwargs.pop("hide", True) is True + sudo = kwargs.pop("sudo", False) is True + backup = kwargs.pop("backup", False) is True + immutable = kwargs.pop("immutable", False) is True + + # for unknown reason fabric v2 can't put async + # Let's remember passed value, which by default is True, and make it False forcibly. + is_async = kwargs.pop("is_async", True) is not False + kwargs["is_async"] = False + + if sudo: + self.cluster.log.verbose('A sudoer upload required') + + if backup: + self.cluster.log.verbose('File \"%s\" backup required' % remote_file) + + if immutable: + self.cluster.log.verbose('File \"%s\" immutable set required' % remote_file) + + if not sudo and not backup and not immutable: + # no additional commands execution is required - directly upload file + self.do("put", local_stream, remote_file, **kwargs) + return + + # for unknown reason fabric v2 can't put as sudo, and we should use WA via mv + # also, if we need to backup the file first, then we also have to upload file to tmp first + + temp_filepath = "/tmp/%s" % uuid.uuid4().hex + self.cluster.log.verbose("Uploading to temporary file '%s'..." % temp_filepath) + self.do("put", local_stream, temp_filepath, **kwargs) + + self.cluster.log.verbose("Moving temporary file '%s' to '%s'..." 
% (temp_filepath, remote_file)) + + if sudo: + mv_command = "sudo chown root:root %s && sudo mv -f %s %s" % (temp_filepath, temp_filepath, remote_file) + else: + mv_command = "mv -f %s %s" % (temp_filepath, remote_file) + + if backup: + if sudo: + mv_command = "sudo cp -f %s %s.bak$(sudo ls %s* | sudo wc -l); %s" \ + % (remote_file, remote_file, remote_file, mv_command) + else: + mv_command = "cp -f %s %s.bak$(ls %s* | wc -l); %s" \ + % (remote_file, remote_file, remote_file, mv_command) + + mv_command = "cmp --silent %s %s || (%s)" % (remote_file, temp_filepath, mv_command) + + if immutable: + if sudo: + mv_command = "sudo chattr -i %s; %s; sudo chattr +i %s" % (remote_file, mv_command, remote_file) + else: + mv_command = "chattr -i %s; %s; chattr +i %s" % (remote_file, mv_command, remote_file) + + kwargs["hide"] = hide + kwargs["is_async"] = is_async + self.sudo(mv_command, **kwargs) + + def get(self, *args, **kwargs): + return self.do("get", *args, **kwargs) + + def do(self, do_type, *args, **kwargs) -> NodeGroupResult or int: + raw_results = self._do_with_wa(do_type, *args, **kwargs) + if isinstance(raw_results, int): + return raw_results + group_results = self._make_result_or_fail(raw_results, lambda host, result: isinstance(result, Exception)) + + if not kwargs.get('hide', True): + self.cluster.log.debug(group_results, extra={'ignore_stdout': True}) + + return group_results + + def _do_with_wa(self, do_type, *args, **kwargs): + # by default all code is async, but can be set False forcibly + is_async = kwargs.pop("is_async", True) is not False + + left_nodes = self.nodes + retry = 0 + results: _HostToResult = {} + while True: + retry += 1 + + result = self._do(do_type, left_nodes, is_async, *args, **kwargs) + if isinstance(result, int): + return result + + results.update(result) + left_nodes = {host: left_nodes[host] for host, result in results.items() if isinstance(result, Exception)} + + if not left_nodes or retry >= self.cluster.globals['workaround']['retries'] \ + or not self._try_workaround(results, left_nodes): + break + + self.cluster.log.verbose('Retrying #%s...' % retry) + time.sleep(self.cluster.globals['workaround']['delay_period']) + + return results + + def _try_workaround(self, results: _HostToResult, failed_nodes: Connections) -> bool: + not_booted = [] + + for host in failed_nodes.keys(): + exception = results[host] + if isinstance(exception, UnexpectedExit): + exception_message = str(exception.result) + else: + exception_message = str(exception) + + if self.is_allowed_etcd_exception(exception_message): + self.cluster.log.verbose("Detected ETCD problem at %s, need retry: %s" % (host, exception_message)) + elif self.is_allowed_kubernetes_exception(exception_message): + self.cluster.log.verbose("Detected kubernetes problem at %s, need retry: %s" % (host, exception_message)) + elif self.is_allowed_connection_exception(exception_message): + self.cluster.log.verbose("Detected connection exception at %s, will try to reconnect to node. 
Exception: %s" + % (host, exception_message)) + not_booted.append(host) + else: + self.cluster.log.verbose("Detected unavoidable exception at %s, trying to solve automatically: %s" + % (host, exception_message)) + return False + + # if there are not booted nodes, but we succeeded to wait for at least one is booted, we can continue execution + if not_booted and self.cluster.make_group(not_booted).wait_active_nodes().is_empty(): + return False + + return True + + def _do(self, do_type: str, nodes: Connections, is_async, *args, **kwargs) -> _HostToResult: + + if do_type in ["run", "sudo"]: + # by default fabric will print all output from nodes + # let's disable this feature if it was not forcibly defined + if kwargs.get("hide") is None: + kwargs['hide'] = True + + execution_timeout = kwargs.get("timeout", None) + + results = {} + + if not nodes: + self.cluster.log.verbose('No nodes to perform %s %s with options: %s' % (do_type, args, kwargs)) + return results + + self.cluster.log.verbose('Performing %s %s on nodes %s with options: %s' % (do_type, args, list(nodes.keys()), kwargs)) + + executor = RemoteExecutor(self.cluster.log, lazy=False, parallel=is_async, timeout=execution_timeout) + results = executor.queue(nodes, (do_type, args, kwargs)) + + if not isinstance(results, int): + simplified_results = {} + for cnx, conn_results in results.items(): + raw_results = list(conn_results.values()) + if len(raw_results) > 1: + raise Exception('Unexpected condition: not supported multiple results with non-lazy GRE') + simplified_results[cnx] = raw_results[0] + return simplified_results + + return results + + def call(self, action, **kwargs): + return self.call_batch([action], **{"%s.%s" % (action.__module__, action.__name__): kwargs}) + + def call_batch(self, actions, **kwargs): + results = {} + + for action in actions: + + callable_path = "%s.%s" % (action.__module__, action.__name__) + self.cluster.log.debug("Running %s: " % callable_path) + + action_kwargs = {} + if kwargs.get(callable_path) is not None: + action_kwargs = kwargs[callable_path] + + results[action] = action(self, **action_kwargs) + if results[action] is not None: + self.cluster.log.debug(results[action]) + + return results + + def wait_for_reboot(self, initial_boot_history: NodeGroupResult, timeout=None) -> NodeGroupResult: + results = self._await_rebooted_nodes(timeout, initial_boot_history=initial_boot_history) + return self._make_result_or_fail( + results, + lambda host, result: isinstance(result, Exception) or result == initial_boot_history.get(self.nodes[host]) + ) + + def get_online_nodes(self) -> 'NodeGroup': + online = [host for host, node_context in self.cluster.context['nodes'].items() if node_context.get('online', False)] + return self.cluster.make_group(online).intersection_group(self) + + def wait_active_nodes(self, timeout=None) -> 'NodeGroup': + results = self._await_rebooted_nodes(timeout) + not_booted = [host for host, result in results.items() if isinstance(result, Exception)] + return self.exclude_group(self.cluster.make_group(not_booted)) + + def _await_rebooted_nodes(self, timeout=None, initial_boot_history: NodeGroupResult = None) -> _HostToResult: + + if timeout is None: + timeout = self.cluster.globals['nodes']['boot']['defaults']['timeout'] + + delay_period = self.cluster.globals['nodes']['boot']['defaults']['delay_period'] + + if initial_boot_history: + self.cluster.log.verbose("Initial boot history:\n%s" % initial_boot_history) + else: + initial_boot_history = NodeGroupResult() + + left_nodes = self.nodes + 
results: _HostToResult = {} + time_start = datetime.now() + + # each connection has timeout, so the only we need is to repeat connecting attempts + # during specified number of seconds + while True: + attempt_time_start = datetime.now() + self.disconnect(list(left_nodes.keys())) + + self.cluster.log.verbose("Attempting to connect to nodes...") + results.update(self._do("run", left_nodes, True, "last reboot", timeout=delay_period)) + left_nodes = {host: left_nodes[host] for host, result in results.items() + if isinstance(result, Exception) or result == initial_boot_history.get(self.nodes[host])} + + waited = (datetime.now() - time_start).total_seconds() + + if not left_nodes or waited >= timeout: + break + + for host, exc in results.items(): + if isinstance(exc, Exception) and not self.is_allowed_connection_exception(str(exc)): + self.cluster.log.verbose("Unexpected exception at %s, node is considered as not booted: %s" + % (host, str(exc))) + + self.cluster.log.verbose("Nodes %s are not ready yet, remaining time to wait %i" + % (list(left_nodes.keys()), timeout - waited)) + + attempt_time = (datetime.now() - attempt_time_start).total_seconds() + if attempt_time < delay_period: + time.sleep(delay_period - attempt_time) + + if left_nodes: + self.cluster.log.verbose("Failed to wait for boot of nodes %s." % list(left_nodes.keys())) + else: + self.cluster.log.verbose("All nodes are online now") + + return results + + def is_allowed_connection_exception(self, exception_message): + exception_message = exception_message.partition('\n')[0] + for known_exception_message in self.cluster.globals['connection']['bad_connection_exceptions']: + if known_exception_message in exception_message: + return True + + return False + + def is_allowed_etcd_exception(self, exception_message): + for known_exception_message in self.cluster.globals['etcd']['temporary_exceptions']: + if known_exception_message in exception_message: + return True + + return False + + def is_allowed_kubernetes_exception(self, exception_message): + for known_exception_message in self.cluster.globals['kubernetes']['temporary_exceptions']: + if known_exception_message in exception_message: + return True + + return False + + def get_local_file_sha1(self, filename): + # todo: use fabric instead of subprocess + openssl_result = subprocess.check_output("openssl sha1 %s" % filename, shell=True) + # process output is bytes and we have to decode it to utf-8 + return openssl_result.decode("utf-8").split("= ")[1].strip() + + def get_remote_file_sha1(self, filename): + results = self._do_with_wa("sudo", "openssl sha1 %s" % filename, warn=True) + self._make_result_or_fail(results, lambda h, r: isinstance(r, Exception)) + + return {host: result.stdout.split("= ")[1].strip() if result.stdout else None + for host, result in results.items()} + + def get_ordered_members_list(self, provide_node_configs=False, apply_filter=None) \ + -> List[Union[dict, 'NodeGroup']]: + + if apply_filter is None: + apply_filter = {} + + result = [] + # we have to iterate strictly in order which was defined by user in config-file + for node in self.cluster.inventory['nodes']: + # is iterable node from inventory is part of current NodeGroup? + if node['connect_to'] in self.nodes.keys(): + + # apply filters + suitable = True + if apply_filter is not None: + if callable(apply_filter): + if not apply_filter(node): + suitable = False + else: + # here intentionally there is no way to filter by values in lists field, + # for this you need to use custom functions. 
+ # Current solution implemented in this way because the filtering strategy is + # unclear - do I need to include when everything matches or is partial partial matching enough? + for key, value in apply_filter.items(): + if node.get(key) is None: + suitable = False + break + if isinstance(value, list): + if node[key] not in value: + suitable = False + break + # elif should definitely be here, not if + elif node[key] != value: + suitable = False + break + + # if not filtered + if suitable: + if provide_node_configs: + result.append(node) + else: + result.append(node['connection']) + + return result + + def get_member(self, number, provide_node_configs=False, apply_filter=None): + results = self.get_ordered_members_list(provide_node_configs=provide_node_configs, apply_filter=apply_filter) + + if not results: + return None + + return results[number] + + def get_first_member(self, provide_node_configs=False, apply_filter=None): + return self.get_member(0, provide_node_configs=provide_node_configs, apply_filter=apply_filter) + + def get_last_member(self, provide_node_configs=False, apply_filter=None): + return self.get_member(-1, provide_node_configs=provide_node_configs, apply_filter=apply_filter) + + def get_any_member(self, provide_node_configs=False, apply_filter=None): + return random.choice(self.get_ordered_members_list(provide_node_configs=provide_node_configs, + apply_filter=apply_filter)) + + def get_member_by_name(self, name, provide_node_configs=False): + return self.get_first_member(provide_node_configs=provide_node_configs, apply_filter={"name": name}) + + def new_group(self, apply_filter=None): + return self.cluster.make_group(self.get_ordered_members_list(apply_filter=apply_filter)) + + def include_group(self, group): + if group is None: + return self + + ips = list(self.nodes.keys()) + list(group.nodes.keys()) + return self.cluster.make_group(list(dict.fromkeys(ips))) + + def exclude_group(self, group): + if group is None: + return self + + ips = list(set(self.nodes.keys()) - set(group.nodes.keys())) + return self.cluster.make_group(list(dict.fromkeys(ips))) + + def intersection_group(self, group): + if group is None: + return self.cluster.make_group([]) + + ips = list(set(self.nodes.keys()).intersection(set(group.nodes.keys()))) + return self.cluster.make_group(list(dict.fromkeys(ips))) + + def disconnect(self, hosts: List[str] = None): + for host, cxn in self.nodes.items(): + if host in (hosts or self.nodes.keys()): + self.cluster.log.verbose('Disconnected session with %s' % host) + cxn.close() + cxn._sftp = None + + def get_nodes_names(self) -> List[str]: + result = [] + members = self.get_ordered_members_list(provide_node_configs=True) + for node in members: + result.append(node['name']) + return result + + def is_empty(self): + return not self.nodes + + def has_node(self, node_name): + return self.get_first_member(apply_filter={"name": node_name}) is not None + + def get_new_nodes(self): + return self.intersection_group(self.cluster.nodes.get('add_node')) + + def get_new_nodes_or_self(self): + new_nodes = self.get_new_nodes() + if not new_nodes.is_empty(): + return new_nodes + return self + + def get_nodes_for_removal(self): + return self.intersection_group(self.cluster.nodes.get('remove_node')) + + def get_nodes_for_removal_or_self(self): + nodes_for_removal = self.get_nodes_for_removal() + if not nodes_for_removal.is_empty(): + return nodes_for_removal + return self + + def get_changed_nodes(self): + return self.get_new_nodes().include_group(self.get_nodes_for_removal()) 
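+ + # Illustrative composition of the helpers above (a sketch, assuming a fully initialized cluster object): + # new_nodes = cluster.nodes['all'].get_new_nodes_or_self() + # remaining_masters = cluster.nodes['master'].get_final_nodes() + # Both expressions return NodeGroup instances, so they can be chained further with run(), sudo() or call().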
+ + def get_unchanged_nodes(self): + return self.exclude_group(self.get_changed_nodes()) + + def get_final_nodes(self): + return self.exclude_group(self.cluster.nodes.get('remove_node')) + + def get_initial_nodes(self): + return self.exclude_group(self.cluster.nodes.get('add_node')) + + def nodes_amount(self): + return len(self.nodes.keys()) + + def get_nodes_with_os(self, os_family): + if os_family not in ['debian', 'rhel', 'rhel8']: + raise Exception('Unsupported OS family provided') + node_names = [] + for node in self.get_ordered_members_list(provide_node_configs=True): + node_os_family = self.cluster.get_os_family_for_node(node['connect_to']) + if node_os_family == os_family: + node_names.append(node['name']) + return self.cluster.make_group_from_nodes(node_names) diff --git a/kubetool/core/log.py b/kubetool/core/log.py new file mode 100644 index 000000000..932f478e4 --- /dev/null +++ b/kubetool/core/log.py @@ -0,0 +1,308 @@ +import logging +import os +import sys +from pygelf import gelf, GelfTcpHandler, GelfUdpHandler, GelfTlsHandler, GelfHttpHandler + +from typing import List + +VERBOSE = 5 +gelf.LEVELS.update({VERBOSE: 8}) + +DEFAULT_FORMAT = '%(asctime)s %(name)s %(levelname)s %(message)s' + +BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8) + +RESET_SEQ = "\033[0m" +COLOR_SEQ = "\033[1;%dm" +BOLD_SEQ = "\033[1m" + +COLORS = { + 'RESET': RESET_SEQ, + 'BOLD': BOLD_SEQ, + 'BLACK': COLOR_SEQ % (30 + BLACK), + 'RED': COLOR_SEQ % (30 + RED), + 'GREEN': COLOR_SEQ % (30 + GREEN), + 'YELLOW': COLOR_SEQ % (30 + YELLOW), + 'BLUE': COLOR_SEQ % (30 + BLUE), + 'MAGENTA': COLOR_SEQ % (30 + MAGENTA), + 'CYAN': COLOR_SEQ % (30 + CYAN), + 'WHITE': COLOR_SEQ % (30 + WHITE), +} + +COLORS_SCHEME = { + 'WARNING': 'YELLOW', + 'VERBOSE': 'BLUE', + 'INFO': 'GREEN', + 'ERROR': 'RED', + 'CRITICAL': 'RED' +} + +LOGGING_LEVELS_BY_NAME = { + '5': VERBOSE, + 'verbose': VERBOSE, + '10': logging.DEBUG, + 'debug': logging.DEBUG, + '20': logging.INFO, + 'info': logging.INFO, + '30': logging.WARNING, + 'warn': logging.WARNING, + 'warning': logging.WARNING, + '40': logging.ERROR, + 'error': logging.ERROR, + '50': logging.CRITICAL, + 'critical': logging.CRITICAL +} + +LOGGING_NAMES_BY_LEVEL = { + VERBOSE: 'verbose', + logging.DEBUG: 'debug', + logging.INFO: 'info', + logging.WARNING: 'warning', + logging.ERROR: 'error', + logging.CRITICAL: 'critical' +} + + +class EnhancedLogger(logging.getLoggerClass()): + def __init__(self, name, level=logging.NOTSET): + super().__init__(name, level) + logging.addLevelName(VERBOSE, 'VERBOSE') + + def verbose(self, msg, *args, **kwargs): + if self.isEnabledFor(VERBOSE): + self._log(VERBOSE, msg, args, **kwargs) + + +logging.setLoggerClass(EnhancedLogger) + + +class LogFormatter(logging.Formatter): + def __init__(self, fmt=None, datefmt=None, style='%', colorize=False, correct_newlines=False): + super().__init__(fmt, datefmt, style) + self.colorize = colorize + self.correct_newlines = correct_newlines + + def _format(self, record): + s = super().format(record) + if self.colorize and record.levelname in COLORS_SCHEME: + s = '$__COLOR_' + COLORS_SCHEME[record.levelname] + s + '$__COLOR_RESET' + for color_name, color_code in COLORS.items(): + if self.colorize: + s = s.replace('$__COLOR_' + color_name, color_code) + else: + s = s.replace('$__COLOR_' + color_name, '') + return s + + def format(self, record): + messages = str(record.msg).split('\n') + if self.correct_newlines and len(messages): + subrecord = logging.makeLogRecord(record.__dict__) + s = '' + for message in 
messages: + if s != '': + s += '\n' + subrecord.msg = message + s += self._format(subrecord) + return s + else: + return self._format(record) + + +class StdoutHandler(logging.StreamHandler): + def __init__(self): + super().__init__(sys.stdout) + + def emit(self, record): + if hasattr(record, 'ignore_stdout') and getattr(record, 'ignore_stdout'): + return + super().emit(record) + + +class FileHandlerWithHeader(logging.FileHandler): + def __init__(self, filename, header=None, mode='a', encoding=None, delay=0): + # Store the header information. + self.header = header + + # Determine if the file pre-exists + self.file_pre_exists = os.path.exists(filename) + + # Call the parent __init__ + logging.FileHandler.__init__(self, filename, mode, encoding, delay) + + # Write the header if delay is False and a file stream was created. + if not delay and header and self.stream is not None: + self.stream.write('%s\n' % header) + + def emit(self, record): + # Create the file stream if not already created. + if self.stream is None: + self.stream = self._open() + + # If the file pre_exists, it should already have a header. + # Else write the header to the file so that it is the first line. + if not self.file_pre_exists: + self.stream.write('') + if self.header: + self.stream.write('%s\n' % self.header) + + # Call the parent class emit function. + logging.FileHandler.emit(self, record) + + +class LogHandler: + + def __init__(self, + target: str, + level: str, + colorize: bool = False, + correct_newlines: bool = False, + filemode: str = 'a', + format: str = DEFAULT_FORMAT, + datefmt: str = None, + header: str = None, + **kwargs): + + self._colorize = colorize + self._correct_newlines = correct_newlines + self._format = format + self._datefmt = datefmt + self._header = header + + self._formatter = LogFormatter(self._format, self._datefmt, + colorize=self._colorize, + correct_newlines=self._correct_newlines) + + if target.lower() == 'stdout': + self._target = 'stdout' + self.handler = StdoutHandler() + elif target.lower() == 'graylog': + self._target = 'graylog' + if not kwargs.get('host'): + raise Exception('Graylog host is not defined') + if not kwargs.get('port'): + raise Exception(f'Graylog port is not defined for "{kwargs["host"]}"') + if not kwargs.get('type'): + raise Exception(f'Graylog type is not defined for "{kwargs["host"]}:{kwargs["port"]}"') + handler_options = { + 'host': kwargs['host'], + 'port': kwargs['port'], + '_app_name': kwargs.get('appname', 'kubetools'), + 'debug': kwargs.get('debug', False), + 'version': kwargs.get('version', '1.1') + } + if kwargs['type'] == 'tcp': + self.handler = GelfTcpHandler(**handler_options) + elif kwargs['type'] == 'udp': + handler_options['compress'] = kwargs.get('compress', True) + handler_options['chunk_size'] = kwargs.get('chunk_size', 1300) + self.handler = GelfUdpHandler(**handler_options) + elif kwargs['type'] == 'tls': + handler_options['validate'] = kwargs.get('validate', True) + handler_options['ca_certs'] = kwargs.get('ca_certs') + handler_options['certfile'] = kwargs.get('certfile') + handler_options['keyfile'] = kwargs.get('keyfile') + self.handler = GelfTlsHandler(**handler_options) + elif kwargs['type'] == 'http': + handler_options['compress'] = kwargs.get('compress', True) + handler_options['path'] = kwargs.get('path', '/gelf') + handler_options['timeout'] = kwargs.get('timeout', 5) + self.handler = GelfHttpHandler(**handler_options) + else: + raise Exception(f'Unknown Graylog type "{kwargs["type"]}" for "{kwargs["host"]}:{kwargs["port"]}"') + 
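# Any other target value is treated as a path to a log file and is handled by FileHandlerWithHeader below. + 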
else: + self._target = target + self.handler = FileHandlerWithHeader(self._target, mode=filemode, header=self._header) + + self._level = LOGGING_LEVELS_BY_NAME.get(level) + if self._level is None: + raise Exception(f'Failed to create logger - unknown logging level: "{level}"') + self.handler.setLevel(self._level) + + self.handler.setFormatter(self._formatter) + + def __str__(self): + return f'target: {self._target}, level: {LOGGING_NAMES_BY_LEVEL[self._level]}, colorize: {self._colorize}, datefmt: {self._datefmt}, format: {self._format}' + + def append_to_logger(self, logger) -> None: + logger.addHandler(self.handler) + + def has_stdout_target(self) -> bool: + return self._target == 'stdout' + + +class Log: + + def __init__(self, cluster, handlers: List[LogHandler]): + self._cluster = cluster + self._logger = logging.getLogger(cluster.raw_inventory.get('cluster_name', 'cluster.local')) + self._logger.setLevel(VERBOSE) + + if self._logger.hasHandlers(): + self._logger.handlers.clear() + + for handler in handlers: + handler.append_to_logger(self._logger) + + @property + def logger(self) -> EnhancedLogger: + return self._logger + + +def parse_log_argument(argument: str) -> LogHandler: + """ + Parse raw CLI arguments and verify for required parameters + :param argument: Raw CLI argument string. For example: test.log;level=verbose;colorize=true + :return: Initialized LogHandler + """ + parameters = {} + argument_parts = argument.split(';') + if not argument_parts: + raise Exception('Defined logger do not contain parameters') + parameters['target'] = argument_parts[0] + for parameter in argument_parts[1:]: + if parameter == '': + continue + key, value, *rest = parameter.split('=') + if key in ['colorize', 'correct_newlines', 'debug', 'compress', 'validate']: + value = value.lower() in ['true', '1'] + elif key in ['chunk_size', 'timeout', 'port']: + value = int(value) + parameters[key] = value + if not parameters.get('level'): + raise Exception(f'Logging level is not set for logger "{parameters["target"]}"') + return LogHandler(**parameters) + + +def init_log_from_context_args(cluster) -> Log: + """ + Create Log from raw CLI arguments in Cluster context + :param cluster: Cluster for which logging is created. It may not be fully initialized, it is enough to be globals and raw_inventory loaded. 
+ :return: Initialized Log, based on all parsed logging arguments + """ + + handlers = [] + stdout_specified = False + + if cluster.context['execution_arguments'].get('log') is not None: + for argument in cluster.context['execution_arguments'].get('log'): + handler = parse_log_argument(argument[0]) + if handler.has_stdout_target(): + if stdout_specified: + raise Exception('Multiple stdout logs specified') + else: + stdout_specified = True + handlers.append(handler) + + if not cluster.context['execution_arguments'].get('disable_dump', True): + handlers.append(LogHandler(target=os.path.join(cluster.context['execution_arguments']['dump_location'], 'debug.log'), + **cluster.globals['logging']['default_targets']['dump'])) + + if not stdout_specified: + handlers.append(LogHandler(target='stdout', + **cluster.globals['logging']['default_targets']['stdout'])) + + log = Log(cluster, handlers) + + log.logger.verbose('Using the following loggers: \n\t%s' % "\n\t".join("- " + str(x) for x in handlers)) + + return log diff --git a/kubetool/core/utils.py b/kubetool/core/utils.py new file mode 100755 index 000000000..637651ad0 --- /dev/null +++ b/kubetool/core/utils.py @@ -0,0 +1,303 @@ +#!/usr/bin/env python3 + +import io +import json +import os +import shutil +import sys +import time +from typing import Union + +import ruamel.yaml +from copy import deepcopy +from traceback import * +from datetime import datetime +from collections import OrderedDict + +import fabric.exceptions + +from kubetool.plugins import nginx_ingress + + +def do_fail(message='', reason: Union[str, Exception] = '', hint='', log=None): + + if log: + log.critical('FAILURE!') + if message != "": + log.critical(message) + else: + sys.stderr.write("\033[91mFAILURE!") + if message != "": + sys.stderr.write(" - " + message + "\n") + + if reason != "": + if isinstance(reason, fabric.exceptions.GroupException): + if log: + log.critical("Remote group exception") + for connection, result in reason.result.items(): + log.critical("%s:" % connection.host) + log.critical(result) + else: + sys.stderr.write("Remote group exception\n") + for connection, result in reason.result.items(): + sys.stderr.write("\n%s:" % connection.host) + sys.stderr.write("\n%s\n" % result) + elif isinstance(reason, Exception): + if log: + log.critical('Unexpected exception', exc_info=True) + else: + sys.stderr.write("Unexpected exception\n\n") + print_exc() + else: + if log: + log.critical(reason) + else: + sys.stderr.write(reason + "\n") + + sys.stderr.write("\n") + + # Please do not rewrite this to logging approach: + # hint should be visible only in stdout and without special formatting + if hint != "": + sys.stderr.write(hint) + + if not log: + sys.stderr.write("\033[0m\n") + + sys.exit(1) + + +def get_elapsed_string(start, end): + elapsed = end - start + hours, remainder = divmod(elapsed, 3600) + minutes, seconds = divmod(remainder, 60) + return '{:02}h {:02}m {:02}s'.format(int(hours), int(minutes), int(seconds)) + + +def prepare_dump_directory(location, reset_directory=True): + if reset_directory and os.path.exists(location) and os.path.isdir(location): + shutil.rmtree(location) + os.makedirs(location, exist_ok=True) + + +def make_ansible_inventory(location, cluster): + + inventory = get_final_inventory(cluster) + roles = [] + for node in inventory['nodes']: + for role in node['roles']: + if role not in roles: + roles.append(role) + + config = { + 'all': [ + 'localhost ansible_connection=local' + ], + 'cluster:children': [] + } + + already_global_defined = [] + + 
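# The full connection record is written once into the global 'all' group; each role group below then lists the node by name only. + 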
for role in roles: + config[role] = [] + config['cluster:children'].append(role) + for node in cluster.nodes[role].get_final_nodes().get_ordered_members_list(provide_node_configs=True): + record = "%s ansible_host=%s ansible_ssh_user=%s ansible_ssh_private_key_file=%s ip=%s" % \ + (node['name'], + node['connect_to'], + node.get('username', cluster.globals['connection']['defaults']['username']), + node['keyfile'], + node['internal_address']) + if node.get('address') is not None: + record += ' external_ip=%s' % node['address'] + + if node['name'] not in already_global_defined: + config['all'].append(record) + # to avoid duplicate definition in global section we have to check is that was already defined? + already_global_defined.append(node['name']) + + config[role].append(node['name']) + + config['cluster:vars'] = [ + 'ansible_become=True' + ] + + for group in ['services', 'plugins']: + if inventory.get(group) is not None: + for service_name, service_configs in inventory[group].items(): + # write to inventory only plugins, which will be installed + if group != 'plugins' or service_configs.get('install', False) is True: + + config['cluster:vars'].append('\n# %s.%s' % (group, service_name)) + + if isinstance(service_configs, dict): + + if service_configs.get('installation') is not None: + del service_configs['installation'] + if service_configs.get('install') is not None: + del service_configs['install'] + + for config_name, config_value in service_configs.items(): + if isinstance(config_value, dict) or isinstance(config_value, list): + config_value = json.dumps(config_value) + config['cluster:vars'].append('%s_%s=%s' % ( + # todo: use regexp for replace + service_name.replace('-', '_').replace('.', '_').replace('/', '_'), + config_name.replace('-', '_').replace('.', '_').replace('/', '_'), + config_value)) + else: + config_value = json.dumps(service_configs) + config['cluster:vars'].append('%s=%s' % ( + service_name.replace('-', '_').replace('.', '_'), + config_value)) + + config_compiled = '' + for section_name, strings in config.items(): + config_compiled += '[%s]' % section_name + for string in strings: + config_compiled += '\n'+string + config_compiled += '\n\n' + + with open(location, 'w') as configfile: + configfile.write(config_compiled) + + +def get_current_timestamp_formatted(): + return datetime.now().strftime("%Y%m%d-%H%M%S") + + +def recreate_final_inventory_file(cluster): + + # load inventory as ruamel.yaml to save original structure + ruamel_yaml = ruamel.yaml.YAML() + ruamel_yaml.preserve_quotes = True + with open(get_resource_absolute_path(cluster.context['execution_arguments']['config']), "r") as stream: + initial_inventory = ruamel_yaml.load(stream) + + # write original file data to backup file with timestamp + timestamp = get_current_timestamp_formatted() + inventory_file_basename = os.path.basename(cluster.context['execution_arguments']['config']) + dump_file(cluster, stream, "%s_%s" % (inventory_file_basename, str(timestamp))) + + # convert initial inventory to final + final_inventory = get_final_inventory(cluster, initial_inventory=initial_inventory) + + # replace intial inventory with final one + with open(get_resource_absolute_path(cluster.context['execution_arguments']['config']), "w+") as stream: + ruamel_yaml.dump(final_inventory, stream) + + +def get_final_inventory(cluster, initial_inventory=None): + if initial_inventory is None: + inventory = deepcopy(cluster.inventory) + else: + inventory = deepcopy(initial_inventory) + + from kubetool import psp + from 
kubetool.procedures import add_node, remove_node, upgrade, migrate_cri + + inventory_finalize_functions = { + add_node.add_node_finalize_inventory, + remove_node.remove_node_finalize_inventory, + upgrade.upgrade_finalize_inventory, + psp.finalize_inventory, + nginx_ingress.finalize_inventory, + migrate_cri.migrate_cri_finalize_inventory + } + + for finalize_fn in inventory_finalize_functions: + inventory = finalize_fn(cluster, inventory) + + return inventory + + +def merge_vrrp_ips(procedure_inventory, inventory): + if "vrrp_ips" in inventory and len(inventory["vrrp_ips"]) > 0: + raise Exception("vrrp_ips section already defined, merging not supported yet") + else: + inventory["vrrp_ips"] = procedure_inventory["vrrp_ips"] + + if isinstance(inventory, OrderedDict): + inventory.move_to_end("vrrp_ips", last=False) + + +def dump_file(cluster, data, filename): + if isinstance(data, io.StringIO): + data = data.getvalue() + if isinstance(data, io.TextIOWrapper): + data = data.read() + + if cluster.context.get("dump_filename_prefix"): + filename = f"{cluster.context['dump_filename_prefix']}_{filename}" + + if not cluster.context['execution_arguments'].get('disable_dump', True): + with open(get_resource_absolute_path(cluster.context['execution_arguments']['dump_location']+'/'+filename), 'w') as file: + file.write(data) + + +def wait_command_successful(group, command, retries=15, timeout=5, warn=True, hide=False): + log = group.cluster.log + + while retries > 0: + log.debug("Waiting for command to succeed, %s retries left" % retries) + result = group.sudo(command, warn=warn, hide=hide) + exit_code = list(result.values())[0].exited + if exit_code == 0: + log.debug("Command succeeded") + return + retries = retries - 1 + time.sleep(timeout) + raise Exception("Command failed") + + +def get_resource_absolute_path(path, script_relative=False): + initial_relative = '' + if script_relative: + initial_relative = os.path.dirname(__file__) + '/../' + return os.path.abspath(initial_relative + path) + + +def determine_resource_absolute_path(path): + # is resource exists as it is defined? + initial_definition = get_resource_absolute_path(path, script_relative=False) + if os.path.isfile(initial_definition): + return initial_definition + + # is resource exists as internal resource? + patched_definition = get_resource_absolute_path(path, script_relative=True) + if os.path.isfile(patched_definition): + return patched_definition + + raise Exception('Requested resource %s is not exists at %s or %s' % (path, initial_definition, patched_definition)) + + +def get_resource_absolute_dir(path: str, script_relative=False) -> str: + """ + Get absolute path to resource directory + :param path: Relative path to resource + :param script_relative: True, if resource is internal + :return: Absolute path to resource directory + """ + initial_relative = '' + if script_relative: + initial_relative = os.path.dirname(__file__) + '/../' + return os.path.abspath(os.path.dirname(initial_relative + path)) + + +def determine_resource_absolute_dir(path: str) -> str: + """ + Get and verify absolute path to resource directory + :param path: Relative path to resource + :return: Absolute path to resource directory + """ + # is resource dir exists as it is defined? + initial_definition = get_resource_absolute_dir(path, script_relative=False) + if os.path.isdir(initial_definition): + return initial_definition + + # is resource dir exists as internal resource? 
+ patched_definition = get_resource_absolute_dir(path, script_relative=True) + if os.path.isdir(patched_definition): + return patched_definition + + raise Exception('Requested resource directory %s is not exists at %s or %s' % (path, initial_definition, patched_definition)) diff --git a/kubetool/core/yaml_merger.py b/kubetool/core/yaml_merger.py new file mode 100644 index 000000000..17c68fcb2 --- /dev/null +++ b/kubetool/core/yaml_merger.py @@ -0,0 +1,37 @@ +from deepmerge import Merger + + +def list_merger(config, path, base, nxt): + strategy = None + strategy_definition_position = 0 + for i, v in enumerate(nxt): + if isinstance(v, dict) and v.get('<<') is not None: + strategy = v.get('<<') + strategy_definition_position = i + + if strategy is None: + strategy = 'replace' + else: + # delete << key-value from array elements + del nxt[strategy_definition_position] + + if strategy == 'merge': + elements_after = nxt[strategy_definition_position:] + elements_before = nxt[:strategy_definition_position] + + nxt = [] + nxt.extend(elements_before) + nxt.extend(base) + nxt.extend(elements_after) + + return nxt + + +default_merger = Merger( + [ + (list, [list_merger]), + (dict, ["merge"]) + ], + ["override"], + ["override"] +) \ No newline at end of file diff --git a/kubetool/coredns.py b/kubetool/coredns.py new file mode 100644 index 000000000..7e4c74e58 --- /dev/null +++ b/kubetool/coredns.py @@ -0,0 +1,164 @@ +import yaml + +from kubetool import system +from kubetool.core import utils + +import io + + +def enrich_add_hosts_config(inventory, cluster): + if not inventory['services']['coredns']['configmap'].get('Hosts'): + inventory['services']['coredns']['configmap']['Hosts'] = system.generate_etc_hosts_config(inventory, cluster) + return inventory + + +def proceed_section_keyvalue(data, tabsize): + tab = " "*tabsize + config = '' + + for key, value in data.items(): + if isinstance(value, bool): + if value: + config += '\n' + tab + '%s' % key + continue + if isinstance(value, str) or isinstance(value, int) and value: + if not isinstance(value, int) and any((c in set(' ')) for c in value): + config += '\n' + tab + '%s \"%s\"' % (key, value) + elif isinstance(value, str) and len(value) == 0: + config += '\n' + tab + '%s' % key + else: + config += '\n' + tab + '%s %s' % (key, value) + continue + if isinstance(value, list) and value: + config += '\n' + tab + '%s %s' % (key, " ".join(value)) + continue + if isinstance(value, dict): + config += generate_nested_sections(key, value, tabsize) + continue + raise Exception('Unknown type of field in coredns services') + + return config + + +def generate_nested_sections(type, data, tabsize): + tab = " "*tabsize + config = '' + + max_priority = 0 + for section_name, section_value in data.items(): + if section_value.get('priority') is not None and section_value['priority'] > max_priority: + max_priority = section_value['priority'] + + iterated = 0 + sections = [] + for section_name, section_value in data.items(): + if section_value.get('priority') is None: + iterated += 1 + section_priority = max_priority + iterated + else: + section_priority = section_value['priority'] + + if section_value.get('enabled', True) in ['1', 1, True, 'True']: + sections.append({ + 'name': section_name, + 'priority': section_priority + }) + + sections = sorted(sections, key=lambda i: i['priority']) + + for section in sections: + + if type == 'kubernetes': + config += '\n' + tab + type + if data[section['name']].get('zone'): + if isinstance(data[section['name']]['zone'], list): + 
data[section['name']]['zone'] = ' '.join(data[section['name']]['zone']) + config += ' ' + data[section['name']]['zone'] + config += ' {' + proceed_section_keyvalue(data[section['name']]['data'], tabsize + 2) + '\n' + tab + '}' + + elif type == 'hosts': + config += '\n' + tab + type + if data[section['name']].get('file') and isinstance(data[section['name']]['file'], str): + config += ' ' + data[section['name']]['file'] + config += ' {' + proceed_section_keyvalue(data[section['name']]['data'], tabsize + 2) + '\n' + tab + '}' + + elif type == 'template': + if data[section['name']].get('zone'): + if isinstance(data[section['name']]['zone'], str): + data[section['name']]['zone'] = [data[section['name']]['zone']] + else: + data[section['name']]['zone'] = [None] + for zone in data[section['name']]['zone']: + config += '\n' + tab + type + if data[section['name']].get('class'): + config += ' ' + data[section['name']]['class'] + if data[section['name']].get('type'): + config += ' ' + data[section['name']]['type'] + if zone: + config += ' ' + zone + config += ' {' + proceed_section_keyvalue(data[section['name']]['data'], tabsize + 2) + '\n' + tab + '}' + + else: + config += '\n' + tab + type + ' {' + proceed_section_keyvalue(data[section['name']]['data'], tabsize + 2)\ + + '\n' + tab + '}' + + return config + + +def generate_configmap(inventory): + config = '''apiVersion: v1 +kind: ConfigMap +metadata: + name: coredns + namespace: kube-system +data:''' + + for config_type, data in inventory['services']['coredns']['configmap'].items(): + config += '\n %s: |' % config_type + if config_type == 'Corefile': + for port, settings in data.items(): + config += '\n %s {' % port + config += proceed_section_keyvalue(settings, 6) + config += '\n }' + else: + config += '\n ' + data.replace('\n', '\n ') + return config + '\n' + + +def apply_configmap(cluster, config): + utils.dump_file(cluster, config, 'coredns-configmap.yaml') + + group = cluster.nodes['master'].include_group(cluster.nodes['worker']).get_final_nodes() + group.put(io.StringIO(config), '/etc/kubernetes/coredns-configmap.yaml', backup=True, sudo=True) + + return cluster.nodes['master'].get_final_nodes().get_first_member()\ + .sudo('kubectl apply -f /etc/kubernetes/coredns-configmap.yaml && ' + 'sudo kubectl rollout restart -n kube-system deployment/coredns') + + +def apply_patch(cluster): + apply_command = '' + + for config_type in ['deployment']: + + if not cluster.inventory['services']['coredns'].get(config_type): + continue + + if apply_command != '': + apply_command += ' && sudo ' + + config = yaml.dump(cluster.inventory['services']['coredns'][config_type]) + filename = 'coredns-%s-patch.yaml' % config_type + filepath = '/etc/kubernetes/' + filename + + utils.dump_file(cluster, config, filename) + + group = cluster.nodes['master'].include_group(cluster.nodes['worker']).get_final_nodes() + group.put(io.StringIO(config), filepath, backup=True, sudo=True) + + apply_command = 'kubectl patch %s coredns -n kube-system --type merge -p \"$(sudo cat %s)\"' % (config_type, filepath) + + if apply_command == '': + return 'Nothing to patch' + + return cluster.nodes['master'].get_final_nodes().get_first_member().sudo(apply_command) diff --git a/kubetool/cri/__init__.py b/kubetool/cri/__init__.py new file mode 100644 index 000000000..8d2717f5d --- /dev/null +++ b/kubetool/cri/__init__.py @@ -0,0 +1,56 @@ +from kubetool.core.group import NodeGroupResult +from kubetool.cri import docker, containerd + + +def enrich_inventory(inventory, cluster): + if "docker" in 
cluster.inventory['services']: + raise Exception(f"docker configuration no longer belongs to 'services.docker' section, " + f"please move docker configuration to 'services.cri.dockerConfig' section") + + cri_impl = inventory['services']['cri']['containerRuntime'] + if cri_impl != "docker" and cri_impl != "containerd": + raise Exception("Unexpected container runtime specified: %s, supported are: docker, containerd" % cri_impl) + + if cluster.context.get("initial_procedure") == "migrate_cri": + return inventory + + if cri_impl == "docker": + forbidden_cri_sections = {"containerd": "containerdConfig"} + else: + forbidden_cri_sections = {"docker": "dockerConfig"} + for key, value in forbidden_cri_sections.items(): + if value in cluster.raw_inventory.get('services', {}).get('cri', {}): + raise Exception(f"{key} is not used, please remove {value} config from `services.cri` section") + + return inventory + + +def install(group): + cri_impl = group.cluster.inventory['services']['cri']['containerRuntime'] + + if cri_impl == "docker": + return docker.install(group) + else: + return containerd.install(group) + + +def configure(group): + cri_impl = group.cluster.inventory['services']['cri']['containerRuntime'] + + if cri_impl == "docker": + return docker.configure(group) + else: + return containerd.configure(group) + + +def prune(group, all_implementations=False): + cri_impl = group.cluster.inventory['services']['cri']['containerRuntime'] + + result = NodeGroupResult() + if cri_impl == "docker" or all_implementations: + result.update(docker.prune(group)) + + if cri_impl == "containerd" or all_implementations: + result.update(containerd.prune(group)) + + return result diff --git a/kubetool/cri/containerd.py b/kubetool/cri/containerd.py new file mode 100755 index 000000000..97acb8900 --- /dev/null +++ b/kubetool/cri/containerd.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python3 + +from io import StringIO + +import toml +import yaml + +from kubetool import system, packages +from kubetool.core import utils +from kubetool.core.executor import RemoteExecutor + + +def install(group): + with RemoteExecutor(group.cluster.log) as exe: + for node in group.get_ordered_members_list(provide_node_configs=True): + os_specific_associations = group.cluster.get_associations_for_node(node['connect_to'])['containerd'] + + group.cluster.log.debug("Installing latest containerd and podman on %s node" % node['name']) + # always install latest available containerd and podman + packages.install(node['connection'], include=os_specific_associations['package_name']) + + # remove previous config.toml to avoid problems in case when previous config was broken + node['connection'].sudo("rm -f %s && sudo systemctl restart %s" + % (os_specific_associations['config_location'], + os_specific_associations['service_name'])) + + system.enable_service(node['connection'], os_specific_associations['service_name'], now=True) + return exe.get_last_results_str() + + +def configure(group): + log = group.cluster.log + + log.debug("Uploading crictl configuration for containerd...") + crictl_config = yaml.dump({"runtime-endpoint": "unix:///run/containerd/containerd.sock"}) + utils.dump_file(group.cluster, crictl_config, 'crictl.yaml') + group.put(StringIO(crictl_config), '/etc/crictl.yaml', backup=True, sudo=True) + + config_string = "" + # double loop is used to make sure that no "simple" `key: value` pairs are accidentally assigned to sections + for key, value in group.cluster.inventory["services"]["cri"]['containerdConfig'].items(): + # first we process 
all "simple" `key: value` pairs + if not isinstance(value, dict): + config_string += f"{toml.dumps({key: value})}" + for key, value in group.cluster.inventory["services"]["cri"]['containerdConfig'].items(): + # next we process all "complex" `key: dict_value` pairs, representing named sections + if isinstance(value, dict): + config_string += f"\n[{key}]\n{toml.dumps(value)}" + + # if there are any insecure registries in containerd config, then it is required to configure them for podman too + config_toml = toml.loads(config_string) + insecure_registries = [] + if config_toml.get('plugins', {}).get('io.containerd.grpc.v1.cri', {}).get('registry', {}).get('mirrors'): + for mirror, mirror_conf in config_toml['plugins']['io.containerd.grpc.v1.cri']['registry']['mirrors'].items(): + is_insecure = False + for endpoint in mirror_conf.get('endpoint', []): + if "http://" in endpoint: + is_insecure = True + break + if is_insecure: + insecure_registries.append(mirror) + if insecure_registries: + log.debug("Uploading podman configuration...") + podman_registries = f"[registries.insecure]\nregistries = {insecure_registries}\n" + utils.dump_file(group.cluster, podman_registries, 'podman_registries.conf') + group.sudo("mkdir -p /etc/containers/") + group.put(StringIO(podman_registries), "/etc/containers/registries.conf", backup=True, sudo=True) + else: + log.debug("Removing old podman configuration...") + group.sudo("rm -f /etc/containers/registries.conf") + + utils.dump_file(group.cluster, config_string, 'containerd-config.toml') + with RemoteExecutor(group.cluster.log) as exe: + for node in group.get_ordered_members_list(provide_node_configs=True): + os_specific_associations = group.cluster.get_associations_for_node(node['connect_to'])['containerd'] + log.debug("Uploading containerd configuration to %s node..." % node['name']) + node['connection'].put(StringIO(config_string), os_specific_associations['config_location'], backup=True, + sudo=True) + log.debug("Restarting Containerd on %s node..." 
% node['name']) + node['connection'].sudo(f"chmod 600 {os_specific_associations['config_location']} && " + f"sudo systemctl restart {os_specific_associations['service_name']} && " + f"systemctl status {os_specific_associations['service_name']}") + return exe.get_last_results_str() + + +def prune(group): + return group.sudo('crictl rm -fa; ' + 'sudo crictl rmp -fa; ' + 'sudo crictl rmi -a; ' + 'sudo ctr content ls -q | xargs -r sudo ctr content rm', warn=True) diff --git a/kubetool/cri/docker.py b/kubetool/cri/docker.py new file mode 100755 index 000000000..2e90915e7 --- /dev/null +++ b/kubetool/cri/docker.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python3 + +import json +from io import StringIO + +from kubetool import system, packages +from kubetool.core import utils +from kubetool.core.executor import RemoteExecutor + + +def install(group): + with RemoteExecutor(group.cluster.log) as exe: + for node in group.get_ordered_members_list(provide_node_configs=True): + os_specific_associations = group.cluster.get_associations_for_node(node['connect_to'])['docker'] + packages.install(node['connection'], include=os_specific_associations['package_name']) + enable(node['connection']) + + # remove previous daemon.json to avoid problems in case when previous config was broken + node['connection'].sudo("rm -f %s && sudo systemctl restart %s" + % (os_specific_associations['config_location'], + os_specific_associations['service_name'])) + + return exe.get_last_results_str() + + +def uninstall(group): + # delete all known docker packages + return packages.remove(group, include=['docker', 'docker-engine', 'docker.io', 'docker-ce']) + + +def enable(group): + system.enable_service( + group, + name=group.cluster.inventory['services']['packages']['associations']['docker']['service_name'], now=True) + + +def disable(group): + system.disable_service( + group, + name=group.cluster.inventory['services']['packages']['associations']['docker']['service_name'], now=True) + + +def configure(group): + log = group.cluster.log + + settings_json = json.dumps(group.cluster.inventory["services"]['cri']['dockerConfig'], sort_keys=True, indent=4) + utils.dump_file(group.cluster, settings_json, 'docker-daemon.json') + + with RemoteExecutor(group.cluster.log) as exe: + for node in group.get_ordered_members_list(provide_node_configs=True): + os_specific_associations = group.cluster.get_associations_for_node(node['connect_to'])['docker'] + log.debug("Uploading docker configuration to %s node..." % node['name']) + node['connection'].put(StringIO(settings_json), os_specific_associations['config_location'], backup=True, + sudo=True) + log.debug("Restarting Docker on %s node..." 
% node['name']) + node['connection'].sudo(f"chmod 600 {os_specific_associations['config_location']} && " + f"sudo systemctl restart {os_specific_associations['service_name']} && " + f"sudo {os_specific_associations['executable_name']} info") + + return exe.get_last_results_str() + + +def prune(group): + return group.sudo('docker container stop $(sudo docker container ls -aq); ' + 'sudo docker container rm $(sudo docker container ls -aq); ' + 'sudo docker system prune -a -f; ' + # kill all containerd-shim processes, so that no orphan containers remain + 'sudo pkill -9 -f "^containerd-shim"', warn=True, hide=True) diff --git a/kubetool/demo.py b/kubetool/demo.py new file mode 100644 index 000000000..4dd6c1801 --- /dev/null +++ b/kubetool/demo.py @@ -0,0 +1,298 @@ +import io +from typing import List, Dict, Union, Any + +import fabric +from invoke import UnexpectedExit + +from kubetool.core import cluster, group, flow, executor +from kubetool.core.cluster import KubernetesCluster +from kubetool.core.connections import Connections +from kubetool.core.group import NodeGroup, _HostToResult +from kubetool.core.executor import RemoteExecutor + + +class FakeShell: + def __init__(self, _cluster): + self.cluster = _cluster + self.results: List[Dict[str, Union[_HostToResult, Any]]] = [] + self.history = [] + + def reset(self): + self.results = [] + self.history = [] + + def add(self, result: _HostToResult, do_type, args, usage_limit=0): + args.sort() + + result = { + 'result': result, + 'do_type': do_type, + 'args': args + } + + if usage_limit > 0: + result['usage_limit'] = usage_limit + + self.results.append(result) + + def find(self, do_type, args, kwargs): + # TODO: support kwargs + if isinstance(args, tuple): + args = list(args) + for i, item in enumerate(self.results): + if item['do_type'] == do_type and item['args'] == args: + self.history.append(item) + if item.get('usage_limit') is not None: + self.results[i]['usage_limit'] -= 1 + if self.results[i]['usage_limit'] < 1: + del self.results[i] + return item['result'] + return None + + # covered by test.test_demo.TestFakeShell.test_calculate_calls + def history_find(self, do_type, args): + # TODO: support kwargs + result = [] + if isinstance(args, tuple): + args = list(args) + for item in self.history: + if item['do_type'] == do_type and item['args'] == args: + result.append(item) + return result + + +class FakeFS: + def __init__(self, _cluster): + self.cluster = _cluster + self.storage = {} + + def reset(self): + self.storage = {} + + def reset_host(self, host): + self.storage[host] = {} + + # covered by test.test_demo.TestFakeFS.test_put_string + # covered by test.test_demo.TestFakeFS.test_put_stringio + def write(self, host, filename, data): + if isinstance(data, io.StringIO): + data = data.getvalue() + if self.storage.get(host) is None: + self.storage[host] = {} + self.storage[host][filename] = data + + # covered by test.test_demo.TestFakeFS.test_write_file_to_cluster + def group_write(self, _group, filename, data): + for host, connection in _group.nodes.items(): + self.write(host, filename, data) + + # covered by test.test_demo.TestFakeFS.test_put_string + # covered by test.test_demo.TestFakeFS.test_get_nonexistent + def read(self, host, filename): + return self.storage.get(host, {}).get(filename) + + # covered by test.test_demo.TestFakeFS.test_write_file_to_cluster + def group_read(self, _group, filename): + result = {} + for host, connection in _group.nodes.items(): + result[host] = self.read(host, filename) + return result + + def ls(self, 
host, path): + for _path in list(self.storage.get(host, {}).keys()): + # TODO + pass + + def rm(self, host, path): + for _path in list(self.storage.get(host, {}).keys()): + if path in _path: + del self.storage[host][_path] + + +class FakeKubernetesCluster(cluster.KubernetesCluster): + + def __init__(self, inventory, execution_arguments): + super().__init__(inventory, execution_arguments) + self.fake_shell = FakeShell(self) + self.fake_fs = FakeFS(self) + + def make_group(self, ips) -> NodeGroup: + nodegroup = super().make_group(ips) + return FakeNodeGroup(nodegroup.nodes, self) + + def finish(self): + return + + +class FakeNodeGroupResult(group.NodeGroupResult): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + +class FakeNodeGroup(group.NodeGroup): + + def __init__(self, connections: Connections, cluster_: FakeKubernetesCluster): + super().__init__(connections, cluster_) + self.cluster = cluster_ + + def _do(self, do_type, nodes: Connections, is_async, *args, **kwargs) -> _HostToResult: + + if do_type in ['sudo', 'run']: + found_result = self.cluster.fake_shell.find(do_type, args, kwargs) + + if found_result is None: + raise Exception('Fake result not found for requested action type \'%s\' and args %s' % (do_type, args)) + + found_result = {host: result for host, result in found_result.items() if host in nodes.keys()} + for host, result in found_result.items(): + if isinstance(result, UnexpectedExit) and kwargs.get('warn', False): + found_result[host] = result.result + + # Remote Executor support code + gre = RemoteExecutor(self.cluster.log) + executor = gre._get_active_executor() + batch_results = {} + for host, result in found_result.items(): + batch_results[host] = {0: result} + executor.results.append(batch_results) + + return found_result + + raise Exception('Unsupported do type') + + def put(self, *args, **kwargs): + self.cluster.fake_fs.group_write(self, args[1], args[0]) + + def disconnect(self, hosts: List[str] = None): + return + + def _make_result(self, results: _HostToResult) -> FakeNodeGroupResult: + group_result = FakeNodeGroupResult() + for host, result in results.items(): + group_result[self.nodes[host]] = result + + return group_result + + +def new_cluster(inventory, procedure=None, fake=True, + os_name='centos', os_version='7.9', net_interface='eth0'): + + context = flow.create_context({ + 'disable_dump': True, + 'nodes': [] + }, procedure=procedure) + + os_family = None + + if os_name in ['centos', 'rhel']: + os_family = 'rhel' + elif os_name in ['ubuntu', 'debian']: + os_family = 'debian' + + for node in inventory['nodes']: + node_context = { + 'name': node['name'], + 'online': True, + 'hasroot': True, + 'active_interface': net_interface, + 'os': { + 'name': os_name, + 'family': os_family, + 'version': os_version + } + } + connect_to = node['internal_address'] + if node.get('address'): + connect_to = node['address'] + context['nodes'][connect_to] = node_context + + context['os'] = os_family + + # It is possible to disable FakeCluster and create real cluster Object for some business case + if fake: + return FakeKubernetesCluster(inventory, context) + else: + return KubernetesCluster(inventory, context) + + +def generate_inventory(balancer=1, master=1, worker=1, keepalived=0): + inventory = { + 'node_defaults': { + 'keyfile': '/dev/null', + 'username': 'anonymous' + }, + 'nodes': [], + 'services': { + 'cri': {} + }, + 'cluster_name': 'k8s.fake.local' + } + + id_roles_map = {} + + for role_name in ['balancer', 'master', 'worker']: + + item = 
locals()[role_name] + + if isinstance(item, int): + ids = [] + if item > 0: + for i in range(0, item): + ids.append('%s-%s' % (role_name, i + 1)) + item = ids + + if item: + for id_ in item: + roles = id_roles_map.get(id_) + if roles is None: + roles = [] + roles.append(role_name) + id_roles_map[id_] = roles + + ip_i = 0 + + for id_, roles in id_roles_map.items(): + ip_i = ip_i + 1 + inventory['nodes'].append({ + 'name': id_, + 'address': '10.101.1.%s' % ip_i, + 'internal_address': '192.168.0.%s' % ip_i, + 'roles': roles + }) + + if isinstance(keepalived, int): + ips = [] + if keepalived > 0: + for i in range(0, keepalived): + ips.append('10.101.2.%s' % (i + 1)) + keepalived = ips + + inventory['vrrp_ips'] = keepalived + + return inventory + + +def create_exception_result(group_: NodeGroup, exception: Exception) -> _HostToResult: + return {host: exception for host in group_.nodes.keys()} + + +def create_nodegroup_result(group_: NodeGroup, stdout='', stderr='', code=0) -> _HostToResult: + results = {} + for host, cxn in group_.nodes.items(): + results[host] = fabric.runners.Result(stdout=stdout, stderr=stderr, exited=code, connection=cxn) + if code == -1: + results[host] = UnexpectedExit(results[host]) + return results + + +def empty_action(group): + pass + + +FULLHA = {'balancer': 1, 'master': 3, 'worker': 3} +FULLHA_KEEPALIVED = {'balancer': 2, 'master': 3, 'worker': 3, 'keepalived': 1} +FULLHA_NOBALANCERS = {'balancer': 0, 'master': 3, 'worker': 3} +ALLINONE = {'master': 1} +MINIHA = {'master': 3} +MINIHA_KEEPALIVED = {'master': 3, 'balancer': ['master-1', 'master-2', 'master-3'], + 'worker': ['master-1', 'master-2', 'master-3'], 'keepalived': 1} diff --git a/kubetool/etcd.py b/kubetool/etcd.py new file mode 100644 index 000000000..3d269ecd3 --- /dev/null +++ b/kubetool/etcd.py @@ -0,0 +1,34 @@ +from kubetool.core.group import NodeGroup + + +# the method requires etcdctl.sh to be installed on all active master nodes during thirdparties task. 
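+# In short: the member list is fetched with 'etcdctl member list' from a master that is not being changed,
+# member names are matched against the nodes of the given group, and the matching member IDs are removed one by one.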
+def remove_members(group: NodeGroup):
+    log = group.cluster.log
+
+    masters = group.cluster.nodes["master"]
+    managing_master = masters.get_unchanged_nodes().get_any_member()
+
+    log.verbose(f"etcd will be managed using {managing_master.get_nodes_names()[0]}.")
+    output = managing_master.sudo("etcdctl member list").get_simple_out().splitlines()
+
+    etcd_members = {}
+    for line in output:
+        params = [p.strip() for p in line.split(sep=',')]
+        # 6 is the expected number of comma-separated parameters of an etcd member
+        if len(params) == 6:
+            etcd_members[params[2]] = params[0]
+        else:
+            log.warning("Unexpected line in 'etcdctl member list' output: " + line)
+
+    log.verbose(f"Found etcd members {list(etcd_members.keys())}")
+    unexpected_members = etcd_members.keys() - set(masters.get_nodes_names())
+    if unexpected_members:
+        log.warning(f"Found unexpected etcd members {list(unexpected_members)}")
+
+    for node_name in group.get_nodes_names():
+        if node_name in etcd_members:
+            command = "etcdctl member remove " + etcd_members[node_name]
+            log.verbose(f"Removing found etcd member {node_name}...")
+            managing_master.sudo(command)
+        else:
+            log.verbose(f"Skipping {node_name} as it is not among etcd members.")
diff --git a/kubetool/haproxy.py b/kubetool/haproxy.py
new file mode 100644
index 000000000..8b0d8037a
--- /dev/null
+++ b/kubetool/haproxy.py
@@ -0,0 +1,140 @@
+import io
+import time
+
+from jinja2 import Template
+
+from kubetool import system, packages
+from kubetool.core import utils
+from kubetool.core.executor import RemoteExecutor
+from kubetool.core.group import NodeGroupResult
+
+ERROR_VRRP_IS_NOT_CONFIGURED = "Balancer is combined with other role, but VRRP IP is not configured."
+
+
+def enrich_inventory(inventory, cluster):
+
+    for node in inventory["nodes"]:
+        if 'balancer' in node['roles'] and len(node['roles']) > 1:
+
+            # ok, it seems we have a combination of balancer-master / balancer-worker;
+            # in that case a VRRP IP should be defined
+
+            # let's check that the vrrp_ips section is defined
+            if not inventory["vrrp_ips"]:
+                raise Exception(ERROR_VRRP_IS_NOT_CONFIGURED)
+
+            found = False
+            # let's check that the current balancer is defined in the vrrp_ips hosts:
+            for item in inventory["vrrp_ips"]:
+                for record in item['hosts']:
+                    if record['name'] == node['name']:
+                        # there is at least one vrrp ip for the current balancer
+                        found = True
+
+            if not found:
+                raise Exception('Balancer is combined with other role, but there is no VRRP IP configured for '
+                                'node \'%s\'.'
% node['name']) + + return inventory + + +def install(group): + with RemoteExecutor(group.cluster.log) as exe: + for node in group.get_ordered_members_list(provide_node_configs=True): + package_associations = group.cluster.get_associations_for_node(node['connect_to'])['haproxy'] + group.sudo("%s -v" % package_associations['executable_name'], warn=True) + + haproxy_installed = True + for host, host_results in exe.get_last_results().items(): + if list(host_results.values())[0].exited != 0: + haproxy_installed = False + + if haproxy_installed: + # TODO: return haproxy version + group.cluster.log.debug("HAProxy already installed, nothing to install") + else: + with RemoteExecutor(group.cluster.log) as exe: + for node in group.get_ordered_members_list(provide_node_configs=True): + package_associations = group.cluster.get_associations_for_node(node['connect_to'])['haproxy'] + packages.install(node["connection"], include=package_associations['package_name']) + + service_name = package_associations['service_name'] + patch_path = utils.get_resource_absolute_path("./resources/drop_ins/haproxy.conf", script_relative=True) + group.call(system.patch_systemd_service, service_name=service_name, patch_source=patch_path) + enable(group) + + return exe.get_last_results_str() + + +def uninstall(group): + return packages.remove(group, include=['haproxy', 'rh-haproxy18']) + + +def restart(group): + for node in group.get_ordered_members_list(provide_node_configs=True): + service_name = group.cluster.get_associations_for_node(node['connect_to'])['haproxy']['service_name'] + system.restart_service(node['connection'], name=service_name) + RemoteExecutor(group.cluster.log).flush() + group.cluster.log.debug("Sleep while haproxy comes-up...") + time.sleep(group.cluster.globals['haproxy']['restart_wait']) + return + + +def disable(group): + with RemoteExecutor(group.cluster.log): + for node in group.get_ordered_members_list(provide_node_configs=True): + os_specific_associations = group.cluster.get_associations_for_node(node['connect_to']) + system.disable_service(node['connection'], name=os_specific_associations['haproxy']['service_name']) + + +def enable(group): + with RemoteExecutor(group.cluster.log): + for node in group.get_ordered_members_list(provide_node_configs=True): + os_specific_associations = group.cluster.get_associations_for_node(node['connect_to']) + system.enable_service(node['connection'], name=os_specific_associations['haproxy']['service_name'], + now=True) + + +def get_config(cluster, node, future_nodes): + + bindings = [] + if len(node['roles']) == 1 or not cluster.inventory['vrrp_ips']: + bindings.append("0.0.0.0") + bindings.append("::") + else: + for item in cluster.inventory['vrrp_ips']: + for record in item['hosts']: + if record['name'] == node['name']: + bindings.append(item['ip']) + + # remove duplicates + bindings = list(set(bindings)) + + return Template(open(utils.get_resource_absolute_path('templates/haproxy.cfg.j2', script_relative=True)).read())\ + .render(nodes=future_nodes, bindings=bindings,config_options=cluster.inventory['services']['loadbalancer']['haproxy']) + + +def configure(group): + all_nodes_configs = group.cluster.nodes['all'].get_final_nodes().get_ordered_members_list(provide_node_configs=True) + + for node in group.get_ordered_members_list(provide_node_configs=True): + package_associations = group.cluster.get_associations_for_node(node['connect_to'])['haproxy'] + configs_directory = '/'.join(package_associations['config_location'].split('/')[:-1]) + + 
group.cluster.log.debug("\nConfiguring haproxy on \'%s\'..." % node['name']) + config = get_config(group.cluster, node, all_nodes_configs) + utils.dump_file(group.cluster, config, 'haproxy_%s.cfg' % node['name']) + node['connection'].sudo('mkdir -p %s' % configs_directory) + node['connection'].put(io.StringIO(config), package_associations['config_location'], backup=True, sudo=True) + node['connection'].sudo('ls -la %s' % package_associations['config_location']) + + +def override_haproxy18(group): + rhel_nodes = group.get_nodes_with_os('rhel') + if rhel_nodes.is_empty(): + group.cluster.log.debug('Haproxy18 override is not required') + return + package_associations = group.cluster.get_associations_for_os('rhel')['haproxy'] + # TODO: do not replace the whole file, replace only parameter + return group.put(io.StringIO("CONFIG=%s\n" % package_associations['config_location']), + '/etc/sysconfig/%s' % package_associations['service_name'], backup=True, sudo=True) diff --git a/kubetool/jinja.py b/kubetool/jinja.py new file mode 100644 index 000000000..661cf3021 --- /dev/null +++ b/kubetool/jinja.py @@ -0,0 +1,23 @@ +import yaml +import jinja2 + +from kubetool.core import defaults + + +def new(log, root=None): + if root is None: + root = {} + env = jinja2.Environment() + env.filters['toyaml'] = lambda data: yaml.dump(data, default_flow_style=False) + env.filters['isipv4'] = lambda ip: ":" not in precompile(log, ip, root) + env.filters['minorversion'] = lambda version: ".".join(precompile(log, version, root).split('.')[0:2]) + env.filters['majorversion'] = lambda version: precompile(log, version, root).split('.')[0] + + return env + + +def precompile(log, struct, root): + # maybe we have non compiled string like templates/plugins/calico-{{ globals.compatibility_map }} ? 
+ if '{{' in struct or '{%' in struct: + struct = defaults.compile_object(log, struct, root) + return struct diff --git a/kubetool/k8s_certs.py b/kubetool/k8s_certs.py new file mode 100644 index 000000000..3433cddef --- /dev/null +++ b/kubetool/k8s_certs.py @@ -0,0 +1,101 @@ +from kubetool import kubernetes + +supported_k8s_certs = ["all", + "apiserver", "apiserver-etcd-client", "apiserver-kubelet-client", + "etcd-healthcheck-client", "etcd-peer", "etcd-server", + "admin.conf", "controller-manager.conf", "scheduler.conf", + "front-proxy-client"] +version_kubectl_alpha_removed = "v1.21.0" + + +def k8s_certs_overview(masters): + if kubernetes.version_higher_or_equal(masters.cluster.inventory['services']['kubeadm']['kubernetesVersion'], + version_kubectl_alpha_removed): + for master in masters.get_ordered_members_list(provide_node_configs=True): + masters.cluster.log.debug(f"Checking certs expiration for master {master['name']}") + master['connection'].sudo("kubeadm certs check-expiration", hide=False) + else: + for master in masters.get_ordered_members_list(provide_node_configs=True): + masters.cluster.log.debug(f"Checking certs expiration for master {master['name']}") + master['connection'].sudo("kubeadm alpha certs check-expiration", hide=False) + + +def renew_verify(inventory, cluster): + if cluster.context.get('initial_procedure') != 'cert_renew' or "kubernetes" not in cluster.procedure_inventory: + return inventory + + cert_list = cluster.procedure_inventory["kubernetes"].get("cert-list") + verify_cert_list_format(cert_list) + verify_certs_supported(cert_list) + verify_all_is_absent_or_single(cert_list) + + return inventory + + +def renew_apply(masters): + log = masters.cluster.log + + procedure = masters.cluster.procedure_inventory["kubernetes"] + cert_list = remove_certs_duplicates(procedure["cert-list"]) + + if kubernetes.version_higher_or_equal(masters.cluster.inventory['services']['kubeadm']['kubernetesVersion'], + version_kubectl_alpha_removed): + for cert in cert_list: + masters.sudo(f"kubeadm certs renew {cert}") + else: + for cert in cert_list: + masters.sudo(f"kubeadm alpha certs renew {cert}") + + if "all" in cert_list or "admin.conf" in cert_list: + # need to update cluster-admin config + kubernetes.copy_admin_config(log, masters) + + masters.call(force_renew_kubelet_serving_certs) + + # for some reason simple pod delete do not work for certs update - we need to delete containers themselves + masters.call(force_restart_control_plane) + + for master in masters.get_ordered_members_list(provide_node_configs=True): + kubernetes.wait_for_any_pods(masters.cluster, master["connection"], apply_filter=master["name"]) + + +def force_restart_control_plane(masters): + cri_impl = masters.cluster.inventory['services']['cri']['containerRuntime'] + restart_containers = ["etcd", "kube-scheduler", "kube-apiserver", "kube-controller-manager"] + c_filter = "grep -e %s" % " -e ".join(restart_containers) + + if cri_impl == "docker": + masters.sudo("sudo docker container rm -f $(sudo docker ps -a | %s | awk '{ print $1 }')" % c_filter, warn=True) + else: + masters.sudo("sudo crictl rm -f $(sudo crictl ps -a | %s | awk '{ print $1 }')" % c_filter, warn=True) + + +def force_renew_kubelet_serving_certs(masters): + # Delete *serving* kubelet cert (kubelet.crt) and restart kubelet to create new up-to-date cert. + # Client kubelet cert (kubelet.conf) is assumed to be updated automatically by kubelet. 
+ for master in masters.get_ordered_members_list(): + master.sudo(f"rm -f /var/lib/kubelet/pki/kubelet.crt /var/lib/kubelet/pki/kubelet.key") + masters.sudo("systemctl restart kubelet") + + +def verify_cert_list_format(cert_list): + if cert_list is None or not isinstance(cert_list, list) or len(cert_list) == 0: + raise Exception("Incorrect k8s certs renew configuration, 'cert_list' list should be present and non-empty") + return True + + +def verify_certs_supported(cert_list): + for line in cert_list: + if line not in supported_k8s_certs: + raise Exception(f"Found unsupported cert: {line}, list of supported certs: {supported_k8s_certs}") + return True + + +def verify_all_is_absent_or_single(cert_list): + if "all" in cert_list and len(cert_list) > 1: + raise Exception(f"Found 'all' in certs list, but it is not single: {cert_list}") + return True + + +def remove_certs_duplicates(cert_list): + return set(cert_list) diff --git a/kubetool/keepalived.py b/kubetool/keepalived.py new file mode 100644 index 000000000..e1ea4f610 --- /dev/null +++ b/kubetool/keepalived.py @@ -0,0 +1,257 @@ +import hashlib +import io +import random +import time + +from jinja2 import Template + +from kubetool import system, packages +from kubetool.core import utils +from kubetool.core.executor import RemoteExecutor +from kubetool.core.group import NodeGroup, NodeGroupResult + + +def autodetect_interface(cluster, name): + for node_address, node_context in cluster.context['nodes'].items(): + if node_context['name'] == name and node_context.get('active_interface'): + return node_context['active_interface'] + if cluster.context['initial_procedure'] == 'remove_node': + for node_to_remove in cluster.procedure_inventory['nodes']: + if node_to_remove['name'] == name: + return None + raise Exception('Failed to autodetect active interface for %s' % name) + + +def enrich_inventory_apply_defaults(inventory, cluster): + # if vrrp_ips is empty, then nothing to do + if not inventory['vrrp_ips']: + return inventory + + default_names = get_default_node_names(inventory) + + cluster.log.verbose("Detected default keepalived hosts: %s" % default_names) + if not default_names: + cluster.log.verbose("WARNING: Default keepalived hosts are empty: something can go wrong!") + + # iterate over each vrrp_ips item and check if any hosts defined to be used in it + for i, item in enumerate(inventory['vrrp_ips']): + + if isinstance(item, str): + inventory['vrrp_ips'][i] = item = { + 'ip': item + } + + # is router_id defined? + if item.get('router_id') is None: + # is there ipv6? + if ':' in item['ip']: + item['router_id'] = item['ip'].split(':').pop() + if item['router_id'] == '': + item['router_id'] = '0' + # in adress with long last octet e.g. "765d" it is necessary to use only last "5d" and convert it from hex to int + item['router_id'] = str(int(item['router_id'][-2:], 16)) + else: + item['router_id'] = item['ip'].split('.').pop() + + # is id defined? + if item.get('id') is None: + # label max size is 15, then 15 - 5 (size from string "vip_") = 10 symbols we can use + source_string = item.get('interface', 'auto') + item['ip'] + label_size = cluster.globals['keepalived']['defaults']['label_size'] + item['id'] = hashlib.md5(source_string.encode('utf-8')).hexdigest()[:label_size] + + # is password defined? 
+ if item.get('password') is None: + password_size = cluster.globals['keepalived']['defaults']['password_size'] + item['password'] = ("%032x" % random.getrandbits(128))[:password_size] + + # if nothing defined then use default names + if item.get('hosts') is None: + # is there default names found? + if not default_names: + raise Exception('Section #%s in vrrp_ips has no hosts, but default names can\'t be found.' % i) + # ok, default names found, and can be used + inventory['vrrp_ips'][i]['hosts'] = default_names + + for j, record in enumerate(item['hosts']): + if isinstance(record, str): + item['hosts'][j] = { + 'name': record + } + if not item['hosts'][j].get('priority'): + item['hosts'][j]['priority'] = cluster.globals['keepalived']['defaults']['priority']['max_value'] - \ + (j + cluster.globals['keepalived']['defaults']['priority']['step']) + if not item['hosts'][j].get('interface') and item.get('interface'): + item['hosts'][j]['interface'] = item['interface'] + if item['hosts'][j].get('interface', 'auto') == 'auto': + item['hosts'][j]['interface'] = autodetect_interface(cluster, item['hosts'][j]['name']) + + return inventory + + +def get_default_node_names(inventory): + default_names = [] + + # well, vrrp_ips is not empty, let's find balancers defined in config-file + for i, node in enumerate(inventory.get('nodes', [])): + if 'balancer' in node['roles'] and \ + (node.get('address') is not None or node.get('internal_address') is not None): + default_names.append(node['name']) + + # just in case, we remove duplicates + return list(set(default_names)) + + +def enrich_inventory_calculate_nodegroup(inventory, cluster): + # if vrrp_ips is empty, then nothing to do + if not inventory['vrrp_ips']: + return inventory + + # Calculate group, where keepalived should be installed: + names = [] + + for i, item in enumerate(cluster.inventory['vrrp_ips']): + for record in item['hosts']: + names.append(record['name']) + + # it is important to remove duplicates + names = list(set(names)) + + filtered_members = cluster.nodes['all'].get_ordered_members_list(provide_node_configs=False, apply_filter={ + 'name': names + }) + + # create new group where keepalived will be installed + cluster.nodes['keepalived'] = cluster.make_group(filtered_members) + + # create new role + cluster.roles.append('keepalived') + + # fill in ips + cluster.ips['keepalived'] = list(cluster.nodes['keepalived'].nodes.keys()) + + return inventory + + +def install(group): + log = group.cluster.log + + package_associations = group.cluster.inventory['services']['packages']['associations']['keepalived'] + + keepalived_version = group.sudo("%s -v" % package_associations['executable_name'], warn=True) + keepalived_installed = True + + for connection, result in keepalived_version.items(): + if result.exited != 0: + keepalived_installed = False + + if keepalived_installed: + log.debug("Keepalived already installed, nothing to install") + installation_result = keepalived_version + else: + installation_result = packages.install(group.get_new_nodes_or_self(), include=package_associations['package_name']) + + service_name = group.cluster.inventory['services']['packages']['associations']['keepalived']['service_name'] + patch_path = utils.get_resource_absolute_path("./resources/drop_ins/keepalived.conf", script_relative=True) + group.call(system.patch_systemd_service, service_name=service_name, patch_source=patch_path) + group.call(install_haproxy_check_script) + enable(group) + + return installation_result + + +def 
install_haproxy_check_script(group: NodeGroup): + local_path = utils.get_resource_absolute_path("./resources/scripts/check_haproxy.sh", script_relative=True) + group.put(local_path, "/usr/local/bin/check_haproxy.sh", sudo=True, binary=False) + group.sudo("chmod +x /usr/local/bin/check_haproxy.sh") + + +def uninstall(group): + return packages.remove(group, include='keepalived') + + +def restart(group): + results = NodeGroupResult() + for node in group.get_ordered_members_list(provide_node_configs=True): + os_specific_associations = group.cluster.get_associations_for_node(node['connect_to']) + package_associations = os_specific_associations['keepalived'] + results.update(system.restart_service(node['connection'], name=package_associations['service_name'])) + group.cluster.log.debug("Sleep while keepalived comes-up...") + time.sleep(group.cluster.globals['keepalived']['restart_wait']) + return results + + +def enable(group): + with RemoteExecutor(group.cluster.log): + for node in group.get_ordered_members_list(provide_node_configs=True): + os_specific_associations = group.cluster.get_associations_for_node(node['connect_to']) + system.enable_service(node['connection'], name=os_specific_associations['keepalived']['service_name'], + now=True) + + +def disable(group): + with RemoteExecutor(group.cluster.log): + for node in group.get_ordered_members_list(provide_node_configs=True): + os_specific_associations = group.cluster.get_associations_for_node(node['connect_to']) + system.disable_service(node['connection'], name=os_specific_associations['keepalived']['service_name']) + + +def generate_config(inventory, node): + config = '' + + for i, item in enumerate(inventory['vrrp_ips']): + + if i > 0: + # this is required for double newline in config, but avoid double newline in the end of file + config += "\n" + + ips = { + 'source': node['internal_address'], + 'peers': [] + } + + priority = 100 + interface = 'eth0' + for record in item['hosts']: + if record['name'] == node['name']: + priority = record['priority'] + interface = record['interface'] + + for i_node in inventory['nodes']: + for record in item['hosts']: + if i_node['name'] == record['name'] and i_node['internal_address'] != ips['source']: + ips['peers'].append(i_node['internal_address']) + + template_location = utils.get_resource_absolute_path('templates/keepalived.conf.j2', script_relative=True) + config += Template(open(template_location).read()).render(inventory=inventory, item=item, node=node, + interface=interface, + priority=priority, **ips) + "\n" + + return config + + +def configure(group): + log = group.cluster.log + group_members = group.get_ordered_members_list(provide_node_configs=True) + + with RemoteExecutor(log): + for node in group_members: + + log.debug("Configuring keepalived on '%s'..." 
% node['name']) + + package_associations = group.cluster.get_associations_for_node(node['connect_to'])['keepalived'] + configs_directory = '/'.join(package_associations['config_location'].split('/')[:-1]) + + group.sudo('mkdir -p %s' % configs_directory, hide=True) + + config = generate_config(group.cluster.inventory, node) + utils.dump_file(group.cluster, config, 'keepalived_%s.conf' % node['name']) + + node['connection'].put(io.StringIO(config), package_associations['config_location'], sudo=True) + + log.debug(group.sudo('ls -la %s' % package_associations['config_location'])) + + log.debug("Restarting keepalived in all group...") + restart(group) + + return group.sudo('systemctl status %s' % package_associations['service_name'], warn=True) diff --git a/kubetool/kubernetes.py b/kubetool/kubernetes.py new file mode 100644 index 000000000..b62cd0e51 --- /dev/null +++ b/kubetool/kubernetes.py @@ -0,0 +1,1070 @@ +import io +import math +import time +from copy import deepcopy +from typing import List + +import ruamel.yaml +import yaml +from jinja2 import Template + +from kubetool import system, plugins, psp, etcd, cri, packages +from kubetool.core import utils +from kubetool.core.executor import RemoteExecutor +from kubetool.core.group import NodeGroup + +version_coredns_path_breakage = "v1.21.2" + + +def add_node_enrichment(inventory, cluster): + if cluster.context.get('initial_procedure') != 'add_node': + return inventory + + for new_node in inventory["nodes"]: + if "add_node" in new_node["roles"]: + raise Exception("Manually setting 'new_node' role not permitted") + + # adding new supported role to be able to select group of "add_node"s + cluster.supported_roles.append("add_node") + + # adding role "new_node" for all specified new nodes and putting these nodes to all "nodes" list + for new_node in cluster.procedure_inventory.get("nodes", []): + # deepcopy is necessary, otherwise role append will happen in procedure_inventory too + node = deepcopy(new_node) + node["roles"].append("add_node") + inventory["nodes"].append(node) + + if "vrrp_ips" in cluster.procedure_inventory: + utils.merge_vrrp_ips(cluster.procedure_inventory, inventory) + + return inventory + + +def remove_node_enrichment(inventory, cluster): + if cluster.context.get('initial_procedure') != 'remove_node': + return inventory + + for node in inventory['nodes']: + if 'remove_node' in node['roles']: + raise Exception("Manually setting 'remove_node' role is not permitted") + + # adding new supported role to be able to select group of "remove_node"s + cluster.supported_roles.append('remove_node') + + # adding role "remove_node" for all specified nodes + node_names_to_remove = [node['name'] for node in cluster.procedure_inventory.get("nodes", [])] + for i, node in enumerate(inventory['nodes']): + if node['name'] in node_names_to_remove: + inventory['nodes'][i]['roles'].append('remove_node') + + return inventory + + +def enrich_upgrade_inventory(inventory, cluster): + if cluster.context.get('initial_procedure') == 'upgrade': + if not inventory.get('services'): + inventory['services'] = {} + if not inventory['services'].get('kubeadm'): + inventory['services']['kubeadm'] = {} + cluster.context['initial_kubernetes_version'] = inventory['services']['kubeadm']['kubernetesVersion'] + inventory['services']['kubeadm']['kubernetesVersion'] = cluster.context['upgrade_version'] + + disable_eviction = cluster.procedure_inventory.get("disable-eviction") + if disable_eviction not in (None, True, False): + raise Exception(f"'disable-eviction' value 
could be either True or False, found {disable_eviction}") + + test_version_upgrade_possible(cluster.context['initial_kubernetes_version'], cluster.context['upgrade_version']) + cluster.log.info( + '------------------------------------------\nUPGRADING KUBERNETES %s ⭢ %s\n------------------------------------------' % ( + cluster.context['initial_kubernetes_version'], cluster.context['upgrade_version'])) + return inventory + + +def version_higher_or_equal(version, compared_version): + ''' + The method checks target Kubernetes version, is it more/equal than compared_version. + ''' + compared_version_list = compared_version.replace('v', '').split('.') + version_list = version.replace('v', '').split('.') + if int(version_list[0]) > int(compared_version_list[0]): + return True + if int(version_list[0]) == int(compared_version_list[0]): + if int(version_list[1]) > int(compared_version_list[1]): + return True + if int(version_list[1]) == int(compared_version_list[1]): + if int(version_list[2]) >= int(compared_version_list[2]): + return True + return False + + +def enrich_inventory(inventory, cluster): + if version_higher_or_equal(inventory['services']['kubeadm']['kubernetesVersion'], version_coredns_path_breakage): + repository = inventory['services']['kubeadm'].get('imageRepository', "") + if repository: + inventory['services']['kubeadm']['dns'] = {} + inventory['services']['kubeadm']['dns']['imageRepository'] = ("%s/coredns" % repository) + # if user redefined apiServer as, string, for example? + if not isinstance(inventory["services"]["kubeadm"].get('apiServer'), dict): + inventory["services"]["kubeadm"]['apiServer'] = {} + + # if user redefined apiServer.certSANs as, string, or removed it, for example? + if not isinstance(inventory["services"]["kubeadm"]['apiServer'].get('certSANs'), list): + inventory["services"]["kubeadm"]['apiServer']['certSANs'] = [] + + certsans = inventory["services"]["kubeadm"]['apiServer']['certSANs'] + + # do not overwrite apiServer.certSANs, but append - may be user specified something already there? 
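+        # every master and balancer node contributes its internal address and node name, plus its
+        # external address if present; vrrp_ips and public_cluster_ip are appended right after this loop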
+ for node in inventory["nodes"]: + if 'balancer' in node['roles'] or 'master' in node['roles']: + inventory["services"]["kubeadm"]['apiServer']['certSANs'].append(node['internal_address']) + inventory["services"]["kubeadm"]['apiServer']['certSANs'].append(node['name']) + if node.get('address') is not None and node['address'] not in certsans: + inventory["services"]["kubeadm"]['apiServer']['certSANs'].append(node['address']) + + if inventory["vrrp_ips"] is not None: + for item in inventory["vrrp_ips"]: + inventory["services"]["kubeadm"]['apiServer']['certSANs'].append(item['ip']) + if item.get("floating_ip"): + inventory["services"]["kubeadm"]["apiServer"]["certSANs"].append(item["floating_ip"]) + + if inventory.get("public_cluster_ip"): + if inventory["public_cluster_ip"] not in inventory["services"]["kubeadm"]["apiServer"]["certSANs"]: + inventory["services"]["kubeadm"]["apiServer"]["certSANs"].append(inventory["public_cluster_ip"]) + + # validating node labels and configuring additional labels + for node in inventory["nodes"]: + if "master" not in node["roles"] and "worker" not in node["roles"]: + if "labels" in node: + raise Exception("Only 'worker' or 'master' nodes can have labels, " + "but found label on %s, roles: %s" % (node["name"], node["roles"])) + if "taints" in node: + raise Exception("Only 'worker' or 'master' nodes can have taints, " + "but found taints on %s, roles: %s" % (node["name"], node["roles"])) + continue + + if "worker" in node["roles"]: + if "labels" not in node: + node["labels"] = {} + node["labels"]["node-role.kubernetes.io/worker"] = "worker" + + if "master" in node["roles"]: + # node is both master and worker, thus we remove NoSchedule taint + if "taints" not in node: + node["taints"] = [] + node["taints"].append("node-role.kubernetes.io/master:NoSchedule-") + + return inventory + + +def reset_installation_env(group: NodeGroup): + log = group.cluster.log + + log.debug("Cleaning up previous installation...") + + cluster = group.cluster + + drain_timeout = cluster.procedure_inventory.get('drain_timeout') + grace_period = cluster.procedure_inventory.get('grace_period') + + # if we perform "add" or "remove" node procedure + # then we need to additionally perform "drain" and "delete" during reset + nodes_for_draining = cluster.make_group([]) + + # perform FULL reset only for "add" or "remove" procedures + # do not perform full reset on cluster (re)installation, it could hang on last etcd member + # nodes should be deleted only during "add" or "remove" procedures + is_add_or_remove_procedure = True + + nodes_for_manual_etcd_remove = cluster.make_group([]) + + if not group.get_nodes_for_removal().is_empty(): + # this is remove_node procedure + check_active_timeout = int(cluster.globals["nodes"]["remove"]["check_active_timeout"]) + active_nodes = group.wait_active_nodes(timeout=check_active_timeout) + + # We need to manually remove members from etcd for "remove" procedure, + # only if corresponding nodes are not active. + # Otherwise, they will be removed by "kubeadm reset" command. 
+ nodes_for_manual_etcd_remove = group.exclude_group(active_nodes) + + # kubectl drain hangs until the timeout is exceeded for nodes that are off, + # so we should drain only active nodes + nodes_for_draining = active_nodes + else: + # otherwise, consider all nodes active + active_nodes = group + + if not group.get_new_nodes().is_empty(): + # this is add_node procedure + nodes_for_draining = group + else: + # this is install procedure + is_add_or_remove_procedure = False + + if not nodes_for_manual_etcd_remove.is_empty(): + log.warning(f"Nodes {list(nodes_for_manual_etcd_remove.nodes.keys())} are considered not active. " + "Full cleanup procedure cannot be performed. " + "Corresponding members will be removed from etcd manually.") + etcd.remove_members(nodes_for_manual_etcd_remove) + + if not nodes_for_draining.is_empty(): + drain_nodes(nodes_for_draining, drain_timeout=drain_timeout, grace_period=grace_period) + + if is_add_or_remove_procedure and not active_nodes.is_empty(): + log.verbose(f"Resetting kubeadm on nodes {list(active_nodes.nodes.keys())} ...") + result = active_nodes.sudo('sudo kubeadm reset -f') + log.debug("Kubeadm successfully reset:\n%s" % result) + + if not active_nodes.is_empty(): + log.verbose(f"Cleaning nodes {list(active_nodes.nodes.keys())} ...") + # the semicolons keep the script running even if an individual command fails + result = active_nodes.sudo( + 'sudo kubeadm reset phase cleanup-node; ' # it is required to "cleanup-node" for all procedures + 'sudo systemctl stop kubelet; ' + 'sudo rm -rf /etc/kubernetes/manifests /var/lib/kubelet/pki /var/lib/etcd; ' + 'sudo mkdir -p /etc/kubernetes/manifests; ', warn=True) + + # Disabled initial prune for images prepull feature. Need analysis for possible negative impact. + # result.update(cri.prune(active_nodes, all_implementations=True)) + + log.debug(f"Nodes {list(active_nodes.nodes.keys())} cleaned up successfully:\n" + "%s" % result) + + if is_add_or_remove_procedure: + return delete_nodes(group) + + +def drain_nodes(group, disable_eviction=False, drain_timeout=None, grace_period=None): + log = group.cluster.log + + master = group.cluster.nodes['master'].get_final_nodes().get_first_member() + result = master.sudo("kubectl get nodes -o custom-columns=NAME:.metadata.name") + + stdout = list(result.values())[0].stdout + log.verbose("Detected the following nodes in cluster:\n%s" % stdout) + + for node in group.get_ordered_members_list(provide_node_configs=True): + if node["name"] in stdout: + log.debug("Draining node %s..." % node["name"]) + master.sudo(prepare_drain_command(node, group.cluster.inventory['services']['kubeadm']['kubernetesVersion'], + group.cluster.globals, disable_eviction, group.cluster.nodes, + drain_timeout, grace_period), + hide=False) + else: + log.warning("Node %s is not found in cluster and can't be drained" % node["name"]) + + return master.sudo("kubectl get nodes") + + +def delete_nodes(group): + log = group.cluster.log + + master = group.cluster.nodes['master'].get_final_nodes().get_first_member() + result = master.sudo("kubectl get nodes -o custom-columns=NAME:.metadata.name") + + stdout = list(result.values())[0].stdout + log.verbose("Detected the following nodes in cluster:\n%s" % stdout) + + for node in group.get_ordered_members_list(provide_node_configs=True): + if node["name"] in stdout: + log.debug("Deleting node %s from the cluster..."
% node["name"]) + master.sudo("kubectl delete node %s" % node["name"], hide=False) + else: + log.warning("Node %s is not found in cluster and can't be removed" % node["name"]) + + return master.sudo("kubectl get nodes") + + +def is_available_master(master): + return not ("new_node" in master["roles"] or "remove_node" in master["roles"]) + + +def install(group): + log = group.cluster.log + + with RemoteExecutor(log): + log.debug("Making systemd unit...") + group.sudo('rm -rf /etc/systemd/system/kubelet*') + for node in group.cluster.inventory["nodes"]: + # perform only for current group members + if node["connect_to"] in group.nodes.keys(): + template = Template(open(utils.get_resource_absolute_path('templates/kubelet.service.j2', + script_relative=True)).read()).render( + hostname=node["name"]) + log.debug("Uploading to '%s'..." % node["connect_to"]) + node["connection"].put(io.StringIO(template + "\n"), '/etc/systemd/system/kubelet.service', sudo=True) + + log.debug("\nReloading systemd daemon...") + system.reload_systemctl(group) + group.sudo('systemctl enable kubelet') + + return group.sudo('systemctl status kubelet', warn=True) + + +def join_other_masters(group): + other_masters_group = group.get_ordered_members_list(provide_node_configs=True)[1:] + + join_dict = group.cluster.context["join_dict"] + for node in other_masters_group: + join_master(group, node, join_dict) + + group.cluster.log.debug("Verifying installation...") + first_master = group.get_first_member(provide_node_configs=True) + return first_master['connection'].sudo("kubectl get pods --all-namespaces -o=wide") + + +def join_new_master(group): + join_dict = get_join_dict(group) + for node in group.get_ordered_members_list(provide_node_configs=True): + join_master(group, node, join_dict) + + +def join_master(group, node, join_dict): + log = group.cluster.log + + join_config = { + 'apiVersion': group.cluster.inventory["services"]["kubeadm"]['apiVersion'], + 'kind': 'JoinConfiguration', + 'discovery': { + 'bootstrapToken': { + 'apiServerEndpoint': group.cluster.inventory["services"]["kubeadm"]['controlPlaneEndpoint'], + 'token': join_dict['token'], + 'caCertHashes': [ + join_dict['discovery-token-ca-cert-hash'] + ] + } + }, + 'controlPlane': { + 'certificateKey': join_dict['certificate-key'], + 'localAPIEndpoint': { + 'advertiseAddress': node['internal_address'], + } + } + } + + if group.cluster.inventory['services']['kubeadm']['controllerManager']['extraArgs'].get( + 'external-cloud-volume-plugin'): + join_config['nodeRegistration'] = { + 'kubeletExtraArgs': { + 'cloud-provider': 'external' + } + } + + configure_container_runtime(group.cluster, join_config) + + config = get_kubeadm_config(group.cluster.inventory) + "---\n" + yaml.dump(join_config, default_flow_style=False) + + utils.dump_file(group.cluster, config, 'join-config_%s.yaml' % node['name']) + + log.debug("Uploading init config to master '%s'..." % node['name']) + node['connection'].sudo("mkdir -p /etc/kubernetes") + node['connection'].put(io.StringIO(config), '/etc/kubernetes/join-config.yaml', sudo=True) + + # ! ETCD on masters can't be initialized in async way, that is why it is necessary to disable async mode ! + log.debug('Joining master \'%s\'...' 
% node['name']) + node['connection'].sudo("kubeadm join " + " --config=/etc/kubernetes/join-config.yaml" + " --ignore-preflight-errors=Port-6443 --v=5", + is_async=False, hide=False) + + log.debug("Patching apiServer bind-address for master %s" % node['name']) + + with RemoteExecutor(log): + node['connection'].sudo("sed -i 's/--bind-address=.*$/--bind-address=%s/' " + "/etc/kubernetes/manifests/kube-apiserver.yaml" % node['internal_address']) + node['connection'].sudo("systemctl restart kubelet") + copy_admin_config(log, node['connection']) + + wait_for_any_pods(group.cluster, node['connection'], apply_filter=node['name']) + + +def copy_admin_config(log, nodes): + log.debug("Setting up admin-config...") + nodes.sudo("mkdir -p /root/.kube && sudo cp -f /etc/kubernetes/admin.conf /root/.kube/config") + + +def get_join_dict(group): + first_master = group.cluster.nodes["master"].get_first_member(provide_node_configs=True) + token_result = first_master['connection'].sudo("kubeadm token create --print-join-command", hide=False) + join_strings = list(token_result.values())[0].stdout.rstrip("\n") + + join_dict = {"worker_join_command": join_strings} + join_array = join_strings[join_strings.find("--"):].split() + for idx, _ in enumerate(join_array): + current_string = join_array[idx] + if "--" in current_string: + join_dict[current_string.lstrip("--")] = join_array[idx + 1] + + cert_key_result = first_master['connection'].sudo("kubeadm init phase upload-certs --upload-certs") + cert_key = list(cert_key_result.values())[0].stdout.split("Using certificate key:\n")[1].rstrip("\n") + join_dict["certificate-key"] = cert_key + return join_dict + + +def init_first_master(group): + log = group.cluster.log + + first_master = group.get_first_member(provide_node_configs=True) + first_master_group = first_master["connection"] + + # setting global apiServer bind-address to first master internal address + # for other masters we override it during initialization + group.cluster.inventory["services"]["kubeadm"]['apiServer']['extraArgs']['bind-address'] = \ + first_master['internal_address'] + + init_config = { + 'apiVersion': group.cluster.inventory["services"]["kubeadm"]['apiVersion'], + 'kind': 'InitConfiguration', + 'localAPIEndpoint': { + 'advertiseAddress': first_master['internal_address'] + } + } + + if group.cluster.inventory['services']['kubeadm']['controllerManager']['extraArgs'].get( + 'external-cloud-volume-plugin'): + init_config['nodeRegistration'] = { + 'kubeletExtraArgs': { + 'cloud-provider': 'external' + } + } + + configure_container_runtime(group.cluster, init_config) + + config = get_kubeadm_config(group.cluster.inventory) + "---\n" + yaml.dump(init_config, default_flow_style=False) + + utils.dump_file(group.cluster, config, 'init-config_%s.yaml' % first_master['name']) + + log.debug("Uploading init config to initial master...") + first_master_group.sudo("mkdir -p /etc/kubernetes") + first_master_group.put(io.StringIO(config), '/etc/kubernetes/init-config.yaml', sudo=True) + + log.debug("Initializing first master...") + result = first_master_group.sudo("kubeadm init" + " --upload-certs" + " --config=/etc/kubernetes/init-config.yaml" + " --ignore-preflight-errors=Port-6443" + " --v=5", + hide=False) + + log.debug("Setting up admin-config...") + first_master_group.sudo("mkdir -p /root/.kube && sudo cp -f /etc/kubernetes/admin.conf /root/.kube/config") + + if psp.is_security_enabled(group.cluster.inventory): + log.debug("Setting up privileged psp...") + 
first_master_group.call(psp.apply_privileged_policy) + + log.debug('Downloading admin.conf...') + group.cluster.context['kube_config'] = \ + list(first_master_group.sudo('cat /etc/kubernetes/admin.conf').values())[0].stdout + + # Preparing join_dict to init other nodes + master_lines = list(result.values())[0].stdout. \ + split("You can now join any number of the control-plane")[1].splitlines()[2:5] + worker_lines = list(result.values())[0].stdout. \ + split("Then you can join any number of worker")[1].splitlines()[2:4] + master_join_command = " ".join([x.replace("\\", "").strip() for x in master_lines]) + worker_join_command = " ".join([x.replace("\\", "").strip() for x in worker_lines]) + + # TODO: get rid of this code and use get_join_dict() method + args = master_join_command.split("--") + join_dict = {} + for arg in args: + key_val = arg.split(" ") + if len(key_val) > 1: + join_dict[key_val[0].strip()] = key_val[1].strip() + join_dict["worker_join_command"] = worker_join_command + group.cluster.context["join_dict"] = join_dict + + wait_for_any_pods(group.cluster, first_master_group, apply_filter=first_master['name']) + # refresh cluster installation status in cluster context + is_cluster_installed(group.cluster) + + + +def wait_for_any_pods(cluster, connection, apply_filter=None): + if isinstance(cluster, NodeGroup): + # cluster is a group, not a cluster + cluster = cluster.cluster + + plugins.expect_pods(cluster, [ + 'kube-apiserver', + 'kube-controller-manager', + 'kube-proxy', + 'kube-scheduler', + 'etcd' + ], node=connection, apply_filter=apply_filter, + timeout=cluster.globals['pods']['expect']['kubernetes']['timeout'], + retries=cluster.globals['pods']['expect']['kubernetes']['retries']) + + +def wait_for_nodes(group): + log = group.cluster.log + + first_master = group.cluster.nodes["master"].get_first_member() + node_names = group.get_nodes_names() + + wait_conditions = { + "Ready": "True", + "NetworkUnavailable": "False" + } + if len(node_names) > 1: + status_cmd = "kubectl get nodes %s -o jsonpath='{.items[*].status.conditions[?(@.type==\"%s\")].status}'" + else: + status_cmd = "kubectl get nodes %s -o jsonpath='{.status.conditions[?(@.type==\"%s\")].status}'" + + timeout = group.cluster.globals['nodes']['ready']['timeout'] + retries = group.cluster.globals['nodes']['ready']['retries'] + log.debug("Waiting for new kubernetes nodes to become ready") + while retries > 0: + correct_conditions = 0 + for condition, cond_value in wait_conditions.items(): + result = first_master.sudo(status_cmd % (" ".join(node_names), condition)) + condition_results = list(result.values())[0].stdout.split(" ") + correct_values = [value for value in condition_results if value == cond_value] + if len(correct_values) == len(node_names): + correct_conditions = correct_conditions + 1 + log.debug(f"Condition {condition} is {cond_value} for all nodes.") + else: + log.debug(f"Condition {condition} is not met, retrying") + retries = retries - 1 + time.sleep(timeout) + break + + if correct_conditions == len(wait_conditions): + log.debug("All nodes are ready!") + return + + raise Exception("Nodes did not become ready in the expected time") + + +def init_workers(group): + join_dict = group.cluster.context.get("join_dict", get_join_dict(group)) + + join_config = { + 'apiVersion': group.cluster.inventory["services"]["kubeadm"]['apiVersion'], + 'kind': 'JoinConfiguration', + 'discovery': { + 'bootstrapToken': { + 'apiServerEndpoint': group.cluster.inventory["services"]["kubeadm"]['controlPlaneEndpoint'], + 
'token': join_dict['token'], + 'caCertHashes': [ + join_dict['discovery-token-ca-cert-hash'] + ] + } + } + } + + if group.cluster.inventory['services']['kubeadm']['controllerManager']['extraArgs'].get( + 'external-cloud-volume-plugin'): + join_config['nodeRegistration'] = { + 'kubeletExtraArgs': { + 'cloud-provider': 'external' + } + } + + configure_container_runtime(group.cluster, join_config) + + config = yaml.dump(join_config, default_flow_style=False) + + utils.dump_file(group.cluster, config, 'join-config-workers.yaml') + + group.sudo("mkdir -p /etc/kubernetes") + group.put(io.StringIO(config), '/etc/kubernetes/join-config.yaml', sudo=True) + + group.cluster.log.debug('Joining workers...') + return group.sudo( + "kubeadm join --config=/etc/kubernetes/join-config.yaml --ignore-preflight-errors=Port-6443 --v=5", + is_async=False, hide=False) + + +def apply_labels(group): + log = group.cluster.log + + log.debug("Applying additional labels for nodes") + # TODO: support "--overwrite" switch? maybe also add labels validation + with RemoteExecutor(log): + for node in group.get_ordered_members_list(provide_node_configs=True): + if "labels" not in node: + log.verbose("No additional labels found for %s" % node['name']) + continue + log.verbose("Found additional labels for %s: %s" % (node['name'], node['labels'])) + for key, value in node["labels"].items(): + group.cluster.nodes["master"].get_first_member() \ + .sudo("kubectl label node %s %s=%s" % (node["name"], key, value)) + + log.debug("Successfully applied additional labels") + + return group.cluster.nodes["master"].get_first_member() \ + .sudo("kubectl get nodes --show-labels") + # TODO: maybe wait for pods on workers? + + +def apply_taints(group): + log = group.cluster.log + + log.debug("Applying additional taints for nodes") + with RemoteExecutor(log): + for node in group.get_ordered_members_list(provide_node_configs=True): + if "taints" not in node: + log.verbose("No additional taints found for %s" % node['name']) + continue + log.verbose("Found additional taints for %s: %s" % (node['name'], node['taints'])) + for taint in node["taints"]: + group.cluster.nodes["master"].get_first_member() \ + .sudo("kubectl taint node %s %s" % (node["name"], taint)) + + log.debug("Successfully applied additional taints") + + return group.cluster.nodes["master"].get_first_member() \ + .sudo("kubectl get nodes -o=jsonpath=" + "'{range .items[*]}{\"node: \"}{.metadata.name}{\"\\ntaints: \"}{.spec.taints}{\"\\n\"}'", hide=True) + + +def is_cluster_installed(cluster): + cluster.log.verbose('Searching for already installed cluster...') + try: + result = cluster.nodes['master'].sudo('kubectl cluster-info', warn=True, timeout=15) + for conn, result in result.items(): + if 'is running at' in result.stdout: + cluster.log.verbose('Detected running Kubernetes cluster on %s' % conn.host) + for line in result.stdout.split("\n"): + if 'master' in line: + cluster.context['controlplain_uri'] = line.split('at ')[1] + return True + except Exception as e: + cluster.log.verbose(e) + cluster.context['controlplain_uri'] = None + cluster.log.verbose('Failed to detect any Kubernetes cluster') + return False + + +def get_kubeadm_config(inventory): + kubeadm_kubelet = yaml.dump(inventory["services"]["kubeadm_kubelet"], default_flow_style=False) + kubeadm = yaml.dump(inventory["services"]["kubeadm"], default_flow_style=False) + return f'{kubeadm_kubelet}---\n{kubeadm}' + + +def upgrade_first_master(version, upgrade_group, cluster, drain_timeout=None, grace_period=None): + 
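"""Upgrade Kubernetes on the first control-plane (master) node: drain it, upgrade the CRI package if required, run 'kubeadm upgrade apply', then uncordon the node and wait until it reports the target version and its control-plane pods are ready. Skipped if the node is not in the upgrade group.""" +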
first_master = cluster.nodes['master'].get_first_member(provide_node_configs=True) + + if not upgrade_group.has_node(first_master['name']): + cluster.log.debug("First master \"%s\" upgrade is not required" % first_master['name']) + return + + cluster.log.debug("Upgrading first master \"%s\"" % first_master) + + flags = "-f --certificate-renewal=true --ignore-preflight-errors=CoreDNSUnsupportedPlugins" + if patch_kubeadm_configmap(first_master, cluster): + flags += " --config /tmp/kubeadm_config.yaml" + + disable_eviction = cluster.procedure_inventory.get("disable-eviction", True) + drain_cmd = prepare_drain_command(first_master, version, cluster.globals, disable_eviction, cluster.nodes, + drain_timeout, grace_period) + first_master['connection'].sudo(drain_cmd, is_async=False, hide=False) + + upgrade_cri_if_required(first_master['connection']) + + first_master['connection'].sudo(f"sudo kubeadm upgrade apply {version} {flags} && " + f"sudo kubectl uncordon {first_master['name']} && " + f"sudo systemctl restart kubelet", is_async=False, hide=False) + + copy_admin_config(cluster.log, first_master['connection']) + + expect_kubernetes_version(cluster, version, apply_filter=first_master['name']) + wait_for_any_pods(cluster, first_master['connection'], apply_filter=first_master['name']) + exclude_node_from_upgrade_list(first_master['connection'], first_master['name']) + + +def upgrade_other_masters(version, upgrade_group, cluster, drain_timeout=None, grace_period=None): + first_master = cluster.nodes['master'].get_first_member(provide_node_configs=True) + for node in cluster.nodes['master'].get_ordered_members_list(provide_node_configs=True): + if node['name'] != first_master['name']: + + if not upgrade_group.has_node(node['name']): + cluster.log.debug("Master \"%s\" upgrade is not required" % node['name']) + continue + + cluster.log.debug("Upgrading master \"%s\"" % node['name']) + + disable_eviction = cluster.procedure_inventory.get("disable-eviction", True) + drain_cmd = prepare_drain_command(node, version, cluster.globals, disable_eviction, cluster.nodes, + drain_timeout, grace_period) + node['connection'].sudo(drain_cmd, is_async=False, hide=False) + + upgrade_cri_if_required(node['connection']) + + node['connection'].sudo(f"sudo kubeadm upgrade node --certificate-renewal=true && " + f"sudo sed -i 's/--bind-address=.*$/--bind-address={node['internal_address']}/' " + f"/etc/kubernetes/manifests/kube-apiserver.yaml && " + f"sudo kubectl uncordon {node['name']} && " + f"sudo systemctl restart kubelet", is_async=False, hide=False) + + expect_kubernetes_version(cluster, version, apply_filter=node['name']) + copy_admin_config(cluster.log, node['connection']) + wait_for_any_pods(cluster, node['connection'], apply_filter=node['name']) + exclude_node_from_upgrade_list(first_master, node['name']) + + +def patch_kubeadm_configmap(first_master, cluster): + ''' + The method checks and patches the Kubeadm configuration for compliance with the current imageRepository + and the corresponding version of the CoreDNS path to the image. 
+ ''' + kubeadm_config_map = first_master["connection"].sudo("kubectl get cm -o yaml -n kube-system kubeadm-config") \ + .get_simple_out() + yaml = ruamel.yaml.YAML() + config_map = yaml.load(kubeadm_config_map) + cluster_configuration_yaml = config_map["data"]["ClusterConfiguration"] + cluster_configuration = yaml.load(cluster_configuration_yaml) + if version_higher_or_equal(cluster.inventory['services']['kubeadm']['kubernetesVersion'], + version_coredns_path_breakage): + if not cluster_configuration.get("dns", {}): + cluster_configuration["dns"] = {} + cluster_configuration['dns']['imageRepository'] = ("%s/coredns" % cluster_configuration["imageRepository"]) + + else: + if not cluster_configuration['dns'].get('imageRepository', ""): + cluster_configuration['dns']['imageRepository'] = ( + "%s/coredns" % cluster_configuration["imageRepository"]) + updated_config = io.StringIO() + kubelet_config = first_master["connection"].sudo("cat /var/lib/kubelet/config.yaml").get_simple_out() + yaml.dump(cluster_configuration, updated_config) + result_config = kubelet_config + "---\n" + updated_config.getvalue() + first_master["connection"].put(io.StringIO(result_config), "/tmp/kubeadm_config.yaml", sudo=True) + return True + return False + + +def upgrade_workers(version, upgrade_group, cluster, drain_timeout=None, grace_period=None): + first_master = cluster.nodes['master'].get_first_member(provide_node_configs=True) + for node in cluster.nodes['worker'].exclude_group(cluster.nodes['master']).get_ordered_members_list( + provide_node_configs=True): + + if not upgrade_group.has_node(node['name']): + cluster.log.debug("Worker \"%s\" upgrade is not required" % node['name']) + continue + + cluster.log.debug("Upgrading worker \"%s\"" % node['name']) + + disable_eviction = cluster.procedure_inventory.get("disable-eviction", True) + drain_cmd = prepare_drain_command(node, version, cluster.globals, disable_eviction, cluster.nodes, + drain_timeout, grace_period) + first_master['connection'].sudo(drain_cmd, is_async=False, hide=False) + + upgrade_cri_if_required(node['connection']) + + node['connection'].sudo("kubeadm upgrade node --certificate-renewal=true && " + "sudo systemctl restart kubelet") + + first_master['connection'].sudo("kubectl uncordon %s" % node['name'], is_async=False, hide=False) + + expect_kubernetes_version(cluster, version, apply_filter=node['name']) + # workers do not have system pods to wait for their start + exclude_node_from_upgrade_list(first_master, node['name']) + + +def prepare_drain_command(node, version: str, globals, disable_eviction: bool, nodes, + drain_timeout: int = None, grace_period: int = None): + drain_globals = globals['nodes']['drain'] + if drain_timeout is None: + drain_timeout = recalculate_proper_timeout(nodes, drain_globals['timeout']) + if grace_period is None: + grace_period = drain_globals['grace_period'] + drain_cmd = f"kubectl drain {node['name']} --force --ignore-daemonsets --delete-local-data " \ + f"--timeout={drain_timeout}s --grace-period={grace_period}" + if version and version >= "v1.18" and disable_eviction: + drain_cmd += " --disable-eviction=true" + return drain_cmd + + +def upgrade_cri_if_required(group): + log = group.cluster.log + cri_impl = group.cluster.inventory['services']['cri']['containerRuntime'] + + if cri_impl in group.cluster.context["packages"]["upgrade_required"]: + cri_packages = group.cluster.inventory['services']['packages']['associations'][cri_impl]['package_name'] + + log.debug(f"Installing {cri_packages}") + packages.install(group, 
include=cri_packages) + log.debug(f"Restarting all containers on nodes: {group.get_nodes_names()}") + if cri_impl == "docker": + group.sudo("docker container rm -f $(sudo docker container ls -q)", warn=True) + else: + group.sudo("crictl rm -fa", warn=True) + else: + log.debug(f"{cri_impl} upgrade is not required") + + +def verify_upgrade_versions(cluster): + first_master = cluster.nodes['master'].get_first_member(provide_node_configs=True) + upgrade_version = cluster.context["upgrade_version"] + + k8s_nodes_group = cluster.nodes["worker"].include_group(cluster.nodes['master']) + for node in k8s_nodes_group.get_ordered_members_list(provide_node_configs=True): + cluster.log.debug(f"Verifying current k8s version for node {node['name']}") + result = first_master['connection'].sudo("kubectl get nodes -o custom-columns=" + "'VERSION:.status.nodeInfo.kubeletVersion,NAME:.metadata.name' " + f"| grep -w {node['name']} " + "| awk '{ print $1 }'") + curr_version = list(result.values())[0].stdout + test_version_upgrade_possible(curr_version, upgrade_version, skip_equal=True) + + +def verify_target_version(target_version): + test_version(target_version) + + pos = target_version.rfind(".") + target_version = target_version[:pos] + with open(utils.get_resource_absolute_path('resources/configurations/globals.yaml', + script_relative=True), 'r') as stream: + globals_yml = yaml.safe_load(stream) + if target_version not in globals_yml["kubernetes_versions"]: + raise Exception("ERROR! Specified target Kubernetes version '%s' - cannot be installed!" % target_version) + if not globals_yml["kubernetes_versions"][target_version]["supported"]: + message = "\033[91mWarning! Specified target Kubernetes version '%s' - is not supported!\033[0m" % target_version + print(message) + return message + return "" + + +def expect_kubernetes_version(cluster, version, timeout=None, retries=None, node=None, apply_filter=None): + if timeout is None: + timeout = cluster.globals['nodes']['expect']['kubernetes_version']['timeout'] + if retries is None: + retries = cluster.globals['nodes']['expect']['kubernetes_version']['retries'] + + cluster.log.debug("Expecting Kubernetes version %s" % version) + cluster.log.debug("Max expectation time: %ss" % (timeout * retries)) + + cluster.log.debug("Waiting for nodes...\n") + + if node is None: + node = cluster.nodes['master'].get_first_member() + + command = 'kubectl get nodes -o=wide' + if apply_filter is not None: + command += ' | grep -w %s' % apply_filter + + while retries > 0: + result = node.sudo(command, warn=True) + stdout = list(result.values())[0].stdout + cluster.log.verbose(stdout) + nodes_version_correct = True + for stdout_line in iter(stdout.splitlines()): + if version not in stdout_line: + nodes_version_correct = False + cluster.log.verbose("Invalid version detected: %s\n" % stdout_line) + + if nodes_version_correct: + cluster.log.debug("Nodes have correct Kubernetes version = %s" % version) + cluster.log.debug(result) + return + else: + retries -= 1 + cluster.log.debug("Some nodes have invalid Kubernetes version... 
(%ss left)" % (retries * timeout), result) + time.sleep(timeout) + + raise Exception('In the expected time, the nodes did not receive correct Kubernetes version') + + +def test_version(version): + # catch version without "v" at the first symbol + if isinstance(version, str): + if not version.startswith('v'): + raise Exception('Version \"%s\" do not have \"v\" as first symbol, ' + 'expected version pattern is \"v1.NN.NN\"' % version) + version_list = version.replace('v', '').split('.') + # catch invalid version 'v1.16' + if len(version_list) != 3: + raise Exception('Version \"%s\" has invalid amount of numbers, ' + 'expected version pattern is \"v1.NN.NN\"' % version) + + # parse str to int and catch invalid symbols in version number + for i, value in enumerate(version_list): + try: + # whitespace required because python's int() ignores them + version_list[i] = int(value.replace(' ', '.')) + except ValueError: + raise Exception('Version \"%s\" contains invalid symbols, ' + 'expected version pattern is \"v1.NN.NN\"' % version) from None + return version_list + + +def test_version_upgrade_possible(old, new, skip_equal=False): + versions = { + 'old': old.strip(), + 'new': new.strip() + } + versions_unchanged = versions.copy() + + for v_type, version in versions.items(): + versions[v_type] = test_version(version) + + # test new is greater than old + if tuple(versions['old']) > tuple(versions['new']): + raise Exception('Kubernetes old version \"%s\" is greater than new one \"%s\"' + % (versions_unchanged['old'], versions_unchanged['new'])) + + # test new is the same as old + if tuple(versions['old']) == tuple(versions['new']) and not skip_equal: + raise Exception('Kubernetes old version \"%s\" is the same as new one \"%s\"' + % (versions_unchanged['old'], versions_unchanged['new'])) + + # test major step is not greater than 1 + if versions['new'][0] - versions['old'][0] > 1: + raise Exception('Major version \"%s\" rises to new \"%s\" more than one' + % (versions_unchanged['old'], versions_unchanged['new'])) + + # test minor step is not greater than 1 + if versions['new'][1] - versions['old'][1] > 1: + raise Exception('Minor version \"%s\" rises to new \"%s\" more than one' + % (versions_unchanged['old'], versions_unchanged['new'])) + + +def recalculate_proper_timeout(nodes, timeout): + try: + amount_str = nodes['master'].get_first_member().sudo('kubectl get pods -A | wc -l').get_simple_out() + return timeout * int(amount_str) + except Exception: + return timeout * 10 * nodes['all'].nodes_amount() + + +def configure_container_runtime(cluster, kubeadm_config): + if cluster.inventory['services']['cri']['containerRuntime'] == "containerd": + if 'nodeRegistration' not in kubeadm_config: + kubeadm_config['nodeRegistration'] = {} + if 'kubeletExtraArgs' not in kubeadm_config['nodeRegistration']: + kubeadm_config['nodeRegistration']['kubeletExtraArgs'] = {} + + kubeadm_config['nodeRegistration']['criSocket'] = '/var/run/containerd/containerd.sock' + kubeadm_config['nodeRegistration']['kubeletExtraArgs']['container-runtime'] = 'remote' + kubeadm_config['nodeRegistration']['kubeletExtraArgs']['container-runtime-endpoint'] = \ + 'unix:///run/containerd/containerd.sock' + + +def exclude_node_from_upgrade_list(first_master, node_name): + if isinstance(first_master, dict): + first_master = first_master['connection'] + return first_master.sudo('sed -i \'/%s/d\' /etc/kubernetes/nodes-k8s-versions.txt' % node_name, warn=True) + + +def autodetect_non_upgraded_nodes(cluster, future_version) -> List[str]: + 
first_master = cluster.nodes['master'].get_first_member() + try: + nodes_list_result = first_master.sudo('[ ! -f /etc/kubernetes/nodes-k8s-versions.txt ] && ' + 'sudo kubectl get nodes -o custom-columns=\'' + 'VERSION:.status.nodeInfo.kubeletVersion,' + 'NAME:.metadata.name,' + 'STATUS:.status.conditions[-1].type\' ' + '| sed -n \'1!p\' | tr -s \' \' ' + '| sed \'1 i\\# This file contains a cached list of nodes and versions ' + 'required to continue the Kubernetes upgrade procedure if it fails. ' + 'If all the nodes are completely updated or you manually fixed the ' + 'problem that occurred during the upgrade, you can delete it.\' ' + '| sudo tee /etc/kubernetes/nodes-k8s-versions.txt; ' + 'sudo cat /etc/kubernetes/nodes-k8s-versions.txt') \ + .get_simple_out() + cluster.log.verbose("Remote response with nodes description:\n%s" % nodes_list_result) + except Exception as e: + cluster.log.warn("Failed to detect cluster status before upgrade. All nodes will be scheduled for upgrade.") + cluster.log.verbose(e) + return cluster.nodes['all'].get_nodes_names() + + detected_nodes_lines = nodes_list_result.splitlines() + + if not detected_nodes_lines: + raise Exception('Remote result did not returned any lines containing node info') + + upgrade_list = [] + for line in detected_nodes_lines: + line = line.strip() + + # comes from nodes-k8s-versions.txt content as a comment symbol + if line[0] == '#': + continue + version, node_name, status = line.split(' ') + if version != future_version: + cluster.log.verbose("Node \"%s\" has version \"%s\" and scheduled for upgrade." % (node_name, version)) + upgrade_list.append(node_name) + elif status != 'Ready': + cluster.log.verbose("Node \"%s\" is not ready and scheduled for upgrade." % node_name) + upgrade_list.append(node_name) + else: + cluster.log.verbose("Node \"%s\" already upgraded." % node_name) + + return upgrade_list + + +def get_group_for_upgrade(cluster, ignore_cache=False): + + if cluster.context.get('upgrade_group') and not ignore_cache: + return cluster.context['upgrade_group'] + + version = cluster.inventory["services"]["kubeadm"]["kubernetesVersion"] + if cluster.procedure_inventory.get('upgrade_nodes'): + nodes_for_upgrade = [] + for node in cluster.procedure_inventory['upgrade_nodes']: + if isinstance(node, str): + node_name = node + else: + node_name = node['name'] + nodes_for_upgrade.append(node_name) + cluster.log.verbose("Node \"%s\" manually scheduled for upgrade." % node_name) + cluster.nodes['master'].get_first_member().sudo('rm -f /etc/kubernetes/nodes-k8s-versions.txt', warn=True) + else: + nodes_for_upgrade = autodetect_non_upgraded_nodes(cluster, version) + + upgrade_group = cluster.make_group_from_nodes(nodes_for_upgrade) + cluster.context['upgrade_group'] = upgrade_group + + return cluster.context['upgrade_group'] + + +def images_grouped_prepull(group: NodeGroup, group_size: int = None): + """ + Prepull kubeadm images on group, separated on sub-groups with certain group size. Separation required to avoid high + load on images repository server, when using large clusters. + :param group: NodeGroup where prepull should be performed. + :param group_size: integer number of nodes per group. Will be automatically used from procedure_yaml or globals, if not set. + :return: String results from all nodes in presented group. 
+ """ + + cluster = group.cluster + log = cluster.log + + if not group_size: + group_size = cluster.procedure_inventory.get('prepull_group_size') + + if not group_size: + log.verbose("Group size is not set in procedure inventory, a default one will be used") + group_size = cluster.globals.get('prepull_group_size') + + nodes = group.get_ordered_members_list() + + if len(nodes) < group_size: + group_size = len(nodes) + + groups_amount = math.ceil(len(nodes) / group_size) + + log.verbose("Nodes amount: %s\nGroup size: %s\nGroups amount: %s" % (len(nodes), group_size, groups_amount)) + with RemoteExecutor(cluster.log) as exe: + for group_i in range(groups_amount): + log.verbose('Prepulling images for group #%s...' % group_i) + # RemoteExecutor used for future cases, when some nodes will require another/additional actions for prepull + for node_i in range(group_i*group_size, (group_i*group_size)+group_size): + images_prepull(nodes[node_i]) + + return exe.get_last_results_str() + + +def images_prepull(group: NodeGroup): + """ + Prepull kubeadm images on group. + :param group: NodeGroup where prepull should be performed. + :return: NodeGroupResult from all nodes in presented group. + """ + + config = get_kubeadm_config(group.cluster.inventory) + group.put(io.StringIO(config), '/etc/kubernetes/prepull-config.yaml', sudo=True) + + return group.sudo("kubeadm config images pull --config=/etc/kubernetes/prepull-config.yaml") + diff --git a/kubetool/kubernetes_accounts.py b/kubetool/kubernetes_accounts.py new file mode 100644 index 000000000..8120d5285 --- /dev/null +++ b/kubetool/kubernetes_accounts.py @@ -0,0 +1,81 @@ +import io + +import yaml + +from kubetool.core import utils + + +def enrich_inventory(inventory, cluster): + rbac = inventory['rbac'] + if not rbac.get("accounts"): + return inventory + + for i, account in enumerate(rbac["accounts"]): + if account.get('name') is None or account.get('role') is None: + raise Exception('Invalid account definition - name or role not defined') + + if account['configs'][0]['metadata'].get('name') is None: + rbac["accounts"][i]['configs'][0]['metadata']['name'] = account['name'] + if account['configs'][0]['metadata'].get('namespace') is None: + rbac["accounts"][i]['configs'][0]['metadata']['namespace'] = account['namespace'] + + if account['configs'][1]['metadata'].get('name') is None: + rbac["accounts"][i]['configs'][1]['metadata']['name'] = account['name'] + if account['configs'][1]['roleRef'].get('name') is None: + rbac["accounts"][i]['configs'][1]['roleRef']['name'] = account['role'] + + if account['configs'][1]['subjects'][0].get('name') is None: + rbac["accounts"][i]['configs'][1]['subjects'][0]['name'] = account['name'] + if account['configs'][1]['subjects'][0].get('namespace') is None: + rbac["accounts"][i]['configs'][1]['subjects'][0]['namespace'] = account['namespace'] + + return inventory + + +def install(cluster): + rbac = cluster.inventory['rbac'] + if not rbac.get("accounts"): + cluster.log.debug("No accounts specified to install, skipping...") + return + + tokens = [] + for account in rbac["accounts"]: + cluster.log.debug('Creating cluster account:') + cluster.log.debug('\tName: %s\n\tRole: %s\n\tNameSpace: %s' % (account['name'], account['role'], account['namespace'])) + + dump = '' + for config in account['configs']: + dump += '---\n'+yaml.dump(config, default_flow_style=False) + + filename = 'account_%s_%s_%s.yaml' % (account['name'], account['role'], account['namespace']) + destination_path = '/etc/kubernetes/%s' % filename + + 
utils.dump_file(cluster, dump, filename) + + cluster.log.debug("Uploading template...") + cluster.log.debug("\tDestination: %s" % destination_path) + cluster.nodes['master'].put(io.StringIO(dump), destination_path, sudo=True) + + cluster.log.debug("Applying yaml...") + cluster.nodes['master'].get_first_member().sudo('kubectl apply -f %s' % destination_path, hide=False) + + # TODO: load all via api + cluster.log.debug('Loading token...') + load_tokens_cmd = 'kubectl -n kube-system get secret ' \ + '$(sudo kubectl -n kube-system get sa %s -o \'jsonpath={.secrets[0].name}\') ' \ + '-o \'jsonpath={.data.token}\' | sudo base64 -d' % account['name'] + result = cluster.nodes['master'].get_first_member().sudo(load_tokens_cmd) + token = list(result.values())[0].stdout + + tokens.append({ + 'name': account['name'], + 'role': account['role'], + 'namespace': account['namespace'], + 'token': token, + }) + + cluster.log.debug('\nSaving tokens...') + token_filename = './account-tokens.yaml' + with open(token_filename, 'w') as tokenfile: + tokenfile.write(yaml.dump(tokens, default_flow_style=False)) + cluster.log.debug('Tokens saved to %s' % token_filename) diff --git a/kubetool/packages.py b/kubetool/packages.py new file mode 100644 index 000000000..fe0ed6518 --- /dev/null +++ b/kubetool/packages.py @@ -0,0 +1,170 @@ +from copy import deepcopy +from typing import List, Dict + +from kubetool import yum, system, apt +from kubetool.core.executor import RemoteExecutor +from kubetool.core.group import NodeGroup, NodeGroupResult + + +def enrich_inventory_associations(inventory, cluster): + os_family = system.get_os_family(cluster) + + associations = inventory['services']['packages']['associations'] + if not associations.get(os_family): + # already enriched + return inventory + + os_specific_associations = deepcopy(associations[os_family]) + os_specific_associations['debian'] = deepcopy(associations['debian']) + os_specific_associations['rhel'] = deepcopy(associations['rhel']) + os_specific_associations['rhel8'] = deepcopy(associations['rhel8']) + + for association_name, properties in associations.items(): + if association_name in os_specific_associations.keys(): + for key, value in properties.items(): + os_specific_associations[association_name][key] = value + + inventory['services']['packages']['associations'] = os_specific_associations + + return inventory + + +def get_package_manager(cluster): + os_family = system.get_os_family(cluster) + + if os_family in ['rhel', 'rhel8']: + return yum + elif os_family == 'debian': + return apt + + raise Exception('Failed to return package manager for unknown OS') + + +def ls_repofiles(group: NodeGroup) -> NodeGroupResult: + return get_package_manager(group.cluster).ls_repofiles(group) + + +def backup_repo(group: NodeGroup, repo_filename="*") -> NodeGroupResult: + return get_package_manager(group.cluster).backup_repo(group, repo_filename) + + +def add_repo(group: NodeGroup, repo_data="", repo_filename="predefined") -> NodeGroupResult: + return get_package_manager(group.cluster).add_repo(group, repo_data, repo_filename) + + +def clean(group: NodeGroup, mode="all") -> NodeGroupResult: + return get_package_manager(group.cluster).clean(group, mode) + + +def install(group: NodeGroup, include=None, exclude=None) -> NodeGroupResult: + return get_package_manager(group.cluster).install(group, include, exclude) + + +def remove(group: NodeGroup, include=None, exclude=None) -> NodeGroupResult: + return get_package_manager(group.cluster).remove(group, include, exclude) + + +def 
upgrade(group: NodeGroup, include=None, exclude=None) -> NodeGroupResult: + return get_package_manager(group.cluster).upgrade(group, include, exclude) + + +def detect_installed_package_version(group: NodeGroup, package: str): + """ + Detect package versions for each host on remote group + :param group: Group of nodes, where package should be found + :param package: RPM-compatible package name, which version should be detected + :return: NodeGroupResults with package version on each host + + Method generates different package query for different OS. + + Note: for Ubuntu/Debian some packages returns multiline results for some queries + (for example docker-ce* returns docker-ce and docker-ce-cli). + """ + + if system.get_os_family(group.cluster) in ["rhel", "rhel8"]: + cmd = r"rpm -q %s" % package + else: + # in ubuntu it is much easier to parse package name + package_name = package.split("=")[0] + cmd = r"dpkg-query -f '${Package}=${Version}\n' -W %s" % package_name + return group.sudo(cmd + ' || true') + + +def detect_installed_packages_versions(group: NodeGroup, packages_list: List or str = None) -> Dict[str, NodeGroupResult]: + """ + Detect packages versions for each host on remote group from specified list of packages + :param group: Group of nodes, where packages should be found + :param packages_list: Single package or list of packages, which versions should be detected. If packages list empty, + then packages will be automatically added from services.packages.associations and services.packages.install.include + :return: Dictionary with NodeGroupResults for each queried package, e.g. "foo" -> {1.1.1.1:"foo-1", 1.1.1.2:"foo-2"} + """ + + cluster = group.cluster + + if not packages_list: + packages_list = [] + # packages from associations + for association_name, associated_params in cluster.inventory['services']['packages']['associations'].items(): + associated_packages = associated_params.get('package_name', []) + if isinstance(associated_packages, str): + packages_list.append(associated_packages) + else: + packages_list = packages_list + associated_packages + # packages from direct installation section + if cluster.inventory['services']['packages'].get('install', {}): + packages_list = packages_list + cluster.inventory['services']['packages']['install']['include'] + + # dedup + packages_list = list(set(packages_list)) + + with RemoteExecutor(cluster.log) as exe: + for package in packages_list: + detect_installed_package_version(group, package) + + raw_result = exe.get_last_results() + results: dict[str, NodeGroupResult] = {} + + for i, package in enumerate(packages_list): + results[package] = NodeGroupResult() + for host, multiple_results in raw_result.items(): + node_detected_package = multiple_results[i].stdout.strip() + multiple_results[i].stderr.strip() + if "not installed" in node_detected_package or "no packages found" in node_detected_package: + node_detected_package = f"not installed {package}" + results[package][host] = node_detected_package + + return results + + +def detect_installed_packages_version_groups(group: NodeGroup, packages_list: List or str = None) \ + -> Dict[str, Dict[str, List]]: + """ + Detect grouped packages versions on remote group from specified list of packages. + :param group: Group of nodes, where packages should be found + :param packages_list: Single package or list of packages, which versions should be detected. 
If packages list empty, + then packages will be automatically added from services.packages.associations and services.packages.install.include + :return: Dictionary with grouped versions for each queried package, pointing to list of hosts, + e.g. {"foo" -> {"foo-1": [host1, host2]}, "bar" -> {"bar-1": [host1], "bar-2": [host2]}} + """ + + detected_packages = detect_installed_packages_versions(group, packages_list) + grouped_packages: Dict[str, Dict[str, List]] = {} + for queried_package, detected_packages_results in detected_packages.items(): + detected_grouped_packages = {} + for host, packages in detected_packages_results.items(): + if '\n' in packages: + # TODO: are we really need this??? + # this is the test, when package name containes multiple names, + # e.g. docker-ce and docker-cli for "docker-ce-*" query + packages = packages.split('\n') + else: + packages = [packages] + + for pckg in packages: + if pckg not in detected_grouped_packages: + detected_grouped_packages[pckg] = [host] + else: + detected_grouped_packages[pckg].append(host) + + grouped_packages[queried_package] = detected_grouped_packages + + return grouped_packages diff --git a/kubetool/plugins/__init__.py b/kubetool/plugins/__init__.py new file mode 100755 index 000000000..7712ce1e5 --- /dev/null +++ b/kubetool/plugins/__init__.py @@ -0,0 +1,722 @@ +#!/usr/bin/env python3 +import glob +import importlib.util +import io +import os +import re +import subprocess +import sys +import time +from copy import deepcopy +from distutils.dir_util import copy_tree +from distutils.dir_util import remove_tree +from distutils.dir_util import mkpath +from itertools import chain + +import yaml + +from kubetool import jinja, thirdparties +from kubetool.core import utils +from kubetool.core.executor import RemoteExecutor +from kubetool.core.yaml_merger import default_merger +from kubetool.core.group import NodeGroup + +# list of plugins owned and managed by kubetools +oob_plugins = [ + "calico", + "flannel", + "nginx-ingress-controller", + "haproxy-ingress-controller", + "kubernetes-dashboard", + "local-path-provisioner", +] + + +def verify_inventory(inventory, cluster): + supported_procedure_types = list(procedure_types.keys()) + + for plugin_name, plugin_item in inventory["plugins"].items(): + for step in plugin_item.get('installation', {}).get('procedures', []): + for procedure_type, configs in step.items(): + if procedure_type not in supported_procedure_types: + raise Exception('Unknown installation procedure type found in a plugin \'%s\'. ' + 'Expected any of %s, but found \'%s\'.' 
+ % (plugin_name, supported_procedure_types, procedure_type)) + procedure_types[procedure_type]['verify'](cluster, configs) + + return inventory + + +def enrich_inventory(inventory, cluster): + for plugin_name, plugin_item in inventory["plugins"].items(): + for i, step in enumerate(plugin_item.get('installation', {}).get('procedures', [])): + for procedure_type, configs in step.items(): + if procedure_types[procedure_type].get('convert') is not None: + inventory["plugins"][plugin_name]['installation']['procedures'][i][procedure_type] = \ + procedure_types[procedure_type]['convert'](cluster, configs) + return inventory + + +def enrich_upgrade_inventory(inventory, cluster): + if cluster.context.get("initial_procedure") != "upgrade": + return inventory + + with open(utils.get_resource_absolute_path('resources/configurations/defaults.yaml', script_relative=True), 'r') \ + as stream: + base_plugins = yaml.safe_load(stream)["plugins"] + current_plugins = deepcopy(inventory["plugins"]) + + # validate all plugin sections in procedure inventory + upgrade_plan = cluster.procedure_inventory.get('upgrade_plan') + previous_version = cluster.context['initial_kubernetes_version'] + for version in upgrade_plan: + upgrade_plugins = cluster.procedure_inventory.get(version, {}).get("plugins", {}) + for oob_plugin in oob_plugins: + verify_image_redefined(oob_plugin, + previous_version, + base_plugins[oob_plugin], + current_plugins[oob_plugin], + upgrade_plugins.get(oob_plugin, {})) + default_merger.merge(current_plugins, upgrade_plugins) + previous_version = version + + upgrade_plugins = cluster.procedure_inventory.get(cluster.context["upgrade_version"], {}).get("plugins", {}) + default_merger.merge(inventory["plugins"], upgrade_plugins) + return inventory + + +def verify_image_redefined(plugin_name, previous_version, base_plugin, cluster_plugin, upgrade_plugin): + """ + If some image in "cluster_plugin" is different from image in "base_plugin", + i.e. redefined, then "upgrade_plugin" should have this image explicitly + redefined too. + """ + for key, value in base_plugin.items(): + if isinstance(value, dict): + verify_image_redefined(plugin_name, + previous_version, + base_plugin[key], + cluster_plugin[key], + upgrade_plugin.get(key, {})) + elif key == "image" and base_plugin["image"] != cluster_plugin["image"] and not upgrade_plugin.get("image"): + raise Exception(f"Image is redefined for {plugin_name} in cluster.yaml for version {previous_version}, " + f"but not present in procedure inventory for next version(s). " + f"Please, specify required plugin version explicitly in procedure inventory.") + + +def install(cluster, plugins=None): + if not plugins: + plugins = cluster.inventory["plugins"] + plugins_queue = [] + max_priority = 0 + for plugin_name, plugin_item in plugins.items(): + if plugin_item.get("install", False) and plugin_item.get("installation", {}).get('procedures') is not None: + plugin_item['plugin_name'] = plugin_name + plugins_queue.append(plugin_item) + if plugin_item.get("installation", {}).get('priority') is not None \ + and plugin_item['installation']['priority'] > max_priority: + max_priority = plugin_item['installation']['priority'] + + plugins_queue.sort(key=lambda item: item.get("installation", {}).get('priority', max_priority + 1)) + + cluster.log.debug('The following plugins will be installed:') + for plugin_item in plugins_queue: + cluster.log.debug('%i. 
%s' % ( + plugin_item.get("installation", {}).get('priority', max_priority + 1), + plugin_item['plugin_name'] + )) + + cluster.log.debug('Starting plugins installation:') + + for plugin_item in plugins_queue: + install_plugin(cluster, plugin_item['plugin_name'], plugin_item["installation"]['procedures']) + + +def install_plugin(cluster, plugin_name, installation_procedure): + cluster.log.debug("**** INSTALLING PLUGIN %s ****" % plugin_name) + for step in installation_procedure: + for apply_type, configs in step.items(): + procedure_types[apply_type]['apply'](cluster, configs) + + +def expect_pods(cluster, pods, timeout=None, retries=None, node=None, apply_filter=None): + + if isinstance(cluster, NodeGroup): + # cluster is a group, not a cluster + cluster = cluster.cluster + + if timeout is None: + timeout = cluster.globals['expect']['kubernetes']['timeout'] + if retries is None: + retries = cluster.globals['expect']['kubernetes']['retries'] + + cluster.log.debug("Expecting the following pods to be ready: %s" % pods) + cluster.log.verbose("Max expectation time: %ss" % (timeout * retries)) + + cluster.log.debug("Waiting for pods...") + + failures = 0 + + if node is None: + node = cluster.nodes['master'].get_first_member() + + command = 'kubectl get pods -A -o=wide' + if apply_filter is not None: + command += ' | grep %s' % apply_filter + + while retries > 0: + + result = node.sudo(command, warn=True) + + stdout = list(result.values())[0].stdout + running_pods_stdout = '' + + failure_found = False + + for stdout_line in iter(stdout.splitlines()): + + stdout_line_allowed = False + + # is current line has requested pod for verification? + # we do not have to fail on pods with bad status which was not requested + for pod in pods: + if pod + "-" in stdout_line: + stdout_line_allowed = True + + if stdout_line_allowed: + if 'Running' in stdout_line: + running_pods_stdout += stdout_line + '\n' + elif is_critical_state_in_stdout(cluster, stdout_line): + cluster.log.verbose("Failed pod detected: %s\n" % stdout_line) + + if not failure_found: + failure_found = True + failures += 1 + + # just in case, skip the error a couple of times, what if it comes out of the failure state? + if failures > cluster.globals['pods']['allowed_failures']: + raise Exception('Pod entered a state of error, further proceeding is impossible') + + pods_ready = False + if running_pods_stdout and running_pods_stdout != "" and "0/1" not in running_pods_stdout: + pods_ready = True + for pod in pods: + # it is necessary to look for pods with the name "xxxx-xxxx-" instead of "xxxx-xxxx" because + # "xxxx-xxxx" may be the name of the namespace in which another healthy pod will be running + if pod + "-" not in running_pods_stdout: + pods_ready = False + + if pods_ready: + cluster.log.debug("Pods are ready!") + cluster.log.debug(running_pods_stdout) + return + else: + retries -= 1 + cluster.log.debug("Pods are not ready yet... 
(%ss left)" % (retries * timeout)) + cluster.log.debug(running_pods_stdout) + time.sleep(timeout) + + raise Exception('In the expected time, the pods did not become ready') + + +def is_critical_state_in_stdout(cluster, stdout): + for state in cluster.globals['pods']['critical_states']: + if state in stdout: + return True + return False + + +# **** TEMPLATES **** + +def convert_template(cluster, config): + return _convert_file(config) + + +def verify_template(cluster, config): + _verify_file(config, "Template") + + +def apply_template(cluster, config): + _apply_file(cluster, config, "Template") + + +# **** EXPECT **** + +def convert_expect(cluster, config): + if config.get('pods') is not None and isinstance(config['pods'], list): + config['pods'] = { + 'list': config['pods'] + } + return config + + +def verify_expect(cluster, config): + if not config: + raise Exception('Expect procedure is empty, but it should not be') + if config.get('pods') is not None and config['pods'].get('list') is None: + raise Exception('Pod expectation defined, but pods list is missing') + + +def apply_expect(cluster, config): + # TODO: support more expects? + if config.get('pods') is not None: + expect_pods(cluster, config['pods']['list'], + timeout=config['pods'].get('timeout', cluster.globals['pods']['expect']['plugins']['timeout']), + retries=config['pods'].get('retries', cluster.globals['pods']['expect']['plugins']['retries']) + ) + + +# **** PYTHON **** + +def verify_python(cluster, step): + if step.get('module') is None: + raise Exception('Module path is missing for python in plugin steps, but should be defined. Step:\n%s' % step) + if step.get('method') is None: + raise Exception('Method name is missing for python in plugin steps, but should be defined. Step:\n%s' % step) + # TODO: verify fields types and contents + + +def apply_python(cluster, step): + module_path = utils.determine_resource_absolute_path(step['module']) + method_name = step['method'] + method_arguments = step.get('arguments', {}) + + cluster.log.debug("Running method %s from %s module..." % (method_name, module_path)) + module_filename = os.path.basename(module_path) + spec = importlib.util.spec_from_file_location(os.path.splitext(module_filename)[0], module_path) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + getattr(module, method_name)(cluster, **method_arguments) + + +# **** THIRDPARTIES **** + +def verify_thirdparty(cluster, thirdparty): + defined_thirdparties = list(cluster.inventory['services'].get('thirdparties', {}).keys()) + if thirdparty not in defined_thirdparties: + raise Exception('Specified thirdparty %s not found in thirdpartirs definition. Expected any of %s.' 
+ % (thirdparty, defined_thirdparties)) + + +def apply_thirdparty(cluster, thirdparty): + return thirdparties.install_thirdparty(cluster, thirdparty) + +# **** SHELL **** + +def convert_shell(cluster, config): + if isinstance(config, str): + config = { + 'command': config + } + return config + + +def verify_shell(cluster, config): + if config.get('command') is None or config['command'] == '': + raise Exception('Shell command is missing') + + out_vars = config.get('out_vars', []) + explicit_group = cluster.create_group_from_groups_nodes_names(config.get('groups', []), config.get('nodes', [])) + if out_vars and explicit_group and explicit_group.nodes_amount() != 1: + raise Exception('Shell output variables could be used for single-node groups, but multi-node group was found') + + in_vars = config.get('in_vars', []) + words_splitter = re.compile('\W') + for var in chain(in_vars, out_vars): + if not var.get('name'): + raise Exception('All output and input shell variables should have "name" property specified') + var_name = var['name'] + if len(words_splitter.split(var_name)) > 1: + raise Exception(f"'{var_name}' is not a valid shell variable name") + + # TODO: verify fields types and contents + + +def apply_shell(cluster, step): + commands = step['command'] + sudo = step.get('sudo', False) + groups = step.get('groups', []) + nodes = step.get('nodes', []) + in_vars = step.get('in_vars', []) + out_vars = step.get('out_vars', []) + vars_separator = "~~~~EXPORTED_VARIABLE~~~~" + + if not groups and not nodes: + common_group = cluster.nodes['master'].get_any_member() + else: + common_group = cluster.create_group_from_groups_nodes_names(groups, nodes) + + if isinstance(commands, list): + commands = ' && '.join(commands) + + out_vars_aliases = {} + for var in out_vars: + var_name = var['name'] + if var_name in out_vars_aliases: + # var is already exported, need to only add alternative alias + out_vars_aliases[var_name].add(var.get('save_as', var_name)) + continue + + out_vars_aliases[var_name] = {var.get('save_as', var_name)} + # print variable info to stdout in yaml format, separating data using `vars_separator` + # quotes usage is important for following code to work correctly in different cases + echo_var_cmd = f"echo {vars_separator} && " \ + f"echo name: {var_name} && " \ + f"echo 'value: |2-' && " \ + f"echo \"${var_name}\" | sed 's/^/ /'" + commands = f"{commands} && {echo_var_cmd}" + + in_vars_dict = {} + for var in in_vars: + var_name = var['name'] + # get defined value or saved value, defaulting to empty value + var_value = var.get('value', cluster.context['runtime_vars'].get(var_name, '')) + # replace single-quotes with '"'"' to isolate all single quotes during ssh env inlining + var_value = var_value.replace("'", "'\"'\"'") + # wrap variable value with single-quotes for `inline_ssh_env` feature to work correctly with different content + in_vars_dict[var_name] = f"'{var_value}'" + + method = common_group.run + if sudo: + method = common_group.sudo + + cluster.log.debug('Running shell command...') + result = method(commands, env=in_vars_dict) + + if out_vars: + stdout = list(result.values())[0].stdout + stdout_parts = stdout.split(vars_separator) + cluster.log.debug(stdout_parts[0]) # printing original user output + for part in stdout_parts[1:]: + var = yaml.safe_load(part) + aliases = out_vars_aliases[var['name']] + for alias in aliases: + cluster.context['runtime_vars'][alias] = var['value'] + else: + cluster.log.debug(result) + + +# **** ANSIBLE **** + +def convert_ansible(cluster, 
config): + if isinstance(config, str): + config = { + 'playbook': config + } + # if config['playbook'][0] != '/': + # config['playbook'] = os.path.abspath(os.path.dirname(__file__) + '../../../' + config['playbook']) + return config + + +def verify_ansible(cluster, config): + if config.get('playbook') is None or config['playbook'] == '': + raise Exception('Playbook path is missing') + if not os.path.isfile(config['playbook']): + raise Exception('Playbook file %s does not exist' % config['playbook']) + # TODO: verify fields types and contents + + +def apply_ansible(cluster, step): + playbook_path = utils.determine_resource_absolute_path(step.get('playbook')) + external_vars = step.get('vars', {}) + become = step.get('become', False) + groups = step.get('groups', []) + nodes = step.get('nodes', []) + + command = 'ansible-playbook -i ansible-inventory.ini %s' % playbook_path + + if become: + command += ' -b' + + if groups or nodes: + common_group = cluster.create_group_from_groups_nodes_names(groups, nodes) + command += ' --limit %s' % ','.join(common_group.get_nodes_names()) + + if external_vars: + _vars = [] + for k, v in external_vars.items(): + _vars.append('%s=%s' % (k, v)) + command += ' --extra-vars "%s"' % ' '.join(_vars) + + cluster.log.verbose("Running shell \"%s\"" % command) + + result = subprocess.run(command, stdout=sys.stdout, stderr=sys.stderr, shell=True) + if result.returncode != 0: + raise Exception("Failed to apply ansible plugin, see error above") + + +def verify_helm(cluster, config): + if config.get('chart_path') is None or config['chart_path'] == '': + raise Exception('Chart path is missing') + + if cluster.inventory.get('public_cluster_ip') is None: + raise Exception('public_cluster_ip is a mandatory parameter in the inventory when the helm plugin is used.') + + +def apply_helm(cluster, config): + chart_path = get_local_chart_path(cluster.log, config) + process_chart_values(config, chart_path) + + common_group = cluster.nodes['master'].get_first_member() + + cluster.log.debug('Loading kubeconfig from master...') + kubeconfig = common_group.sudo("cat /root/.kube/config") + + kubeconfig_stdout = list(kubeconfig.values())[0].stdout + + # Replace cluster FQDN with ip + public_cluster_ip = cluster.inventory.get('public_cluster_ip') + cluster_name = cluster.inventory.get('cluster_name') + kubeconfig_stdout = kubeconfig_stdout.replace(cluster_name, public_cluster_ip) + + cluster.log.debug("Writing config to file...") + local_config_path = os.getcwd() + "/config" + command = 'echo "%s" > %s' % (kubeconfig_stdout, local_config_path) + subprocess.check_output(command, shell=True) + + with open(chart_path + '/Chart.yaml', 'r') as stream: + chart_metadata = yaml.safe_load(stream) + chart_name = chart_metadata["name"] + + cluster.log.debug("Running helm chart %s" % chart_name) + + namespace = config.get('namespace') + if not namespace: + cluster.log.verbose('Namespace configuration is missing, "default" namespace will be used') + namespace = "default" + + prepare_for_helm_command = f'export KUBECONFIG="{local_config_path}"; cd "{chart_path}"; helm -n {namespace} ' + + cluster.log.verbose("Checking if the chart has already been installed") + command = prepare_for_helm_command + 'list -q' + helm_existed_releases = subprocess.check_output(command, shell=True).decode('utf-8') + + command = f'echo "{helm_existed_releases}" | grep "^{chart_name}$" | cat' + deployed_release = subprocess.check_output(command, shell=True) + if deployed_release: + cluster.log.debug("Deployed release %s is found. 
Upgrading it..." % chart_name) + deployment_mode = "upgrade" + else: + cluster.log.debug("Deployed release %s is not found. Installing it..." % chart_name) + deployment_mode = "install" + + command = prepare_for_helm_command + f'{deployment_mode} {chart_name} . --debug' + output = subprocess.check_output(command, shell=True) + cluster.log.debug(output.decode('utf-8')) + + +def process_chart_values(config, local_chart_path): + config_values = config.get("values") + config_values_file = config.get("values_file") + + if config_values is not None: + with open(local_chart_path + '/values.yaml', 'r+') as stream: + original_values = yaml.safe_load(stream) + stream.seek(0) + merged_values = default_merger.merge(original_values, config_values) + stream.write(yaml.dump(merged_values)) + stream.truncate() + else: + if config_values_file is not None: + with open(local_chart_path + '/values.yaml', 'r+') as stream: + with open(config_values_file, 'r+') as additional_stream: + original_values = yaml.safe_load(stream) + additional_values = yaml.safe_load(additional_stream) + if additional_values is None: + return + stream.seek(0) + merged_values = default_merger.merge(original_values, additional_values) + stream.write(yaml.dump(merged_values)) + stream.truncate() + + +def get_local_chart_path(log, config): + chart_path = config.get('chart_path') + + is_curl = chart_path[:4] == 'http' and '://' in chart_path[4:8] + + local_chart_folder = os.getcwd() + "/local_chart_folder" + if os.path.isdir(local_chart_folder): + remove_tree(local_chart_folder) + mkpath(local_chart_folder) + if is_curl: + log.verbose('Chart download via curl detected') + destination = os.path.basename(chart_path) + commands = 'curl -g -k %s -o %s' % (chart_path, destination) + extension = destination.split('.')[-1] + if extension == 'zip': + log.verbose('Unzip will be used for unpacking') + commands += ' && unzip %s -d %s' % (destination, local_chart_folder) + else: + log.verbose('Tar will be used for unpacking') + commands += ' && tar -zxf %s -C %s' % (destination, local_chart_folder) + log.debug(subprocess.check_output(commands, shell=True)) + else: + log.debug("Create copy of chart to work with") + copy_tree(chart_path, local_chart_folder) + + log.debug("Ready chart path = %s" % local_chart_folder) + return local_chart_folder + + +def convert_config(cluster, config): + return _convert_file(config) + + +def verify_config(cluster, config): + _verify_file(config, "Config") + + +def apply_config(cluster, config): + _apply_file(cluster, config, "Config") + + +def _convert_file(config): + if isinstance(config, str): + config = { + 'source': config + } + # if config['source'][0] != '/': + # config['source'] = os.path.abspath(os.path.dirname(__file__) + '../../../' + config['source']) + return config + + +def _verify_file(config, file_type): + """ + Verifies if the path matching the config 'source' key exists and points to + existing files. 
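+    The 'source' value may be a glob pattern: at least one file must match it, and every matched path must point to an existing regular file.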
+ """ + if config.get('source') is None or config['source'] == '': + raise Exception('%s file source is missing' % file_type) + + # Determite absolute path to templates + source = os.path.join( + utils.determine_resource_absolute_dir(config['source']), + os.path.basename(config['source']) + ) + + files = glob.glob(source) + + if len(files) == 0: + raise Exception('Cannot find any %s files matching this ' + 'source value: %s' % (file_type, source)) + + for file in files: + source = utils.determine_resource_absolute_path(file) + if not os.path.isfile(source): + raise Exception('%s file %s not exists' % (file_type, source)) + # TODO: verify fields types and contents + + +def _apply_file(cluster, config, file_type): + """ + Apply yamls as is or + renders and applies templates that match the config 'source' key. + """ + # Set needed settings from config + apply_required = config.get('apply_required', True) + use_sudo = config.get('sudo', True) + destination_groups = config.get('destination_groups', []) + destination_nodes = config.get('destination_nodes', []) + apply_groups = config.get('apply_groups', []) + apply_nodes = config.get('apply_nodes', []) + do_render = config.get('do_render', True) + + # Determite absolute path to templates + source = os.path.join( + utils.determine_resource_absolute_dir(config['source']), + os.path.basename(config['source']) + ) + + files = glob.glob(source) + + if len(files) == 0: + raise Exception('Cannot find any %s files matching this ' + 'source value: %s' % (source, file_type)) + + for file in files: + source_filename = os.path.basename(file) + + if do_render: + # templates usually have '.j2' extension, which we want to remove from resulting filename + # but we also support usual '.yaml' files without '.j2' extension, in this case we do not want to remove extension + split_extension = os.path.splitext(source_filename) + if split_extension[1] == ".j2": + source_filename = split_extension[0] + + destination_path = config.get('destination', '/etc/kubernetes/%s' % source_filename) + apply_command = config.get('apply_command', 'kubectl apply -f %s' % destination_path) + + if not destination_groups and not destination_nodes: + destination_common_group = cluster.nodes['master'] + else: + destination_common_group = cluster.create_group_from_groups_nodes_names(destination_groups, + destination_nodes) + + if not apply_groups and not apply_nodes: + apply_common_group = cluster.nodes['master'].get_any_member() + else: + apply_common_group = cluster.create_group_from_groups_nodes_names(apply_groups, apply_nodes) + + cluster.log.debug("Uploading %s..." 
% file_type) + cluster.log.debug("\tSource: %s" % file) + cluster.log.debug("\tDestination: %s" % destination_path) + + if do_render: + render_vars = {**cluster.inventory, 'runtime_vars': cluster.context['runtime_vars']} + generated_data = jinja.new(cluster.log).from_string( + open(utils.determine_resource_absolute_path(file)).read()).render(**render_vars) + utils.dump_file(cluster, generated_data, source_filename) + destination_common_group.put(io.StringIO(generated_data), destination_path, backup=True, sudo=use_sudo) + else: + destination_common_group.put(utils.determine_resource_absolute_path(file), destination_path, backup=True, sudo=use_sudo) + + if apply_required: + method = apply_common_group.run + if use_sudo: + method = apply_common_group.sudo + cluster.log.debug("Applying yaml...") + method(apply_command, hide=False) + else: + cluster.log.debug('Apply is not required') + + +procedure_types = { + 'template': { + 'convert': convert_template, + 'verify': verify_template, + 'apply': apply_template + }, + 'expect': { + 'convert': convert_expect, + 'verify': verify_expect, + 'apply': apply_expect + }, + 'python': { + 'verify': verify_python, + 'apply': apply_python + }, + 'thirdparty': { + 'verify': verify_thirdparty, + 'apply': apply_thirdparty + }, + 'shell': { + 'convert': convert_shell, + 'verify': verify_shell, + 'apply': apply_shell + }, + 'ansible': { + 'convert': convert_ansible, + 'verify': verify_ansible, + 'apply': apply_ansible + }, + 'helm': { + 'verify': verify_helm, + 'apply': apply_helm + }, + 'config': { + 'convert': convert_config, + 'verify': verify_config, + 'apply': apply_config + }, +} diff --git a/kubetool/plugins/calico.py b/kubetool/plugins/calico.py new file mode 100755 index 000000000..f456e9b8a --- /dev/null +++ b/kubetool/plugins/calico.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python3 + + +def enrich_inventory(inventory, cluster): + + # Calico is used by default, but we have to figure that out first + # First of all, check whether Calico is explicitly set to be installed or not + # By default the installation parameter is unset, meaning the user did not make any decision + if inventory["plugins"]["calico"].get("install") is None: + # Has the user defined the Flannel plugin and set it to install? + flannel_required = inventory["plugins"].get("flannel", {}).get("install", False) + # Has the user defined the Canal plugin and set it to install? 
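+    # (same check as for flannel above: the plugin counts only if it is explicitly set to install)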
+ canal_required = inventory["plugins"].get("canal", {}).get("install", False) + # If Flannel and Canal is unset or not required to install, then install Calico + if not flannel_required and not canal_required: + inventory["plugins"]["calico"]["install"] = True + + return inventory diff --git a/kubetool/plugins/haproxy_ingress.py b/kubetool/plugins/haproxy_ingress.py new file mode 100755 index 000000000..4d669852a --- /dev/null +++ b/kubetool/plugins/haproxy_ingress.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 + +import time + + +def override_priviledged_ports(cluster, service=None, namespace=None): + cluster.log.debug('Unlocking privileged ports...') + masters = cluster.nodes['master'] + masters.sudo('sed \'/- kube-apiserver/a\ - --service-node-port-range=80-32000\' -i /etc/kubernetes/manifests/kube-apiserver.yaml', hide=False) + masters.sudo('systemctl restart kubelet.service', hide=False) + # TODO: do not use timeout - wait for service start on all nodes + time.sleep(60) + masters.get_first_member().sudo('kubectl patch svc %s -n %s -p \'[ { "op": "replace", "path": "/spec/ports/1/nodePort", "value": 443 }, { "op": "replace", "path": "/spec/ports/0/nodePort", "value": 80 } ]\' --type=\'json\'' % (service, namespace), hide=False) + masters.sudo('sed \'/service-node-port-range=.*/d\' -i /etc/kubernetes/manifests/kube-apiserver.yaml', hide=False) + masters.sudo('systemctl restart kubelet.service', hide=False) + time.sleep(60) diff --git a/kubetool/plugins/nginx_ingress.py b/kubetool/plugins/nginx_ingress.py new file mode 100644 index 000000000..49571a2c5 --- /dev/null +++ b/kubetool/plugins/nginx_ingress.py @@ -0,0 +1,139 @@ +import io + +from kubetool.core import utils +from kubetool.core.group import NodeGroup + + +def verify_inventory(inventory, _): + if inventory["plugins"]["nginx-ingress-controller"]["install"] is not True: + return inventory + + nginx_plugin = inventory["plugins"]["nginx-ingress-controller"] + + # verify default certificate config + if nginx_plugin["controller"]["ssl"].get("default-certificate"): + default_cert = nginx_plugin["controller"]["ssl"]["default-certificate"] + + if default_cert.get("data") is None and default_cert.get("paths") is None: + raise Exception("Default ingress certificate is specified, but no certificate and private key provided") + if default_cert.get("data") and default_cert.get("paths"): + raise Exception("Default ingress certificate and key should be provided either as raw data, " + "or either as paths, but not both at the same time") + + cert = default_cert.get("data", default_cert.get("paths")) + if cert.get("cert") is None or cert.get("key") is None: + raise Exception("Default ingress certificate should have both certificate and private key specified, " + "but one of them is missing") + + return inventory + + +def enrich_inventory(inventory, _): + if inventory["plugins"]["nginx-ingress-controller"]["install"] is not True: + return inventory + + if inventory["plugins"]["nginx-ingress-controller"].get('custom_headers'): + if not inventory["plugins"]["nginx-ingress-controller"].get('config_map'): + inventory["plugins"]["nginx-ingress-controller"]['config_map'] = {} + if not inventory["plugins"]["nginx-ingress-controller"]['config_map'].get('proxy-set-headers'): + inventory["plugins"]["nginx-ingress-controller"]['config_map']['proxy-set-headers'] = 'ingress-nginx/custom-headers' + + return inventory + + +def cert_renew_enrichment(inventory, cluster): + # check that renewal is required for nginx + if not 
cluster.procedure_inventory.get("nginx-ingress-controller"): + return inventory + + nginx_plugin = inventory["plugins"]["nginx-ingress-controller"] + + # check that renewal is possible + if nginx_plugin["install"] is not True: + raise Exception("Certificates can not be renewed for nginx plugin since it is not installed") + + # update certificates in inventory, other check will be performed in "verify_inventory" function + nginx_plugin["controller"]["ssl"]["default-certificate"] = cluster.procedure_inventory["nginx-ingress-controller"] + + return inventory + + +def finalize_inventory(cluster, inventory_to_finalize): + # check that renewal is required for nginx + if not cluster.procedure_inventory.get("nginx-ingress-controller"): + return inventory_to_finalize + + if not inventory_to_finalize["plugins"].get("nginx-ingress-controller"): + inventory_to_finalize["plugins"]["nginx-ingress-controller"] = {} + + if not inventory_to_finalize["plugins"]["nginx-ingress-controller"].get("controller"): + inventory_to_finalize["plugins"]["nginx-ingress-controller"]["controller"] = {} + + if not inventory_to_finalize["plugins"]["nginx-ingress-controller"]["controller"].get("ssl"): + inventory_to_finalize["plugins"]["nginx-ingress-controller"]["controller"]["ssl"] = {} + + nginx_plugin = inventory_to_finalize["plugins"]["nginx-ingress-controller"] + nginx_plugin["controller"]["ssl"]["default-certificate"] = cluster.procedure_inventory["nginx-ingress-controller"] + + return inventory_to_finalize + + +def manage_custom_certificate(cluster): + if not cluster.inventory["plugins"]["nginx-ingress-controller"]["controller"]["ssl"].get("default-certificate"): + cluster.log.debug("No custom default ingress certificate specified, skipping...") + return + + base_path = "/etc/kubernetes/custom-certs" + certificate_path = base_path + "/cert" + private_key_path = base_path + "/key" + secret_name = "default-ingress-cert" + secret_namespace = "kube-system" + + first_master = cluster.nodes["master"].get_first_member() + default_cert = cluster.inventory["plugins"]["nginx-ingress-controller"]["controller"]["ssl"]["default-certificate"] + + # first, we need to load cert and key files to first master to known locations + first_master.sudo(f"mkdir -p {base_path}") + try: + first_master.call(put_custom_certificate, + default_cert=default_cert, + crt_path=certificate_path, + key_path=private_key_path) + + # second, we need to validate cert and key using openssl + first_master.call(verify_certificate_and_key, crt_path=certificate_path, key_path=private_key_path) + + # third, we need to create tls secret under well-known name + # this certificate is already configured to be used by controller + first_master.call(create_tls_secret, + crt_path=certificate_path, + key_path=private_key_path, + name=secret_name, + namespace=secret_namespace) + finally: + # fourth, we need to remove base path dir + first_master.sudo(f"rm -rf {base_path}") + + +def put_custom_certificate(first_master: NodeGroup, default_cert, crt_path, key_path): + if default_cert.get("data"): + cert = io.StringIO(default_cert["data"]["cert"]) + key = io.StringIO(default_cert["data"]["key"]) + else: + cert = utils.get_resource_absolute_path(default_cert["paths"]["cert"]) + key = utils.get_resource_absolute_path(default_cert["paths"]["key"]) + + first_master.put(cert, crt_path, sudo=True, binary=False) + first_master.put(key, key_path, sudo=True, binary=False) + + +def verify_certificate_and_key(first_master: NodeGroup, crt_path, key_path): + crt_md5 = 
first_master.sudo(f"openssl x509 -noout -modulus -in {crt_path} | openssl md5").get_simple_out() + key_md5 = first_master.sudo(f"openssl rsa -noout -modulus -in {key_path} | openssl md5").get_simple_out() + if crt_md5 != key_md5: + raise Exception("Custom default ingress certificate and key are not compatible!") + + +def create_tls_secret(first_master, crt_path, key_path, name, namespace): + first_master.sudo(f"kubectl create secret tls {name} --key {key_path} --cert {crt_path} -n {namespace} " + f"--dry-run -o yaml | sudo kubectl apply -f -") diff --git a/kubetool/procedures/__init__.py b/kubetool/procedures/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/kubetool/procedures/add_node.py b/kubetool/procedures/add_node.py new file mode 100755 index 000000000..4f04cc12a --- /dev/null +++ b/kubetool/procedures/add_node.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python3 + +import copy + +from collections import OrderedDict +from kubetool import kubernetes, system +from kubetool.core import flow, utils +from kubetool.procedures import install + + +def deploy_kubernetes_join(cluster): + + group = cluster.nodes['master'].include_group(cluster.nodes.get('worker')).get_new_nodes() + + if cluster.context['initial_procedure'] == 'add_node' and group.is_empty(): + cluster.log.debug("No kubernetes nodes to perform") + return + + cluster.nodes['master'].get_new_nodes().call(kubernetes.join_new_master) + + if "worker" in cluster.nodes: + cluster.nodes["worker"].get_new_nodes().new_group(apply_filter=lambda node: 'master' not in node['roles']) \ + .call(kubernetes.init_workers) + + group.call_batch([ + kubernetes.apply_labels, + kubernetes.apply_taints + ]) + + if group.is_empty(): + cluster.log.debug("Skipped: no kubernetes nodes to wait") + return + else: + cluster.log.debug("Waiting for new kubernetes nodes...") + kubernetes.wait_for_nodes(group) + + +def add_node_finalize_inventory(cluster, inventory_to_finalize): + if cluster.context.get('initial_procedure') != 'add_node': + return inventory_to_finalize + + new_nodes = cluster.nodes['all'].get_new_nodes() + + # add nodes to inventory if they in new nodes + for new_node in new_nodes.get_ordered_members_list(provide_node_configs=True): + new_node_found = False + for i, node in enumerate(inventory_to_finalize['nodes']): + if node['name'] == new_node['name']: + # new node already presented in final inventory - ok, just remove label + if 'add_node' in inventory_to_finalize['nodes'][i]['roles']: + inventory_to_finalize['nodes'][i]['roles'].remove('add_node') + cluster.inventory['nodes'][i]['roles'].remove('add_node') + new_node_found = True + break + + # new node is not presented in final inventory - let's add it original config + if not new_node_found: + node_config = None + + # search for new node config in procedure inventory + if cluster.procedure_inventory.get('nodes', {}): + for node_from_procedure in cluster.procedure_inventory['nodes']: + if node_from_procedure['name'] == new_node['name']: + node_config = node_from_procedure + break + # maybe new nodes from other places? + + if node_config is None: + raise Exception('Not possible to find new node config for final inventory') + inventory_to_finalize["nodes"].append(node_config) + + # maybe merge vrrp ips only when adding? 
+ if "vrrp_ips" in cluster.procedure_inventory: + utils.merge_vrrp_ips(cluster.procedure_inventory, inventory_to_finalize) + + return inventory_to_finalize + + +def cache_installed_packages(cluster): + """ + Task which is used to collect already installed packages versions on already existing nodes. + It is called first during "add_node" procedure, + so that new nodes install exactly the same packages as on other already existing nodes. + """ + + # avoid caching when unchanged nodes are not equal to GLOBAL os + global_os = system.get_os_family(cluster) + for node in cluster.nodes['all'].get_unchanged_nodes().get_ordered_members_list(provide_node_configs=True): + if cluster.context["nodes"][node['connect_to']]["os"]['family'] != global_os: + cluster.log.debug(f"New node has different OS ({global_os}) " + f"than some other nodes ({cluster.context['nodes'][node['connect_to']]['os']['family']}), " + "packages will not be cached.") + return + + cluster.cache_package_versions() + + +tasks = OrderedDict(copy.deepcopy(install.tasks)) +del tasks["deploy"]["plugins"] +del tasks["deploy"]["accounts"] +tasks["deploy"]["kubernetes"]["init"] = deploy_kubernetes_join +tasks["cache_packages"] = cache_installed_packages +tasks.move_to_end("cache_packages", last=False) + + +def main(cli_arguments=None): + + cli_help = ''' + Script for adding node to Kubernetes cluster. + + How to use: + + ''' + + parser = flow.new_parser(cli_help) + parser.add_argument('--tasks', + default='', + help='define comma-separated tasks to be executed') + + parser.add_argument('--exclude', + default='', + help='exclude comma-separated tasks from execution') + + parser.add_argument('procedure_config', metavar='procedure_config', type=str, + help='config file for add_node procedure') + + if cli_arguments is None: + args = parser.parse_args() + else: + args = parser.parse_args(cli_arguments) + + defined_tasks = [] + defined_excludes = [] + + if args.tasks != '': + defined_tasks = args.tasks.split(",") + + if args.exclude != '': + defined_excludes = args.exclude.split(",") + + context = flow.create_context(args, procedure='add_node') + context['inventory_regenerate_required'] = True + + flow.run( + tasks, + defined_tasks, + defined_excludes, + args.config, + context, + procedure_inventory_filepath=args.procedure_config, + cumulative_points=install.cumulative_points + ) + + +if __name__ == '__main__': + main() diff --git a/kubetool/procedures/backup.py b/kubetool/procedures/backup.py new file mode 100755 index 000000000..4ab04f67d --- /dev/null +++ b/kubetool/procedures/backup.py @@ -0,0 +1,485 @@ +#!/usr/bin/env python3 + +import datetime +import io +import json +import os +import shutil +import tarfile +import time +from collections import OrderedDict +import yaml + +from kubetool import packages, system +from kubetool.core import utils, flow +from kubetool.core.cluster import KubernetesCluster +from kubetool.core.group import NodeGroup +from kubetool.procedures import install + + +def get_default_backup_files_list(cluster): + haproxy_service = cluster.inventory['services']['packages']['associations']['haproxy']['service_name'] + keepalived_service = cluster.inventory['services']['packages']['associations']['keepalived']['service_name'] + + backup_files_list = [ + "/etc/resolv.conf", + "/etc/hosts", + "/etc/chrony.conf", + "/etc/selinux/config", + "/etc/yum.repos.d/", + "/var/lib/kubelet/pki", + "/etc/modules-load.d/", + "/etc/audit/rules.d/", + "/etc/haproxy/haproxy.cfg", + 
f"/etc/systemd/system/{haproxy_service}.service.d/{haproxy_service}.conf", + "/etc/keepalived/keepalived.conf", + f"/etc/systemd/system/{keepalived_service}.service.d/{keepalived_service}.conf", + "/usr/local/bin/check_haproxy.sh", + "/etc/kubernetes", + "/etc/systemd/system/kubelet.service" + ] + + cri_impl = cluster.inventory['services']['cri']['containerRuntime'] + if cri_impl == "docker": + backup_files_list.append("/etc/docker/daemon.json") + elif cri_impl == "containerd": + backup_files_list.append("/etc/containerd/config.toml") + backup_files_list.append("/etc/crictl.yaml") + + return backup_files_list + + +def prepare_backup_tmpdir(cluster): + backup_directory = cluster.context.get('backup_tmpdir') + if not backup_directory: + cluster.log.verbose('Backup directory is not ready yet, preparing..') + backup_directory = cluster.context['backup_tmpdir'] = cluster.context['execution_arguments'][ + 'dump_location'] + '/backup' + shutil.rmtree(backup_directory, ignore_errors=True) + os.mkdir(backup_directory) + cluster.log.verbose('Backup directory prepared') + return backup_directory + + +def verify_backup_location(cluster): + target = utils.get_resource_absolute_path(cluster.procedure_inventory.get('backup_location', 'backup.tar.gz')) + if not os.path.isdir(target) and not os.path.isdir('/'.join(target.split('/')[:-1])): + raise FileNotFoundError('Backup location directory not exists') + + +def export_ansible_inventory(cluster): + backup_directory = prepare_backup_tmpdir(cluster) + shutil.copyfile(cluster.context['execution_arguments']['ansible_inventory_location'], + os.path.join(backup_directory, 'ansible-inventory.ini')) + cluster.log.verbose('ansible-inventory.ini exported to backup') + + +def export_packages_list(cluster): + cluster.context['backup_descriptor']['nodes']['packages'] = {} + if system.get_os_family(cluster) in ['rhel', 'rhel8']: + cmd = r"rpm -qa" + else: + cmd = r"dpkg-query -f '${Package}=${Version}\n' -W" + results = cluster.nodes['all'].sudo(cmd) + for conn, result in results.items(): + cluster.context['backup_descriptor']['nodes']['packages'][conn.host] = result.stdout.strip().split('\n') + + +def export_hostname(cluster): + cluster.context['backup_descriptor']['nodes']['hostnames'] = {} + results = cluster.nodes['all'].sudo('hostnamectl status | head -n 1 | sed -e \'s/[a-zA-Z ]*://g\'') + cluster.log.verbose(results) + for conn, result in results.items(): + cluster.context['backup_descriptor']['nodes']['hostnames'][conn.host] = result.stdout.strip() + + +def export_cluster_yaml(cluster): + backup_directory = prepare_backup_tmpdir(cluster) + shutil.copyfile(os.path.join(cluster.context['execution_arguments']['dump_location'], 'cluster.yaml'), + os.path.join(backup_directory, 'cluster.yaml')) + shutil.copyfile(utils.get_resource_absolute_path(cluster.context['execution_arguments']['config']), + os.path.join(backup_directory, 'original_cluster.yaml')) + cluster.log.verbose('cluster.yaml exported to backup') + + +def export_nodes(cluster): + backup_directory = prepare_backup_tmpdir(cluster) + backup_nodes_data_dir = os.path.join(backup_directory, 'nodes_data') + os.mkdir(backup_nodes_data_dir) + + backup_list = get_default_backup_files_list(cluster) + for filepath, enabled in cluster.procedure_inventory.get('backup_plan', {}).get('nodes', {}).items(): + if not enabled and filepath in backup_list: + backup_list.remove(filepath) + if enabled and filepath not in backup_list: + backup_list.append(filepath) + + cluster.log.debug('Backing up the following files: \n' + ' - 
' + '\n - '.join(backup_list)) + + backup_command = 'cd /tmp && ' \ + 'sudo tar -czvf /tmp/kubetools-backup.tar.gz -P $(sudo readlink -e %s) && ' \ + 'sudo ls -la /tmp/kubetools-backup.tar.gz && ' \ + 'sudo du -hs /tmp/kubetools-backup.tar.gz' % (' '.join(backup_list)) + + data_copy_res = cluster.nodes['all'].run(backup_command) + + cluster.log.debug('Backup created:\n%s' % data_copy_res) + + cluster.log.debug('Downloading nodes backups:') + for node in cluster.nodes['all'].get_ordered_members_list(provide_node_configs=True): + node['connection'].get('/tmp/kubetools-backup.tar.gz', + os.path.join(backup_nodes_data_dir, '%s.tar.gz' % node['name'])) + cluster.log.debug('Backup \'%s\' downloaded' % node['name']) + + cluster.log.verbose('Deleting backup file from nodes...') + cluster.nodes['all'].sudo('rm -f /tmp/kubetools-backup.tar.gz') + + +def export_etcd(cluster: KubernetesCluster): + backup_directory = prepare_backup_tmpdir(cluster) + etcd_node, is_custom_etcd_node = select_etcd_node(cluster) + cluster.context['backup_descriptor']['etcd']['image'] = retrieve_etcd_image(cluster, etcd_node) + + # Try to detect cluster health and other metadata like db size, leader + etcd_status = None + try: + result = etcd_node.sudo('etcdctl endpoint status --cluster -w json').get_simple_out() + cluster.log.verbose(result) + + etcd_status = json.load(io.StringIO(result.lower())) + parsed_etcd_status = {} + for item in etcd_status: + # get rid of https:// and :2379 + address = item['endpoint'].split('//')[1].split(':')[0] + node_name = cluster.nodes['all'].get_first_member(apply_filter={"internal_address": address}, provide_node_configs=True)['name'] + parsed_etcd_status[node_name] = item + cluster.context['backup_descriptor']['etcd']['status'] = parsed_etcd_status + except Exception: + cluster.log.verbose('Failed to load and parse ETCD status') + + if is_custom_etcd_node: + cluster.context['backup_descriptor']['etcd']['source'] = etcd_node.get_nodes_names()[0] + else: + # if user did not provide node, then we have to select leader by ourselves + if not etcd_status: + raise Exception('Failed to load ETCD status and impossible to detect ETCD leader for making snapshot from it') + etcd_leader_id = etcd_status[0]['status']['leader'] + etcd_leader_name = None + for name, item in parsed_etcd_status.items(): + if item['status']['header']['member_id'] == etcd_leader_id: + etcd_leader_name = name + if etcd_leader_name: + cluster.log.verbose('Detected ETCD leader: %s' % etcd_leader_name) + cluster.context['backup_descriptor']['etcd']['source'] = etcd_leader_name + etcd_node = cluster.nodes['master'].get_member_by_name(etcd_leader_name) + else: + raise Exception('Failed to detect ETCD leader - not possible to create backup from actual DB') + + snap_name = 'snapshot%s.db' % int(round(time.time() * 1000)) + endpoint_ip = etcd_node.get_ordered_members_list(provide_node_configs=True)[0]["internal_address"] + cluster.log.debug('Creating ETCD backup "%s"...' 
% snap_name) + result = etcd_node.sudo(f'etcdctl snapshot save /var/lib/etcd/{snap_name} --endpoints=https://{endpoint_ip}:2379 ' + f'&& sudo mv /var/lib/etcd/{snap_name} /tmp/{snap_name} ' + f'&& sudo ls -la /tmp/{snap_name} ' + f'&& sudo du -hs /tmp/{snap_name} ' + f'&& sudo chmod 666 /tmp/{snap_name}') + cluster.log.debug(result) + etcd_node.get('/tmp/' + snap_name, backup_directory + '/etcd.db') + cluster.log.verbose('Deleting ETCD snapshot file from "%s"...') + etcd_node.sudo('rm -f /tmp/%s' % snap_name) + + +def select_etcd_node(cluster): + custom_etcd_node = cluster.procedure_inventory.get('backup_plan', {}).get('etcd', {}).get('source_node') + + if custom_etcd_node: + etcd_node = cluster.nodes['all'].get_member_by_name(custom_etcd_node) + if etcd_node is None: + raise Exception('Unknown ETCD node selected as source') + return etcd_node, True + else: + return cluster.nodes['master'].get_any_member(), False + + +def retrieve_etcd_image(cluster, etcd_node): + """ + Todo: maybe take it from "/etc/kubernetes/manifests/etcd.yaml" ? + """ + node_name = etcd_node.get_nodes_names()[0] + if "docker" == cluster.inventory['services']['cri']['containerRuntime']: + cont_inspect = "docker inspect $(sudo docker ps -a | grep etcd-%s | awk '{print $1; exit}')" % node_name + etcd_container_json = json.loads(list(etcd_node.sudo(cont_inspect).values())[0].stdout)[0] + etcd_image_sha = etcd_container_json['Image'][7:] # remove "sha256:" prefix + + images_result = etcd_node.sudo("docker image ls --format '{{json .}}'") + formatted_images_result = "[" + ",".join(list(images_result.values())[0].stdout.strip().split('\n')) + "]" + images_json = json.loads(formatted_images_result) + for image in images_json: + if image['ID'] == etcd_image_sha[:len(image['ID'])]: + return f"{image['Repository']}:{image['Tag']}" + else: + cont_search = "sudo crictl ps --label io.kubernetes.pod.name=etcd-%s -aq | awk '{print $1; exit}'" % node_name + cont_inspect = f"crictl inspect $({cont_search})" + etcd_container_json = json.loads(list(etcd_node.sudo(cont_inspect).values())[0].stdout) + etcd_image_sha = etcd_container_json['info']['config']['image']['image'] + + images_json = json.loads(list(etcd_node.sudo("crictl images -v -o json").values())[0].stdout)['images'] + for image in images_json: + if image['id'] == etcd_image_sha: + return image['repoTags'][0] + + raise Exception("Unable to find etcd image on node %s" % node_name) + + +def export_kubernetes_version(cluster: KubernetesCluster): + master = cluster.nodes['master'].get_any_member() + version = master.sudo('kubectl get nodes --no-headers | head -n 1 | awk \'{print $5; exit}\'').get_simple_out() + cluster.context['backup_descriptor']['kubernetes']['version'] = version.strip() + + +# There is no way to parallel resources connection via Queue or Pool: +# the ssh connection is not possible to parallelize due to thread lock +def download_resources(log, resources, location, master: NodeGroup, namespace=None): + + if namespace: + log.debug('Downloading resources from namespace "%s"...' 
% namespace) + + actual_resources = [] + + if not resources: + log.debug('No resources found to download') + return actual_resources + + cmd = '' + for resource in resources: + if cmd != '': + cmd += ' && echo \'\n------------------------\n\' && ' + if namespace: + cmd += 'sudo kubectl -n %s get --ignore-not-found %s -o yaml' % (namespace, resource) + else: + cmd += 'sudo kubectl get --ignore-not-found %s -o yaml' % resource + + result = master.sudo(cmd).get_simple_out() + master.cluster.log.verbose(result) + + found_resources_results = result.split('------------------------') + for i, result in enumerate(found_resources_results): + resource = resources[i] + resource_file_path = os.path.join(location, '%s.yaml' % resource) + result = result.strip() + if result and result != '': + actual_resources.append(resource) + with open(resource_file_path, 'w') as resource_file_stream: + resource_file_stream.write(result) + + return actual_resources + + +def export_kubernetes(cluster): + backup_directory = prepare_backup_tmpdir(cluster) + master = cluster.nodes['master'].get_any_member() + + cluster.log.debug('Loading namespaces:') + namespaces_result = master.sudo('kubectl get ns -o yaml') + cluster.log.verbose(namespaces_result) + namespaces_string = list(namespaces_result.values())[0].stdout.strip() + namespaces_yaml = yaml.safe_load(namespaces_string) + + proposed_namespaces = cluster.procedure_inventory.get('backup_plan', {}).get('kubernetes', {}).get('namespaced_resources', {}).get('namespaces', 'all') + namespaces = [] + for item in namespaces_yaml['items']: + name = item['metadata']['name'] + if proposed_namespaces == 'all' or name in proposed_namespaces: + cluster.log.verbose('Namespace "%s" added' % name) + namespaces.append(name) + else: + cluster.log.verbose('Namespace "%s" excluded' % name) + + if proposed_namespaces != 'all': + for proposed_namespace in proposed_namespaces: + if proposed_namespace not in namespaces: + raise Exception('Proposed namespace "%s" not found in loaded cluster namespaces' % proposed_namespace) + + cluster.log.debug(namespaces) + kubernetes_res_dir = os.path.join(backup_directory, 'kubernetes_resources') + os.mkdir(kubernetes_res_dir) + + cluster.log.debug('Loading namespaced resources:') + resources_result = master.sudo('kubectl api-resources --verbs=list --namespaced -o name ' + '| grep -v "events.events.k8s.io" | grep -v "events" | sort | uniq') + cluster.log.verbose(resources_result) + parsed_resources = list(resources_result.values())[0].stdout.strip().split('\n') + proposed_resources = cluster.procedure_inventory.get('backup_plan', {}).get('kubernetes', {}).get('namespaced_resources', {}).get('resources', 'all') + + resources = [resource for resource in parsed_resources if proposed_resources == 'all' or resource in proposed_resources] + + for resource in parsed_resources: + if resource not in resources: + cluster.log.verbose('Resource "%s" excluded' % resource) + + if proposed_resources != 'all': + for proposed_resource in proposed_resources: + if proposed_resource not in resources: + raise Exception('Proposed resource "%s" not found in loaded cluster resources' % proposed_resource) + + cluster.log.debug(resources) + + namespaced_resources_map = {} + total_files = 0 + + for namespace in namespaces: + namespace_dir = os.path.join(kubernetes_res_dir, namespace) + os.mkdir(namespace_dir) + actual_resources = download_resources(cluster.log, resources, namespace_dir, master, namespace) + if actual_resources: + total_files += len(actual_resources) + 
namespaced_resources_map[namespace] = actual_resources + + cluster.log.debug('Loading non-namespaced resources:') + resources_result = master.sudo('kubectl api-resources --verbs=list --namespaced=false -o name ' + '| grep -v "events.events.k8s.io" | grep -v "events" | sort | uniq') + cluster.log.verbose(resources_result) + parsed_resources = list(resources_result.values())[0].stdout.strip().split('\n') + proposed_resources = cluster.procedure_inventory.get('backup_plan', {}).get('kubernetes', {}).get('nonnamespaced_resources', 'all') + + resources = [resource for resource in parsed_resources if proposed_resources == 'all' or resource in proposed_resources] + + for resource in parsed_resources: + if resource not in resources: + cluster.log.verbose('Resource "%s" excluded' % resource) + + if proposed_resources != 'all': + for proposed_resource in proposed_resources: + if proposed_resource not in resources: + raise Exception('Proposed resource "%s" not found in loaded cluster resources' % proposed_resource) + + cluster.log.debug(resources) + + nonnamespaced_resources_list = download_resources(cluster.log, resources, kubernetes_res_dir, master) + total_files += len(nonnamespaced_resources_list) + + cluster.log.verbose('Total files saved: %s' % total_files) + + if namespaced_resources_map: + cluster.context['backup_descriptor']['kubernetes']['resources']['namespaced'] = namespaced_resources_map + + if nonnamespaced_resources_list: + cluster.context['backup_descriptor']['kubernetes']['resources']['nonnamespaced'] = nonnamespaced_resources_list + + +def make_descriptor(cluster): + backup_directory = prepare_backup_tmpdir(cluster) + + cluster.context['backup_descriptor']['kubernetes']['thirdparties'] = cluster.inventory['services']['thirdparties'] + cluster.context['backup_descriptor']['meta']['time']['finished'] = datetime.datetime.now() + + with open(os.path.join(backup_directory, 'descriptor.yaml'), 'w') as output: + output.write(yaml.dump(cluster.context['backup_descriptor'])) + + +def pack_data(cluster): + cluster_name = cluster.inventory['cluster_name'] + backup_directory = prepare_backup_tmpdir(cluster) + + backup_filename = 'backup-%s-%s.tar.gz' % (cluster_name, utils.get_current_timestamp_formatted()) + + target = utils.get_resource_absolute_path(cluster.procedure_inventory.get('backup_location', backup_filename)) + if os.path.isdir(target): + target = os.path.join(target, backup_filename) + + cluster.log.debug('Packing all data...') + with tarfile.open(target, "w:gz") as tar_handle: + for root, dirs, files in os.walk(backup_directory): + for file in files: + pathname = os.path.join(root, file) + tar_handle.add(pathname, pathname.replace(backup_directory, '')) + tar_handle.close() + + cluster.log.verbose('Cleaning up...') + shutil.rmtree(backup_directory, ignore_errors=True) + + +tasks = OrderedDict({ + "verify_backup_location": verify_backup_location, + "export": { + "inventory": { + "cluster_yaml": export_cluster_yaml, + "ansible_inventory": export_ansible_inventory, + }, + "lists": { + "rpms": export_packages_list, + "hostname": export_hostname, + }, + "nodes": export_nodes, + "etcd": export_etcd, + "cluster_version": export_kubernetes_version, + "kubernetes": export_kubernetes, + }, + "make_descriptor": make_descriptor, + "pack": pack_data, +}) + + +def main(cli_arguments=None): + cli_help = ''' + Script for making backup of Kubernetes resources and nodes contents. 
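+    The backup includes cluster.yaml, the ansible inventory, an etcd snapshot, exported Kubernetes resources and selected files from the nodes, packed into a single tar.gz archive.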
+ + How to use: + + ''' + + parser = flow.new_parser(cli_help) + parser.add_argument('--tasks', + default='', + help='define comma-separated tasks to be executed') + + parser.add_argument('--exclude', + default='', + help='exclude comma-separated tasks from execution') + + parser.add_argument('procedure_config', metavar='procedure_config', type=str, + help='config file for backup procedure') + + if cli_arguments is None: + args = parser.parse_args() + else: + args = parser.parse_args(cli_arguments) + + defined_tasks = [] + defined_excludes = [] + + if args.tasks != '': + defined_tasks = args.tasks.split(",") + + if args.exclude != '': + defined_excludes = args.exclude.split(",") + + context = flow.create_context(args, procedure='backup') + context['inventory_regenerate_required'] = False + context['execution_arguments']['disable_dump'] = False + context['backup_descriptor'] = { + 'meta': { + 'time': { + 'started': datetime.datetime.now() + } + }, + 'etcd': {}, + 'nodes': {}, + 'kubernetes': { + 'resources': {} + } + } + + flow.run( + tasks, + defined_tasks, + defined_excludes, + args.config, + context, + procedure_inventory_filepath=args.procedure_config, + cumulative_points=install.cumulative_points + ) + + +if __name__ == '__main__': + main() diff --git a/kubetool/procedures/cert_renew.py b/kubetool/procedures/cert_renew.py new file mode 100755 index 000000000..cc7ac5466 --- /dev/null +++ b/kubetool/procedures/cert_renew.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python3 + +from collections import OrderedDict + +from kubetool import plugins, k8s_certs +from kubetool.core import flow + + +def renew_nginx_ingress_certs_task(cluster): + # check that renewal is required for nginx + if not cluster.procedure_inventory.get("nginx-ingress-controller"): + cluster.log.debug("Skipped: nginx ingress controller certs renewal is not required") + return + + cluster.log.debug("Starting certificate renewal for nginx ingress controller, plugin will be reinstalled") + plugin = cluster.inventory["plugins"]["nginx-ingress-controller"] + plugins.install_plugin(cluster, "nginx-ingress-controller", plugin["installation"]['procedures']) + + +def k8s_certs_renew_task(cluster): + if not cluster.procedure_inventory.get("kubernetes"): + cluster.log.debug("Skipped: kubernetes certs renewal is not required") + return + + cluster.log.debug("Starting certificate renewal for kubernetes") + cluster.nodes['master'].call(k8s_certs.renew_apply) + + +def k8s_certs_overview_task(cluster): + cluster.nodes['master'].call(k8s_certs.k8s_certs_overview) + + +tasks = OrderedDict({ + "kubernetes": k8s_certs_renew_task, + "nginx_ingress_controller": renew_nginx_ingress_certs_task, + "certs_overview": k8s_certs_overview_task +}) + + +def main(cli_arguments=None): + + cli_help = ''' + Script for certificates renewal on existing Kubernetes cluster. 
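+    Depending on the procedure inventory, control plane certificates and/or the default certificate of the nginx ingress controller are renewed.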
+ + How to use: + + ''' + + parser = flow.new_parser(cli_help) + parser.add_argument('--tasks', + default='', + help='define comma-separated tasks to be executed') + + parser.add_argument('--exclude', + default='', + help='exclude comma-separated tasks from execution') + + parser.add_argument('procedure_config', metavar='procedure_config', type=str, + help='config file for add_node procedure') + + if cli_arguments is None: + args = parser.parse_args() + else: + args = parser.parse_args(cli_arguments) + + defined_tasks = [] + defined_excludes = [] + + if args.tasks != '': + defined_tasks = args.tasks.split(",") + + if args.exclude != '': + defined_excludes = args.exclude.split(",") + + context = flow.create_context(args, procedure='cert_renew') + context['inventory_regenerate_required'] = True + + flow.run( + tasks, + defined_tasks, + defined_excludes, + args.config, + context, + procedure_inventory_filepath=args.procedure_config, + ) + + +if __name__ == '__main__': + main() diff --git a/kubetool/procedures/check_iaas.py b/kubetool/procedures/check_iaas.py new file mode 100755 index 000000000..007441338 --- /dev/null +++ b/kubetool/procedures/check_iaas.py @@ -0,0 +1,533 @@ +#!/usr/bin/env python3 + +import argparse +import ipaddress +import math +import sys +from collections import OrderedDict +import time + +import fabric + +from kubetool.core import flow +from kubetool import system +from kubetool.testsuite import TestSuite, TestCase, TestFailure, TestWarn + + +def connection_ssh_connectivity(cluster): + with TestCase(cluster.context['testsuite'], '001', 'SSH', 'Connectivity', default_results='Connected'): + failed_nodes = [] + for node in cluster.nodes['all'].get_ordered_members_list(provide_node_configs=True): + try: + cluster.log.verbose(node['connection'].run("echo 1")) + except fabric.group.GroupException as e: + failed_nodes.append(node['name']) + cluster.log.error("Connection test failed for node \"%s\"" % node['name']) + cluster.log.error("Exception details:") + cluster.log.error(e) + if failed_nodes: + raise TestFailure("Failed to connect to %s nodes" % len(failed_nodes), + hint="Failed to connect from deploy node to the remote node of the cluster. Check that " + "the inventory is filled in correctly (key, username, nodes addresses), verify " + "access to remote nodes.") + + +def connection_ssh_latency_single(cluster): + with TestCase(cluster.context['testsuite'], '002', 'SSH', 'Latency - Single Thread', + minimal=cluster.globals['compatibility_map']['network']['connection']['latency']['single']['critical'], + recommended=cluster.globals['compatibility_map']['network']['connection']['latency']['single']['recommended']) as tc: + i = 0 + measurements = [] + while i < 5: + i += 1 + for node in cluster.nodes['all'].get_ordered_members_list(provide_node_configs=True): + time_start = time.time() + node['connection'].run("echo 1") + time_end = time.time() + diff = (time_end - time_start) * 1000 + cluster.log.debug('Connection to %s - %sms' % (node['name'], diff)) + measurements.append(diff) + average_latency = math.floor(sum(measurements) / cluster.nodes['all'].nodes_amount() / 5) + if average_latency > cluster.globals['compatibility_map']['network']['connection']['latency']['single']['critical']: + raise TestFailure("Very high latency: %sms" % average_latency, + hint="A very high latency was detected between the deploy node and cluster nodes. " + "Check your network settings and status. It is necessary to reduce latency to %sms." 
+ % cluster.globals['compatibility_map']['network']['connection']['latency']['single']['critical']) + if average_latency > cluster.globals['compatibility_map']['network']['connection']['latency']['single']['recommended']: + raise TestWarn("High latency: %sms" % average_latency, + hint="Detected latency is higher than recommended value (%sms). Check your network settings " + "and status." % cluster.globals['compatibility_map']['network']['connection']['latency']['single']['recommended']) + tc.success(results="%sms" % average_latency) + + +def connection_ssh_latency_multiple(cluster): + with TestCase(cluster.context['testsuite'], '003', 'SSH', 'Latency - Multi Thread', + minimal=cluster.globals['compatibility_map']['network']['connection']['latency']['multi']['critical'], + recommended=cluster.globals['compatibility_map']['network']['connection']['latency']['multi']['recommended']) as tc: + i = 0 + measurements = [] + while i < 10: + i += 1 + time_start = time.time() + cluster.nodes['all'].run("echo 1") + time_end = time.time() + diff = (time_end - time_start) * 1000 + cluster.log.debug('Average latency at step %s - %sms' % (i, diff)) + measurements.append(diff) + average_latency = math.floor(sum(measurements) / 10) + if average_latency > cluster.globals['compatibility_map']['network']['connection']['latency']['multi']['critical']: + raise TestFailure("Very high latency: %sms" % average_latency, + hint="A very high latency was detected between the deploy node and cluster nodes. " + "Check your network settings and status. It is necessary to reduce latency to %sms." + % cluster.globals['compatibility_map']['network']['connection']['latency']['multi']['critical']) + if average_latency > cluster.globals['compatibility_map']['network']['connection']['latency']['multi']['recommended']: + raise TestWarn("High latency: %sms" % average_latency, + hint="Detected latency is higher than recommended value (%sms). Check your network settings " + "and status." % cluster.globals['compatibility_map']['network']['connection']['latency']['multi']['recommended']) + tc.success(results="%sms" % average_latency) + + +def connection_sudoer_access(cluster): + with TestCase(cluster.context['testsuite'], '004', 'SSH', 'Sudoer Access', default_results='Access provided'): + non_root = [] + results = cluster.nodes['all'].sudo("whoami") + cluster.log.verbose(results) + for connection, result in results.items(): + if result.stdout.strip() != 'root': + non_root.append(connection.host) + if non_root: + raise TestFailure("Non-sudoer access found at: %s" % ", ".join(non_root), + hint="Certain nodes do not have the appropriate sudoer access. 
At this nodes add " + "connection user to sudoers group.") + + +def hardware_members_amount(cluster, group_name): + beauty_name = group_name.capitalize() + if group_name == 'vip': + beauty_name = 'VIP' + if group_name == 'all': + beauty_name = 'Total Node' + + with TestCase(cluster.context['testsuite'], '005', 'Hardware', '%ss Amount' % beauty_name, + minimal=cluster.globals['compatibility_map']['hardware']['minimal'][group_name]['amount'], + recommended=cluster.globals['compatibility_map']['hardware']['recommended'][group_name]['amount']) as tc: + amount = 0 + if group_name == 'vip': + amount = len(cluster.inventory.get('vrrp_ips', [])) + else: + group = cluster.nodes.get(group_name) + if group is not None: + amount = group.nodes_amount() + + s = '' + if amount != 1: + s = 's' + + if amount < cluster.globals['compatibility_map']['hardware']['minimal'][group_name]['amount']: + beauty_name = group_name + if group_name == 'all': + beauty_name = 'all node' + raise TestFailure("Less than minimal. Detected %s item%s" % (amount, s), + hint="Increase the number of resources, so the number of %ss in the cluster should not " + "be less than %s" % (beauty_name, cluster.globals['compatibility_map']['hardware']['minimal'][group_name]['amount'])) + + if amount < cluster.globals['compatibility_map']['hardware']['recommended'][group_name]['amount']: + beauty_name = group_name + if group_name == 'all': + beauty_name = 'all node' + raise TestWarn("Less than recommended. Detected %s item%s" % (amount, s), + hint="Increase the number of resources, so the number of %ss in the cluster should not " + "be less than %s" % (beauty_name, cluster.globals['compatibility_map']['hardware']['minimal'][group_name]['amount'])) + + tc.success("%s item%s" % (amount, s)) + + +def hardware_cpu(cluster, group_name): + with TestCase(cluster.context['testsuite'], '006', 'Hardware', 'VCPUs Amount - %ss' % group_name.capitalize(), + minimal=cluster.globals['compatibility_map']['hardware']['minimal'][group_name]['vcpu'], + recommended=cluster.globals['compatibility_map']['hardware']['recommended'][group_name]['vcpu']) as tc: + if cluster.nodes.get(group_name) is None or cluster.nodes[group_name].is_empty(): + return tc.success(results='Skipped') + results = cluster.nodes[group_name].sudo("nproc --all") + cluster.log.verbose(results) + minimal_amount = None + for connection, result in results.items(): + amount = int(result.stdout) + if minimal_amount is None or minimal_amount > amount: + minimal_amount = amount + if amount < cluster.globals['compatibility_map']['hardware']['minimal'][group_name]['vcpu']: + cluster.log.error('%s node %s has insufficient VCPUs: expected %s, but %s found.' + % (group_name.capitalize(), connection.host, cluster.globals['compatibility_map']['hardware']['minimal'][group_name]['vcpu'], amount)) + elif amount < cluster.globals['compatibility_map']['hardware']['recommended'][group_name]['vcpu']: + cluster.log.warning('%s node %s has less VCPUs than recommended: recommended %s, but %s found.' + % (group_name.capitalize(), connection.host, cluster.globals['compatibility_map']['hardware']['recommended'][group_name]['vcpu'], amount)) + else: + cluster.log.debug('%s node %s has enough VCPUs: %s' % (group_name.capitalize(), connection.host, amount)) + + s = '' + if minimal_amount != 1: + s = 's' + + if minimal_amount < cluster.globals['compatibility_map']['hardware']['minimal'][group_name]['vcpu']: + raise TestFailure("Less than minimal. 
Detected %s VCPU%s" % (minimal_amount, s), + hint="Increase the number of VCPUs in the node configuration to at least the minimum " + "value: %s VCPUs." % cluster.globals['compatibility_map']['hardware']['minimal'][group_name]['vcpu']) + if minimal_amount < cluster.globals['compatibility_map']['hardware']['recommended'][group_name]['vcpu']: + raise TestWarn("Less than recommended. Detected %s VCPU%s" % (minimal_amount, s), + hint="Increase the number of VCPUs in the node configuration up to %s VCPUs." + % cluster.globals['compatibility_map']['hardware']['recommended'][group_name]['vcpu']) + tc.success(results='%s VCPU%s' % (minimal_amount, s)) + + +def hardware_ram(cluster, group_name): + with TestCase(cluster.context['testsuite'], '007', 'Hardware', 'RAM Amount - %ss' % group_name.capitalize(), + minimal=cluster.globals['compatibility_map']['hardware']['minimal'][group_name]['ram'], + recommended=cluster.globals['compatibility_map']['hardware']['recommended'][group_name]['ram']) as tc: + if cluster.nodes.get(group_name) is None or cluster.nodes[group_name].is_empty(): + return tc.success(results='Skipped') + results = cluster.nodes[group_name].sudo("cat /proc/meminfo | awk '/DirectMap/ { print $2 }'") + cluster.log.verbose(results) + minimal_amount = None + for connection, result in results.items(): + amount = math.floor(sum(map(lambda x: int(x), result.stdout.strip().split("\n"))) / 1000000) + if minimal_amount is None or minimal_amount > amount: + minimal_amount = amount + if amount < cluster.globals['compatibility_map']['hardware']['minimal'][group_name]['ram']: + cluster.log.error('%s node %s has insufficient RAM: expected %sGB, but %sGB found.' + % (group_name.capitalize(), connection.host, cluster.globals['compatibility_map']['hardware']['minimal'][group_name]['ram'], amount)) + elif amount < cluster.globals['compatibility_map']['hardware']['recommended'][group_name]['ram']: + cluster.log.warning('%s node %s has less RAM than recommended: recommended %sGB, but %sGB found.' + % (group_name.capitalize(), connection.host, cluster.globals['compatibility_map']['hardware']['recommended'][group_name]['ram'], amount)) + else: + cluster.log.debug('%s node %s has enough RAM: %sGB' % (group_name.capitalize(), connection.host, amount)) + if minimal_amount < cluster.globals['compatibility_map']['hardware']['minimal'][group_name]['ram']: + raise TestFailure("Less than minimal. Detected %sGB" % minimal_amount, + hint="Increase the number of RAM in the node configuration to at least the minimum " + "value: %sGB." % cluster.globals['compatibility_map']['hardware']['minimal'][group_name]['ram']) + if minimal_amount < cluster.globals['compatibility_map']['hardware']['recommended'][group_name]['ram']: + raise TestWarn("Less than recommended. Detected %sGB" % minimal_amount, + hint="Increase the number of RAM in the node configuration up to %sGB." 
+ % cluster.globals['compatibility_map']['hardware']['recommended'][group_name]['ram']) + tc.success(results='%sGB' % minimal_amount) + + +def system_distributive(cluster): + with TestCase(cluster.context['testsuite'], '008', 'System', 'Distributive') as tc: + supported_distributives = cluster.globals['compatibility_map']['distributives'].keys() + + cluster.log.debug(system.detect_os_family(cluster, suppress_exceptions=True)) + + detected_unsupported_os = [] + detected_supported_os = [] + detected_unsupported_version = [] + supported_versions = [] + for address, context_data in cluster.context["nodes"].items(): + detected_os = '%s %s' % (context_data['os']['name'], context_data['os']['version']) + if context_data['os']['family'] == 'unsupported': + detected_unsupported_os.append(detected_os) + cluster.log.error('Host %s is running unsupported OS \"%s\"' % (address, detected_os)) + elif context_data['os']['family'] == 'unknown': + detected_unsupported_version.append(detected_os) + os_family_list = cluster.globals["compatibility_map"]["distributives"][context_data['os']['name']] + versions = [] + for os_family_item in os_family_list: + versions.extend(os_family_item["versions"]) + supported_versions.append('%s: %s' % (context_data['os']['name'], versions)) + cluster.log.error('Host %s is running unknown OS family \"%s\"' % (address, detected_os)) + else: + detected_supported_os.append(detected_os) + cluster.log.debug('Host %s is running \"%s\"' % (address, detected_os)) + + detected_supported_os = list(set(detected_supported_os)) + detected_unsupported_os = list(set(detected_unsupported_os)) + detected_unsupported_version = list(set(detected_unsupported_version)) + supported_versions = list(set(supported_versions)) + + if detected_unsupported_os: + raise TestFailure("Unsupported OS: %s" % ", ".join(detected_unsupported_os), + hint="Reinstall the OS on the host to one of the supported distributives: %s" % ", ".join(supported_distributives)) + + if detected_unsupported_version: + raise TestFailure("Unsupported version: %s" % ", ".join(detected_unsupported_version), + hint="Reinstall the OS on the host to one of the supported versions: %s" % \ + ", ".join(supported_versions)) + + tc.success(results=", ".join(detected_supported_os)) + + +def pod_subnet_connectivity(cluster): + with TestCase(cluster.context['testsuite'], '009', 'Network', 'PodSubnet', default_results='Connected'): + pod_subnet = cluster.inventory['services']['kubeadm']['networking']['podSubnet'] + failed_nodes = check_subnet_connectivity(cluster, pod_subnet) + + if failed_nodes: + raise TestFailure(f"Failed to connect to {len(failed_nodes)} nodes.", + hint=f"Traffic is not allowed for pod subnet ({pod_subnet}) on nodes: {failed_nodes}.") + + +def service_subnet_connectivity(cluster): + with TestCase(cluster.context['testsuite'], '010', 'Network', 'ServiceSubnet', default_results='Connected'): + service_subnet = cluster.inventory['services']['kubeadm']['networking']['serviceSubnet'] + failed_nodes = check_subnet_connectivity(cluster, service_subnet) + + if failed_nodes: + raise TestFailure(f"Failed to connect to {len(failed_nodes)} nodes.", + hint=f"Traffic is not allowed for service subnet ({service_subnet}) on nodes: {failed_nodes}.") + + +def cmd_for_ports(ports, query): + result = "" + for port in ports: + result += f" && echo 'port: {port}' && ( {query % port} ) " + return result[3:] + + +def tcp_connect(log, node_from, node_to, tcp_ports, host_to_ip, mtu): + # 40 bytes for headers + mtu -= 40 + log.verbose(f"Trying connection from 
'{node_from['name']}' to '{node_to['name']}") + cmd = cmd_for_ports(tcp_ports, f"echo $(dd if=/dev/urandom bs={mtu} count=1) >/dev/tcp/{host_to_ip[node_to['name']]}/%s") + node_from['connection'].sudo(cmd) + + +def get_start_socat_cmd(): + return "sudo nohup socat TCP-LISTEN:%s,reuseaddr,fork - &> /dev/null &" + + +def get_stop_socat_cmd(): + return "port=%s;pid=$(ps aux | grep ' socat ' | grep $port | grep -v grep | awk '{print $2}') " \ + "&& if [ ! -z $pid ]; then sudo kill -9 $pid; echo \"killed pid $pid for port $port\"; fi" + + +def check_tcp_connect_between_all_nodes(cluster, node_list, tcp_ports, host_to_ip): + mtu = cluster.inventory['plugins']['calico']['mtu'] + + cluster.log.verbose("Searching for success node...") + success_node = None + failed_nodes = [] + for node in node_list: + failed_nodes.append(node['name']) + nodes_for_check = [] + for node in node_list: + nodes_for_check.append(node) + + for i in range(0, len(node_list)): + for j in range(i + 1, len(node_list)): + try: + tcp_connect(cluster.log, node_list[j], node_list[i], tcp_ports, host_to_ip, mtu) + # If node has at least one successful connection with another node - this node has appropriate settings. + success_node = node_list[i] + cluster.log.verbose(f"Successful node found: {success_node['name']}") + failed_nodes.remove(success_node["name"]) + break + except Exception as e: + cluster.log.error(f"Subnet connectivity test failed from '{node_list[j]['name']}' to '{node_list[i]['name']}'") + cluster.log.verbose(f"Exception details: {e}") + + nodes_for_check.remove(node_list[i]) + if success_node is not None: + break + + # TCP connect from found successful node to every other node + if success_node is not None: + for node in nodes_for_check: + try: + tcp_connect(cluster.log, success_node, node, tcp_ports, host_to_ip, mtu) + failed_nodes.remove(node["name"]) + except Exception as e: + cluster.log.error(f"Subnet connectivity test failed from '{success_node['name']}' to '{node['name']}'") + cluster.log.verbose(f"Exception details: {e}") + + return failed_nodes + + +def check_subnet_connectivity(cluster, subnet): + inet = ipaddress.ip_network(subnet) + net_mask = str(inet.netmask) + subnet_hosts = list(inet.hosts()) + subnet_hosts_len = len(subnet_hosts) + + iface_cmd = "sudo ip -o a | grep %s | awk '{print $2}'" + tcp_ports = ["30050"] + node_list = cluster.nodes['all'].get_ordered_members_list(provide_node_configs=True) + host_to_ip = {} + + # Create alias from node network interface for subnet on every node + # And run process that LISTEN TCP port + i = 30 + for node in node_list: + random_host = subnet_hosts[subnet_hosts_len - i] + host_to_ip[node['name']] = random_host + iface = iface_cmd % node['internal_address'] + socat_cmd = cmd_for_ports(tcp_ports, get_start_socat_cmd()) + node['connection'].sudo(f"ip a add {random_host}/{net_mask} dev $({iface}); " + socat_cmd) + i = i + 1 + + failed_nodes = check_tcp_connect_between_all_nodes(cluster, node_list, tcp_ports, host_to_ip) + + i = 30 + # Remove created aliases form network interfaces and kill created during test processes + for node in node_list: + random_host = subnet_hosts[subnet_hosts_len - i] + iface = iface_cmd % node['internal_address'] + socat_cmd = cmd_for_ports(tcp_ports, get_stop_socat_cmd()) + node['connection'].sudo(socat_cmd + f" && ip a del {random_host}/{net_mask} dev $({iface})", warn=True) + i = i + 1 + + return failed_nodes + + +def check_tcp_ports(cluster): + with TestCase(cluster.context['testsuite'], '011', 'Network', 'TCPPorts', 
default_results='Connected'): + tcp_ports = ["80", "443", "6443", "2379", "2380", "10250", "10251", "10252", "30001", "30002"] + node_list = cluster.nodes['all'].get_ordered_members_list(provide_node_configs=True) + host_to_ip = {} + + # Run process that LISTEN TCP port + for node in node_list: + host_to_ip[node['name']] = node['internal_address'] + socat_cmd = cmd_for_ports(tcp_ports, get_start_socat_cmd()) + res = node['connection'].sudo(socat_cmd) + cluster.log.verbose(res) + + failed_nodes = check_tcp_connect_between_all_nodes(cluster, node_list, tcp_ports, host_to_ip) + + # Kill created during test processes + for node in node_list: + socat_cmd = cmd_for_ports(tcp_ports, get_stop_socat_cmd()) + node['connection'].sudo(socat_cmd) + + if failed_nodes: + raise TestFailure(f"Failed to connect to {len(failed_nodes)} nodes.", + hint=f"Not all needed tcp ports are opened on nodes: {failed_nodes}. " + f"Ports that should be opened: {tcp_ports}") + + +def make_reports(cluster): + if not cluster.context['execution_arguments'].get('disable_csv_report', False): + cluster.context['testsuite'].save_csv(cluster.context['execution_arguments']['csv_report'], cluster.context['execution_arguments']['csv_report_delimiter']) + if not cluster.context['execution_arguments'].get('disable_html_report', False): + cluster.context['testsuite'].save_html(cluster.context['execution_arguments']['html_report'], cluster.context['initial_procedure'].upper()) + + +tasks = OrderedDict({ + 'ssh': { + 'connectivity': connection_ssh_connectivity, + 'latency': { + 'single': connection_ssh_latency_single, + 'multiple': connection_ssh_latency_multiple + }, + 'sudoer_access': connection_sudoer_access, + }, + 'network': { + 'pod_subnet_connectivity': pod_subnet_connectivity, + 'service_subnet_connectivity': service_subnet_connectivity, + 'check_tcp_ports': check_tcp_ports + }, + 'hardware': { + 'members_amount': { + 'vips': lambda cluster: hardware_members_amount(cluster, 'vip'), + 'balancers': lambda cluster: hardware_members_amount(cluster, 'balancer'), + 'masters': lambda cluster: hardware_members_amount(cluster, 'master'), + 'workers': lambda cluster: hardware_members_amount(cluster, 'worker'), + 'total': lambda cluster: hardware_members_amount(cluster, 'all'), + }, + 'cpu': { + 'balancers': lambda cluster: hardware_cpu(cluster, 'balancer'), + 'masters': lambda cluster: hardware_cpu(cluster, 'master'), + 'workers': lambda cluster: hardware_cpu(cluster, 'worker') + }, + 'ram': { + 'balancers': lambda cluster: hardware_ram(cluster, 'balancer'), + 'masters': lambda cluster: hardware_ram(cluster, 'master'), + 'workers': lambda cluster: hardware_ram(cluster, 'worker') + } + }, + 'system': { + 'distributive': system_distributive + } +}) + + +def main(cli_arguments=None): + parser = argparse.ArgumentParser(description=''' +Script for checking Kubernetes cluster IAAS layer. 
+ +How to use: + +''', formatter_class=argparse.RawTextHelpFormatter) + + parser.add_argument('-v', '--verbose', + action='store_true', + help='enable the verbosity mode') + + parser.add_argument('-c', '--config', + default='cluster.yaml', + help='define main cluster configuration file') + + parser.add_argument('--tasks', + default='', + help='define comma-separated tasks to be executed') + + parser.add_argument('--exclude', + default='', + help='exclude comma-separated tasks from execution') + + parser.add_argument('--csv-report', + default='report.csv', + help='define CSV report file location') + + parser.add_argument('--csv-report-delimiter', + default=';', + help='define delimiter type for CSV report') + + parser.add_argument('--html-report', + default='report.html', + help='define HTML report file location') + + parser.add_argument('--disable-csv-report', + action='store_true', + help='forcibly disable CSV report file creation') + + parser.add_argument('--disable-html-report', + action='store_true', + help='forcibly disable HTML report file creation') + + if cli_arguments is None: + args = parser.parse_args() + else: + args = parser.parse_args(cli_arguments) + + defined_tasks = [] + defined_excludes = [] + + if args.tasks != '': + defined_tasks = args.tasks.split(",") + + if args.exclude != '': + defined_excludes = args.exclude.split(",") + + context = flow.create_context(args, procedure='iaas') + context['testsuite'] = TestSuite() + + cluster = flow.run( + tasks, + defined_tasks, + defined_excludes, + args.config, + context, + print_final_message=False + ) + + # Final summary should be printed only to stdout with custom formatting + # If test results are required for parsing, they can be found in the test result files + print(cluster.context['testsuite'].get_final_summary()) + cluster.context['testsuite'].print_final_status(cluster.log) + make_reports(cluster) + return cluster.context['testsuite'] + + +if __name__ == '__main__': + testsuite = main() + if testsuite.is_any_test_failed(): + sys.exit(1) diff --git a/kubetool/procedures/check_paas.py b/kubetool/procedures/check_paas.py new file mode 100755 index 000000000..37189bf6c --- /dev/null +++ b/kubetool/procedures/check_paas.py @@ -0,0 +1,659 @@ +#!/usr/bin/env python3 + +import argparse +import sys +import time +from collections import OrderedDict +import re +from typing import List + +import yaml +import ruamel.yaml + +from kubetool import packages as pckgs, system +from kubetool.procedures import check_iaas +from kubetool.core import flow +from kubetool.testsuite import TestSuite, TestCase, TestFailure, TestWarn + + +def services_status(cluster, service_type): + with TestCase(cluster.context['testsuite'], '201', "Services", "%s Status" % service_type.capitalize(), + default_results='active (running)'): + service_name = service_type + + if cluster.inventory['services']['packages']['associations'].get(service_type): + service_name = cluster.inventory['services']['packages']['associations'][service_type]['service_name'] + + group = cluster.nodes['all'] + if service_type == 'haproxy': + group = cluster.nodes.get('balancer', {}) + elif service_type == 'keepalived': + group = cluster.nodes.get('keepalived', {}) + elif service_type == 'docker' or service_type == "containerd" or service_type == 'kubelet': + group = cluster.nodes['master'].include_group(cluster.nodes['worker']) + + if not group or group.is_empty(): + raise TestWarn("No nodes to check service status", + hint="The node group to check the service is empty. 
Check skipped.") + + result = group.sudo('systemctl status %s' % service_name, warn=True) + cluster.log.verbose(result) + + status_regexp = re.compile("Active:\s([a-z\s()]*)(\ssince|$)", re.M) + + statuses = [] + failed = False + for connection, node_result in result.items(): + if node_result.return_code == 4: + statuses.append('service is missing') + failed = True + cluster.log.debug('%s is not presented on host %s, skipped' + % (service_type.capitalize(), connection.host)) + continue + matches = re.findall(status_regexp, node_result.stdout) + if matches: + status = matches[0][0].strip() + cluster.log.debug( + '%s status is \"%s\" at host %s' % (service_type.capitalize(), status, connection.host)) + if status != 'active (running)': + statuses.append(status) + failed = True + elif node_result.return_code != 0: + failed = True + cluster.log.error('%s status has bad exit code \"%s\" at host %s' + % (service_type.capitalize(), node_result.return_code, connection.host)) + else: + raise Exception('Failed to detect status for \"%s\"' % connection.host) + + statuses = list(set(statuses)) + + if failed: + raise TestFailure("Bad status detected: %s" % ', '.join(statuses), + hint="Fix the service to be enabled and has running status.") + + +def recommended_system_packages_versions(cluster): + """ + Task which checks if configured "system" packages versions are compatible with configured k8s version and OS. + Fails if unable to detect OS family. + Warns if configured not recommended k8s version or if configured not recommended system packages versions. + """ + with TestCase(cluster.context['testsuite'], '204', "Services", f"Recommended packages version") as tc: + version_key = system.get_compatibility_version_key(cluster) + if not version_key: + raise TestFailure("OS is unknown or multiple OS present") + k8s_version = cluster.inventory['services']['kubeadm']['kubernetesVersion'] + compatibility = cluster.globals["compatibility_map"]["software"] + if k8s_version not in compatibility["kubeadm"]: + raise TestWarn(f"Using not recommended k8s version: {k8s_version}") + + # Mapping "system_package_alias -> expected_packages_names -> expected_versions" + # We assume that system packages have word "haproxy"/"keepalived"/"docker"/"containerd"/"podman" in their name, + # if not - then we may miss such package + expected_system_packages = { + "haproxy": {"haproxy": compatibility["haproxy"][k8s_version][version_key]}, + "keepalived": {"keepalived": compatibility["keepalived"][k8s_version][version_key]} + } + if "docker" in cluster.inventory['services']['cri']['containerRuntime']: + expected_system_packages["docker"] = { + "docker": compatibility["docker"][k8s_version][version_key], + "containerd": compatibility["containerd"][k8s_version][version_key] + } + elif "containerd" in cluster.inventory["services"]["cri"]["containerRuntime"]: + expected_system_packages["containerd"] = { + "containerd": compatibility["containerd"][k8s_version][version_key], + "podman": compatibility["podman"][k8s_version][version_key] + } + + good_results = set() + bad_results = [] + for package_alias, expected_packages in expected_system_packages.items(): + actual_packages = cluster.inventory["services"]["packages"]["associations"][package_alias]["package_name"] + if not isinstance(actual_packages, list): + actual_packages = [actual_packages] + for expected_pckg, version in expected_packages.items(): + version = version.replace("*", "") + is_found = False + for actual_pckg in actual_packages: + if expected_pckg in actual_pckg: + is_found 
= True + if f"-{version}" in actual_pckg or f"={version}" in actual_pckg: + good_results.add(actual_pckg) + else: + bad_results.append(f"{actual_pckg} is not recommended, recommended version is {version}") + if not is_found: + bad_results.append(f"{expected_pckg} package is not found in inventory") + + if bad_results: + bad_results = yaml.safe_dump(bad_results) + raise TestWarn("detected not recommended packages versions", hint=bad_results) + cluster.log.debug(f"found packages: {good_results}") + tc.success("all packages have recommended versions") + + +def system_packages_versions(cluster, pckg_alias): + """ + Verifies that system packages are installed on required nodes and have equal versions. + Failure is shown if check is not successful. + :param cluster: main cluster object. + :param pckg_alias: system package alias to retrieve "package_name" association. + """ + with TestCase(cluster.context['testsuite'], '205', "Services", f"{pckg_alias} version") as tc: + if pckg_alias == "docker" or pckg_alias == "containerd": + group = cluster.nodes['master'].include_group(cluster.nodes['worker']) + elif pckg_alias == "keepalived" or pckg_alias == "haproxy": + if "balancer" in cluster.nodes and not cluster.nodes['balancer'].is_empty(): + group = cluster.nodes['balancer'] + else: + raise TestWarn("balancer group is not present") + else: + raise Exception(f"Unknown system package alias: {pckg_alias}") + + packages = cluster.inventory['services']['packages']['associations'][pckg_alias]['package_name'] + if not isinstance(packages, list): + packages = [packages] + return check_packages_versions(cluster, tc, group, packages) + + +def generic_packages_versions(cluster): + """ + Verifies that user-provided packages are installed on required nodes and have equal versions. + Warning is shown if check is not successful. + """ + with TestCase(cluster.context['testsuite'], '206', "Services", f"Generic packages version") as tc: + packages = cluster.inventory['services']['packages']['install']['include'] + return check_packages_versions(cluster, tc, cluster.nodes['all'], packages, warn_on_bad_result=True) + + +def check_packages_versions(cluster, tc, group, packages, warn_on_bad_result=False): + """ + Verifies that all packages are installed on required nodes and have equal versions + :param cluster: main cluster object + :param tc: current test case object + :param group: nodes where to check packages + :param packages: list of packages to check + :param warn_on_bad_result: if true then uses Warning instead of Failure. Default False. 
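+    Illustrative call (the package names here are hypothetical, not taken from the inventory): check_packages_versions(cluster, tc, cluster.nodes['all'], ['curl', 'unzip'], warn_on_bad_result=True) reports a warning instead of a failure when the detected versions differ between nodes.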
+ """ + bad_results = {} + good_results = [] + + packages_map = pckgs.detect_installed_packages_version_groups(group, packages) + for package, version_map in packages_map.items(): + if len(version_map) != 1: + bad_results[f"package {package} has different versions"] = version_map + + version = list(version_map.keys())[0] + if "not installed" in version: + bad_results[f"package {package} is not installed on some nodes"] = version_map[version] + else: + good_results.append(version) + + if bad_results: + bad_results = yaml.safe_dump(bad_results) + if warn_on_bad_result: + raise TestWarn("detected incorrect packages versions", hint=bad_results) + raise TestFailure("detected incorrect packages versions", hint=bad_results) + cluster.log.debug(f"installed packages: {good_results}") + tc.success("all packages have correct versions") + + +def get_nodes_description(cluster): + result = cluster.nodes['master'].get_any_member().sudo('kubectl get node -o yaml') + cluster.log.verbose(result) + return yaml.safe_load(list(result.values())[0].stdout) + + +def kubelet_version(cluster): + with TestCase(cluster.context['testsuite'], '203', "Services", "Kubelet Version", + default_results=cluster.inventory['services']['kubeadm']['kubernetesVersion']): + nodes_description = get_nodes_description(cluster) + bad_versions = [] + for node_description in nodes_description['items']: + node_name = node_description['metadata']['name'] + kubelet_version = node_description['status']['nodeInfo']['kubeletVersion'] + cluster.log.debug("Node \"%s\" running kubelet %s" % (node_name, kubelet_version)) + if kubelet_version != cluster.inventory['services']['kubeadm']['kubernetesVersion']: + bad_versions.append(kubelet_version) + bad_versions = list(set(bad_versions)) + if bad_versions: + raise TestFailure("Invalid version detected: %s" % ', '.join(bad_versions), + hint="All nodes must have the same correct Kubelet version \"%s\". Remove nodes with the " + "incorrect version from the cluster and reinstall them to the corresponding " + "versions." % cluster.inventory['services']['kubeadm']['kubernetesVersion']) + + +def thirdparties_hashes(cluster): + """ + Task which is used to verify configured thirdparties hashes agains actual hashes on nodes. + If thirdparty is an archive, then archive files hashes are also verified. + If hash is not specified, then thirdparty is skipped. + If there is no thirdparties with hashes, then warning is shown. 
+ """ + with TestCase(cluster.context['testsuite'], '212', "Thirdparties", "Hashes") as tc: + successful = [] + broken = [] + + for path, config in cluster.inventory['services']['thirdparties'].items(): + group = cluster.create_group_from_groups_nodes_names(config.get('groups', []), config.get('nodes', [])) + hosts_missing = find_hosts_missing_thirdparty(group, path) + if hosts_missing: + broken.append(f"thirdparty {path} is missing on {hosts_missing}") + # if thirdparty is missing somewhere, do not check anything further for it + continue + + if 'sha1' not in config: + # silently skip if SHA not defined + continue + + results = group.sudo(f'openssl sha1 {path} | sed "s/^.* //"', warn=True) + actual_sha = None + first_host = None + # Searching actual SHA, if possible + for host, result in results.items(): + if result.failed: + broken.append(f'failed to get {path} sha {host.host}: {result.stderr}') + continue + + found_sha = result.stdout.strip() + if actual_sha is None: + actual_sha = found_sha + first_host = host.host + elif actual_sha != found_sha: + broken.append(f'got inconsistent sha for {path}: {found_sha} on host {host.host}, ' + f'different from first host {first_host} sha {actual_sha}') + actual_sha = None + break + + expected_sha = config['sha1'] # expected SHA to compare with found actual SHA + if actual_sha is None: + # was not able to find single actual SHA, errors already collected, nothing to do + continue + if actual_sha != expected_sha: + broken.append(f'expected sha {expected_sha} is not equal to actual sha {actual_sha} for {path}') + continue + + successful.append(path) + # SHA is correct, now check if it is an archive and if it does, then also check SHA for archive content + if 'unpack' in config: + unpack_dir = config['unpack'] + res = group.sudo('tar tf %s | grep -vw "./" | while read file_name; do ' # for each file in archive + ' echo ${file_name} ' # print 1) filename + ' $(sudo tar xfO %s ${file_name} | openssl sha1 | cut -d\\ -f2) ' # 2) sha archive + ' $(sudo openssl sha1 %s/${file_name} | cut -d\\ -f2); ' # 3) sha unpacked + 'done' % (path, path, unpack_dir)) + # for each file on each host verify that sha in archive is equal to sha for unpacked + for host, result in res.items(): + if result.failed: + broken.append(f'can not verify files SHA for archive {path} ' + f'on host {host.host}, unpacked to {unpack_dir}') + continue + files_results = result.stdout.strip().split('\n') + for file_result in files_results: + result_parts = file_result.split() + if len(result_parts) != 3: + broken.append(f'can not verify files SHA for archive {path} ' + f'on host {host.host}, unpacked to {unpack_dir}') + continue + filename, archive_hash, fs_hash = result_parts[0], result_parts[1], result_parts[2] + if archive_hash != fs_hash: + broken.append(f'hash for file {filename} from archive {path} ' + f'on host {host.host} is not equal to hash for file unpacked to {unpack_dir}') + + if broken: + raise TestFailure('Found inconsistent hashes', hint=yaml.safe_dump(broken)) + if not successful: + raise TestWarn('Did not found any hashes') + tc.success('All found hashes are correct') + + +def find_hosts_missing_thirdparty(group, path) -> List[str]: + """ + Search group for a list of hosts where thirdparty is missing + :param group: group of hosts where to search thirdparty + :param path: path to thirdparty to search + :return: list of hosts where thirdparty is missing + """ + results = group.sudo(f'ls {path}', warn=True) + missing = [] + for host, result in results.items(): + if 
result.failed: + missing.append(host.host) + return missing + + +def kubernetes_nodes_existence(cluster): + with TestCase(cluster.context['testsuite'], '209', "Kubernetes", "Nodes Existence", + default_results="All nodes presented"): + nodes_description = get_nodes_description(cluster) + not_found = [] + for node in cluster.inventory['nodes']: + if 'master' in node['roles'] or 'worker' in node['roles']: + found = False + for node_description in nodes_description['items']: + node_name = node_description['metadata']['name'] + if node_name == node['name']: + found = True + break + if found: + cluster.log.debug("Node \"%s\" is found in cluster" % node['name']) + else: + not_found.append(node['name']) + cluster.log.error("Node \"%s\" is not found in cluster" % node['name']) + not_found = list(set(not_found)) + if not_found: + raise TestFailure("Nodes not found: %s" % ', '.join(not_found), + hint="The cluster must contain all the nodes that are described in the inventory. Add " + "the missing nodes to cluster.") + + +def kubernetes_nodes_roles(cluster): + with TestCase(cluster.context['testsuite'], '210', "Kubernetes", "Nodes Roles", + default_results="All nodes have the correct roles"): + nodes_description = get_nodes_description(cluster) + nodes_with_bad_roles = [] + for node in cluster.inventory['nodes']: + for node_description in nodes_description['items']: + node_name = node_description['metadata']['name'] + if node['name'] == node_name: + if 'master' in node['roles']: + if 'node-role.kubernetes.io/master' not in node_description['metadata']['labels']: + nodes_with_bad_roles.append(node['name']) + cluster.log.error("Node \"%s\" has to be master, but has invalid role" % node['name']) + else: + cluster.log.debug("Node \"%s\" has correct master role" % node['name']) + elif 'worker' in node['roles']: + if 'node-role.kubernetes.io/worker' not in node_description['metadata']['labels']: + nodes_with_bad_roles.append(node['name']) + cluster.log.error("Node \"%s\" has to be worker, but has invalid role" % node['name']) + else: + cluster.log.debug("Node \"%s\" has correct worker role" % node['name']) + break + nodes_with_bad_roles = list(set(nodes_with_bad_roles)) + if nodes_with_bad_roles: + raise TestFailure("Incorrect role detected at: %s" % ', '.join(nodes_with_bad_roles), + hint="There were detected some nodes, whose role differs from that specified in the " + "inventory. 
The configuration of these nodes should be fixed.") + + +def kubernetes_nodes_condition(cluster, condition_type): + with TestCase(cluster.context['testsuite'], '211', "Kubernetes", "Nodes Condition - %s" % condition_type) as tc: + nodes_description = get_nodes_description(cluster) + expected_status = 'False' + if condition_type == 'Ready': + expected_status = 'True' + positive_conditions = [] + negative_conditions = [] + for node_description in nodes_description['items']: + node_name = node_description['metadata']['name'] + condition_found = False + for condition in node_description['status']['conditions']: + if condition['type'] == condition_type: + condition_found = True + cluster.log.debug("Node \"%s\" condition \"%s\" is \"%s\"" + % (node_name, condition['type'], condition['reason'])) + if condition['status'] != expected_status: + negative_conditions.append(condition['reason']) + else: + positive_conditions.append(condition['reason']) + break + if not condition_found: + raise TestFailure("Failed to detect at %s" % node_name) + + negative_conditions = list(set(negative_conditions)) + positive_conditions = list(set(positive_conditions)) + + if negative_conditions: + raise TestFailure("%s" % ', '.join(negative_conditions), + hint="A condition in negative status means that there are problems with the health of " + "the node.") + + tc.success(results="%s" % ', '.join(positive_conditions)) + + +def get_not_running_pods(cluster): + get_pods_cmd = 'kubectl get pods -A --field-selector status.phase!=Running | awk \'{ print $1" "$2" "$4 }\'' + result = cluster.nodes['master'].get_any_member().sudo(get_pods_cmd) + cluster.log.verbose(result) + return list(result.values())[0].stdout.strip() + + +def kubernetes_pods_condition(cluster): + system_namespaces = ["kube-system", "ingress-nginx", "kube-public", "kubernetes-dashboard", "default"] + critical_states = cluster.globals['pods']['critical_states'] + with TestCase(cluster.context['testsuite'], '207', "Kubernetes", "Pods Condition") as tc: + pods_description = get_not_running_pods(cluster) + total_failed_amount = len(pods_description.split('\n')[1:]) + critical_system_failed_amount = 0 + + for pod_description in pods_description.split('\n')[1:]: + split_description = pod_description.split(' ') + if split_description[0] in system_namespaces and split_description[2] in critical_states: + critical_system_failed_amount += 1 + + if critical_system_failed_amount > 0: + s = '' + if critical_system_failed_amount != 1: + s = 's' + raise TestFailure("%s failed system pod%s" % (critical_system_failed_amount, s), + hint="Try to determine the cause of pods failure, redeploy, reapply or restart them. If " + "this is not fixed, the cluster may not work or do it incorrectly.") + elif total_failed_amount > 0: + s = '' + if total_failed_amount != 1: + s = 's' + raise TestWarn("%s pod%s are failed/not running" % (total_failed_amount, s), + hint="Try to determine the reason the pods are not operational, " + "try to wait, redeploy, reapply or restart them. 
" + "If this is not fixed, some deployed applications may not work or do it incorrectly.") + else: + tc.success(results="All pods are running") + + +def kubernetes_dashboard_status(cluster): + with TestCase(cluster.context['testsuite'], '208', "Plugins", "Dashboard Availability") as tc: + retries = 10 + test_succeeded = False + i = 0 + while not test_succeeded and i < retries: + i += 1 + if cluster.inventory['plugins']['kubernetes-dashboard']['install']: + results = cluster.nodes['master'].get_first_member().sudo("kubectl get svc -n kubernetes-dashboard kubernetes-dashboard -o=jsonpath=\"{['spec.clusterIP']}\"", warn=True) + for master, result in results.items(): + if result.failed: + cluster.log.debug(f'Can not resolve dashboard IP: {result.stderr} ') + raise TestFailure("not available",hint=f"Please verify the following Kubernetes Dashboard status and fix this issue") + found_url = result.stdout + check_url = cluster.nodes['master'].get_first_member().sudo(f'curl -k -I https://{found_url}:443', warn=True) + status = list(check_url.values())[0].stdout + if '200' in status: + cluster.log.debug(status) + test_succeeded = True + tc.success(results="available") + else: + cluster.log.debug(f'Dashboard is not running yet... Retries left: {retries - i}') + time.sleep(60) + else: + test_succeeded = True + tc.success(results="skipped") + if not test_succeeded: + raise TestFailure("not available", + hint=f"Please verify the following Kubernetes Dashboard status and fix this issue:\n{status}") + + +def nodes_pid_max(cluster): + with TestCase(cluster.context['testsuite'], '202', "Nodes", "Nodes pid_max correctly installed") as tc: + master = cluster.nodes['master'].get_any_member() + yaml = ruamel.yaml.YAML() + nodes_failed_pid_max_check = {} + for node in cluster.nodes['master'].include_group(cluster.nodes['worker']).get_ordered_members_list(provide_node_configs=True): + + node_info = master.sudo("kubectl get node %s -o yaml" % node["name"]).get_simple_out() + config = yaml.load(node_info) + max_pods = int(config['status']['capacity']['pods']) + + kubelet_config = node["connection"].sudo("cat /var/lib/kubelet/config.yaml").get_simple_out() + config = yaml.load(kubelet_config) + pod_pids_limit = int(config['podPidsLimit']) + + pid_max = int(node["connection"].sudo("cat /proc/sys/kernel/pid_max").get_simple_out()) + required_pid_max = max_pods * pod_pids_limit + 2048 + cluster.log.debug("Current values:\n maxPods = %s \n podPidsLimit = %s \n pid_max = %s" + % (max_pods, pod_pids_limit, pid_max)) + cluster.log.debug("Required pid_max for current kubelet configuration is %s for node '%s'" + % (required_pid_max, node["name"])) + if cluster.inventory['services']['sysctl'].get("kernel.pid_max"): + inventory_pid_max = cluster.inventory['services']['sysctl'].get("kernel.pid_max") + if pid_max != inventory_pid_max: + raise TestWarn("The 'kernel.pid_max' value defined in system = %s, " + "but 'kernel.pid_max', which defined in cluster.yaml = %s" + % (pid_max, inventory_pid_max)) + if pid_max < required_pid_max: + nodes_failed_pid_max_check[node["name"]] = [pid_max, required_pid_max] + + if nodes_failed_pid_max_check: + output = "The requirement for the 'pid_max' value is not met for nodes:\n" + for node in nodes_failed_pid_max_check: + output += ("For node %s pid_max value = '%s', but it should be >= then '%s'\n" + % (node, nodes_failed_pid_max_check[node][0], nodes_failed_pid_max_check[node][1])) + raise TestFailure(output) + tc.success(results="pid_max correctly installed on all nodes") + + +tasks = 
OrderedDict({ + 'services': { + 'haproxy': { + 'status': lambda cluster: services_status(cluster, 'haproxy'), + }, + 'keepalived': { + 'status': lambda cluster: services_status(cluster, 'keepalived'), + }, + 'container_runtime': { + 'status': lambda cluster: + services_status(cluster, cluster.inventory['services']['cri']['containerRuntime']), + }, + 'kubelet': { + 'status': lambda cluster: services_status(cluster, 'kubelet'), + 'configuration': lambda cluster: nodes_pid_max(cluster), + 'version': kubelet_version, + }, + 'packages': { + 'system': { + 'recommended_versions': recommended_system_packages_versions, + 'cri_version': lambda cluster: + system_packages_versions(cluster, cluster.inventory['services']['cri'][ 'containerRuntime']), + 'haproxy_version': lambda cluster: system_packages_versions(cluster, 'haproxy'), + 'keepalived_version': lambda cluster: system_packages_versions(cluster, 'keepalived') + }, + 'generic': { + 'version': generic_packages_versions + } + }, + }, + 'thirdparties': { + 'hashes': thirdparties_hashes, + }, + 'kubernetes': { + 'pods': kubernetes_pods_condition, + 'plugins': { + 'dashboard': kubernetes_dashboard_status + }, + 'nodes': { + 'existence': kubernetes_nodes_existence, + 'roles': kubernetes_nodes_roles, + 'condition': { + "network": lambda cluster: kubernetes_nodes_condition(cluster, 'NetworkUnavailable'), + "memory": lambda cluster: kubernetes_nodes_condition(cluster, 'MemoryPressure'), + "disk": lambda cluster: kubernetes_nodes_condition(cluster, 'DiskPressure'), + "pid": lambda cluster: kubernetes_nodes_condition(cluster, 'PIDPressure'), + "ready": lambda cluster: kubernetes_nodes_condition(cluster, 'Ready') + }, + }, + }, + # 'etcd': { + # 'health_status': etcd_health_status + # } +}) + + +def main(cli_arguments=None): + parser = argparse.ArgumentParser(description=''' +Script for checking Kubernetes cluster PAAS layer. 
+ +How to use: + +''', formatter_class=argparse.RawTextHelpFormatter) + + parser.add_argument('-v', '--verbose', + action='store_true', + help='enable the verbosity mode') + + parser.add_argument('-c', '--config', + default='cluster.yaml', + help='define main cluster configuration file') + + parser.add_argument('--tasks', + default='', + help='define comma-separated tasks to be executed') + + parser.add_argument('--exclude', + default='', + help='exclude comma-separated tasks from execution') + + parser.add_argument('--csv-report', + default='report.csv', + help='define CSV report file location') + + parser.add_argument('--csv-report-delimiter', + default=';', + help='define delimiter type for CSV report') + + parser.add_argument('--html-report', + default='report.html', + help='define HTML report file location') + + parser.add_argument('--disable-csv-report', + action='store_true', + help='forcibly disable CSV report file creation') + + parser.add_argument('--disable-html-report', + action='store_true', + help='forcibly disable HTML report file creation') + + if cli_arguments is None: + args = parser.parse_args() + else: + args = parser.parse_args(cli_arguments) + + defined_tasks = [] + defined_excludes = [] + + if args.tasks != '': + defined_tasks = args.tasks.split(",") + + if args.exclude != '': + defined_excludes = args.exclude.split(",") + + context = flow.create_context(args, procedure='paas') + context['testsuite'] = TestSuite() + + cluster = flow.run( + tasks, + defined_tasks, + defined_excludes, + args.config, + context, + print_final_message=False + ) + + # Final summary should be printed only to stdout with custom formatting + # If test results are required for parsing, they can be found in the test result files + print(cluster.context['testsuite'].get_final_summary(show_minimal=False, show_recommended=False)) + cluster.context['testsuite'].print_final_status(cluster.log) + check_iaas.make_reports(cluster) + return cluster.context['testsuite'] + + +if __name__ == '__main__': + testsuite = main() + if testsuite.is_any_test_failed(): + sys.exit(1) diff --git a/kubetool/procedures/do.py b/kubetool/procedures/do.py new file mode 100755 index 000000000..2263cc84d --- /dev/null +++ b/kubetool/procedures/do.py @@ -0,0 +1,92 @@ +#!/usr/bin/env python3 + +import argparse +import sys +from kubetool.core import utils +from kubetool.core.flow import load_inventory, create_context + + +def main(cli_arguments=None): + + if not cli_arguments: + cli_arguments = sys.argv + + configfile_path = 'cluster.yaml' + arguments = vars() + + kubetools_args = [] + remote_args = [] + + if '--' not in cli_arguments: + remote_args = cli_arguments + else: + split = False + for argument in cli_arguments: + if argument == '--': + split = True + continue + if not split: + kubetools_args.append(argument) + else: + remote_args.append(argument) + + if kubetools_args: + parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter) + + parser.add_argument('-c', '--config', + default='cluster.yaml', + help='define main cluster configuration file') + + parser.add_argument('-n', '--node', + help='node(s) name to execute on, can be combined with groups') + + parser.add_argument('-g', '--group', + help='group(s) name to execute on, can be combined with nodes') + + parser.add_argument('--no_stream', + action='store_true', + help='do not stream all remote results in real-time, show node names') + + arguments = vars(parser.parse_args(kubetools_args)) + configfile_path = arguments.get('config') + + cluster = 
load_inventory(utils.get_resource_absolute_path(configfile_path, script_relative=False), + create_context({ + 'disable_dump': True, + 'log': [ + ['stdout;level=error;colorize=true;correct_newlines=true'] + ] + }), silent=True) + + if kubetools_args: + executor_lists = { + 'node': [], + 'group': [] + } + for executors_type in executor_lists.keys(): + executors_str = arguments.get(executors_type) + if executors_str: + if "," in executors_str: + for executor_name in executors_str.split(','): + executor_lists[executors_type].append(executor_name.strip()) + else: + executor_lists[executors_type].append(executors_str.strip()) + executors_group = cluster.create_group_from_groups_nodes_names(executor_lists['group'], executor_lists['node']) + else: + executors_group = cluster.nodes['master'].get_any_member() + + if not executors_group or executors_group.nodes_amount() < 1: + print('Failed to find any of specified nodes or groups') + sys.exit(1) + + no_stream = arguments.get('no_stream') + res = executors_group.sudo(" ".join(remote_args), hide=no_stream, warn=True) + if no_stream: + res.print() + + if res.is_any_failed(): + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/kubetool/procedures/install.py b/kubetool/procedures/install.py new file mode 100755 index 000000000..00103e9e9 --- /dev/null +++ b/kubetool/procedures/install.py @@ -0,0 +1,556 @@ +#!/usr/bin/env python3 + +from collections import OrderedDict +import fabric +import yaml + +from kubetool import system, sysctl, haproxy, keepalived, kubernetes, plugins, \ + kubernetes_accounts, selinux, thirdparties, psp, audit, coredns, cri, packages, apparmor +from kubetool.core import flow, utils +from kubetool.core.executor import RemoteExecutor + + +def system_prepare_check_sudoer(cluster): + for host, node_context in cluster.context['nodes'].items(): + if node_context['online'] and node_context['hasroot']: + cluster.log.debug("%s online and has root" % host) + else: + raise Exception('%s is not sudoer' % host) + + +def system_prepare_check_system(cluster): + group = cluster.nodes['all'].get_new_nodes_or_self() + cluster.log.debug(system.detect_os_family(cluster, suppress_exceptions=True)) + for address, context in cluster.context["nodes"].items(): + if address not in group.nodes or not context.get('os'): + continue + if context["os"]["family"] == "unsupported": + raise Exception('%s host operating system is unsupported' % address) + if context["os"]["family"] == "unknown": + supported_os_versions = [] + os_family_list = cluster.globals["compatibility_map"]["distributives"][context["os"]["name"]] + for os_family_item in os_family_list: + supported_os_versions.extend(os_family_item["versions"]) + raise Exception("%s running on unknown %s version. 
" + "Expected %s, got '%s'" % (address, + context["os"]["name"], + supported_os_versions, + context["os"]["version"])) + + +def system_prepare_check_cluster_installation(cluster): + if kubernetes.is_cluster_installed(cluster): + cluster.log.debug('Cluster already installed and available at %s' % cluster.context['controlplain_uri']) + else: + cluster.log.debug('There is no any installed cluster') + + +def system_prepare_system_chrony(cluster): + if cluster.inventory['services']['ntp'].get('chrony', {}).get('servers') is None: + cluster.log.debug("Skipped - NTP servers from chrony is not defined in config file") + return + cluster.nodes['all'].get_new_nodes_or_self().call(system.configure_chronyd) + + +def system_prepare_system_timesyncd(cluster): + if not cluster.inventory['services']['ntp'].get('timesyncd', {}).get('Time', {}).get('NTP') and \ + not cluster.inventory['services']['ntp'].get('timesyncd', {}).get('Time', {}).get('FallbackNTP'): + cluster.log.debug("Skipped - NTP servers from timesyncd is not defined in config file") + return + cluster.nodes['all'].get_new_nodes_or_self().call(system.configure_timesyncd) + + +def system_prepare_system_sysctl(cluster): + if cluster.inventory['services'].get('sysctl') is None or not cluster.inventory['services']['sysctl']: + cluster.log.debug("Skipped - sysctl is not defined or empty in config file") + return + cluster.nodes['all'].get_new_nodes_or_self().call_batch([ + sysctl.configure, + sysctl.reload, + ]) + + +def system_prepare_system_setup_selinux(cluster): + cluster.nodes['all'].get_new_nodes_or_self().call(selinux.setup_selinux) + + +def system_prepare_system_setup_apparmor(cluster): + cluster.nodes['all'].get_new_nodes_or_self().call(apparmor.setup_apparmor) + + +def system_prepare_system_disable_firewalld(cluster): + cluster.nodes['all'].get_new_nodes_or_self().call(system.disable_firewalld) + + +def system_prepare_system_disable_swap(cluster): + cluster.nodes['all'].get_new_nodes_or_self().call(system.disable_swap) + + +def system_prepare_system_modprobe(cluster): + cluster.nodes['all'].get_new_nodes_or_self().call(system.setup_modprobe) + + +def system_prepare_audit(cluster): + if 'worker' in cluster.roles: + group = cluster.nodes['master'].include_group(cluster.nodes['worker']).get_new_nodes_or_self() + else: + group = cluster.nodes['master'].get_new_nodes_or_self() + cluster.log.debug(f'Warning: You are creating a cluster from only one master') + + cluster.log.debug(group.call(audit.apply_audit_rules)) + + +def system_prepare_dns_hostname(cluster): + with RemoteExecutor(cluster.log): + for node in cluster.nodes['all'].get_new_nodes_or_self().get_ordered_members_list(provide_node_configs=True): + cluster.log.debug("Changing hostname '%s' = '%s'" % (node["connect_to"], node["name"])) + node["connection"].sudo("hostnamectl set-hostname %s" % node["name"]) + + +def system_prepare_dns_resolv_conf(cluster): + if cluster.inventory["services"].get("resolv.conf") is None: + cluster.log.debug("Skipped - resolv.conf section not defined in config file") + return + + group = cluster.nodes['all'].get_new_nodes_or_self() + + system.update_resolv_conf(group, config=cluster.inventory["services"].get("resolv.conf")) + cluster.log.debug(group.sudo("ls -la /etc/resolv.conf; sudo lsattr /etc/resolv.conf")) + + +def system_prepare_dns_etc_hosts(cluster): + config = system.generate_etc_hosts_config(cluster.inventory, cluster) + # TODO: here support custom hosts definition from cluster.yml + utils.dump_file(cluster, config, 'etc_hosts') + 
cluster.log.debug("\nUploading...") + + group = cluster.nodes['all'].get_final_nodes() + + system.update_etc_hosts(group, config=config) + cluster.log.debug(group.sudo("ls -la /etc/hosts")) + + +def system_prepare_package_manager_configure(cluster): + repositories = cluster.inventory['services']['packages']['package_manager'].get("repositories") + if not repositories: + cluster.log.debug("Skipped - no repositories defined for configuration") + return + + group = cluster.nodes['all'].get_new_nodes_or_self() + + group.call_batch([ + packages.backup_repo, + packages.add_repo + ], **{ + "kubetool.packages.add_repo": { + "repo_data": repositories, + "repo_filename": "predefined" + } + }) + + cluster.log.debug("Nodes contain the following repositories:") + cluster.log.debug(packages.ls_repofiles(group)) + + +def system_prepare_package_manager_manage_packages(cluster): + if not cluster.inventory["services"].get("packages", {}): + cluster.log.debug("Skipped - no packages configuration defined in config file") + return + + batch_tasks = [] + batch_parameters = {} + + if cluster.inventory["services"]["packages"].get("remove", []): + batch_tasks.append(packages.remove) + batch_parameters["kubetool.packages.remove"] = { + "include": cluster.inventory["services"]["packages"]['remove']['include'], + "exclude": cluster.inventory["services"]["packages"]['remove'].get('exclude') + } + + if cluster.inventory["services"]["packages"].get("install", []): + batch_tasks.append(packages.install) + batch_parameters["kubetool.packages.install"] = { + "include": cluster.inventory["services"]["packages"]['install']['include'], + "exclude": cluster.inventory["services"]["packages"]['install'].get('exclude') + } + + if cluster.inventory["services"]["packages"].get("upgrade", []): + batch_tasks.append(packages.upgrade) + batch_parameters["kubetool.packages.upgrade"] = { + "include": cluster.inventory["services"]["packages"]['upgrade']['include'], + "exclude": cluster.inventory["services"]["packages"]['upgrade'].get('exclude') + } + + try: + batch_results = cluster.nodes['all'].get_new_nodes_or_self().call_batch(batch_tasks, **batch_parameters) + except fabric.group.GroupException: + cluster.log.verbose('Exception occurred! Trying to handle is there anything updated or not...') + # todo develop cases when we can continue even if exception occurs + raise + + any_changes_found = False + for action, results in batch_results.items(): + cluster.log.verbose('Verifying packages changes after \'%s\' action...' % action) + for conn, result in results.items(): + if "Nothing to do" not in result.stdout: + cluster.log.verbose('Packages changed at %s' % conn.host) + any_changes_found = True + + if any_changes_found: + cluster.log.verbose('Packages changed, scheduling nodes restart...') + cluster.schedule_cumulative_point(system.reboot_nodes) + else: + cluster.log.verbose('No packages changed, nodes restart will not be scheduled') + + +def system_cri_install(cluster): + """ + Task which is used to install CRI. Could be skipped, if CRI already installed. + """ + group = cluster.nodes['master'].include_group(cluster.nodes.get('worker')) + + if cluster.context['initial_procedure'] == 'add_node': + group = group.get_new_nodes() + + group.call(cri.install) + + +def system_cri_configure(cluster): + """ + Task which is used to configure CRI. Could be skipped, if CRI already configured. 
+ """ + group = cluster.nodes['master'].include_group(cluster.nodes.get('worker')) + + if cluster.context['initial_procedure'] == 'add_node': + group = group.get_new_nodes() + + group.call(cri.configure) + + +def system_prepare_thirdparties(cluster): + if not cluster.inventory['services'].get('thirdparties', {}): + cluster.log.debug("Skipped - no thirdparties defined in config file") + return + + cluster.nodes['all'].get_new_nodes_or_self().call(thirdparties.install_all_thirparties) + + +def deploy_loadbalancer_haproxy_install(cluster): + group = None + if "balancer" in cluster.nodes: + + group = cluster.nodes['balancer'] + + if cluster.context['initial_procedure'] == 'add_node': + group = cluster.nodes['balancer'].get_new_nodes() + + if group is None or group.is_empty(): + cluster.log.debug('Skipped - no balancers to perform') + return + + group.call(haproxy.install) + + +def deploy_loadbalancer_haproxy_configure(cluster): + group = None + if "balancer" in cluster.nodes: + + if cluster.context['initial_procedure'] != 'remove_node': + group = cluster.nodes['balancer'].get_new_nodes_or_self() + + if not cluster.nodes['master'].include_group(cluster.nodes.get('worker')).get_changed_nodes().is_empty(): + group = cluster.nodes['balancer'].get_final_nodes() + + if group is None or group.is_empty(): + cluster.log.debug('Skipped - no balancers to perform') + return + + with RemoteExecutor(cluster.log): + group.call_batch([ + haproxy.configure, + haproxy.override_haproxy18, + haproxy.restart + ]) + + +def deploy_loadbalancer_keepalived_install(cluster): + group = None + if 'vrrp_ips' in cluster.inventory and cluster.inventory['vrrp_ips']: + + group = cluster.nodes['keepalived'] + + # if remove/add node, then reconfigure only new keepalives + if cluster.context['initial_procedure'] != 'install': + group = cluster.nodes['keepalived'].get_new_nodes() + + # if balancer added or removed - reconfigure all keepalives + if not cluster.nodes['balancer'].get_changed_nodes().is_empty(): + group = cluster.nodes['keepalived'].get_final_nodes() + + if group is None or group.is_empty(): + cluster.log.debug('Skipped - no VRRP IPs to perform') + return + + # add_node will impact all keepalived + group.call(keepalived.install) + + +def deploy_loadbalancer_keepalived_configure(cluster): + group = None + if 'vrrp_ips' in cluster.inventory and cluster.inventory['vrrp_ips']: + + group = cluster.nodes['keepalived'].get_final_nodes() + + # if remove/add node, then reconfigure only new keepalives + if cluster.context['initial_procedure'] != 'install': + group = cluster.nodes['keepalived'].get_new_nodes() + + # if balancer added or removed - reconfigure all keepalives + if not cluster.nodes['balancer'].get_changed_nodes().is_empty(): + group = cluster.nodes['keepalived'].get_final_nodes() + + if group is None or group.is_empty(): + cluster.log.debug('Skipped - no VRRP IPs to perform') + return + + # add_node will impact all keepalived + group.call(keepalived.configure) + + +def deploy_kubernetes_reset(cluster): + group = cluster.nodes['master'].include_group(cluster.nodes.get('worker')) + + if cluster.context['initial_procedure'] == 'add_node' and group.get_new_nodes().is_empty(): + cluster.log.debug("No kubernetes nodes to perform") + return + + group.get_new_nodes_or_self().call(kubernetes.reset_installation_env) + + +def deploy_kubernetes_install(cluster): + cluster.log.debug("Setting up Kubernetes...") + + group = cluster.nodes['master'].include_group(cluster.nodes.get('worker')) + + if 
cluster.context['initial_procedure'] == 'add_node' and group.get_new_nodes().is_empty(): + cluster.log.debug("No kubernetes nodes to perform") + return + + group.get_new_nodes_or_self().call(kubernetes.install) + + + + +def deploy_kubernetes_prepull_images(cluster): + cluster.log.debug("Prepulling Kubernetes images...") + + group = cluster.nodes['master'].include_group(cluster.nodes.get('worker')) + + if cluster.context['initial_procedure'] == 'add_node' and group.get_new_nodes().is_empty(): + cluster.log.debug("No kubernetes nodes to perform") + return + + group.get_new_nodes_or_self().call(kubernetes.images_grouped_prepull) + + +def deploy_kubernetes_init(cluster): + group = cluster.nodes['master'].include_group(cluster.nodes.get('worker')) + + if cluster.context['initial_procedure'] == 'add_node' and group.get_new_nodes().is_empty(): + cluster.log.debug("No kubernetes nodes to perform") + return + + cluster.nodes['master'].get_new_nodes_or_self().call_batch([ + kubernetes.init_first_master, + kubernetes.join_other_masters + ]) + + if 'worker' in cluster.nodes: + cluster.nodes['worker'].get_new_nodes_or_self().new_group( + apply_filter=lambda node: 'master' not in node['roles']) \ + .call(kubernetes.init_workers) + + cluster.nodes['all'].get_new_nodes_or_self().call_batch([ + kubernetes.apply_labels, + kubernetes.apply_taints + ]) + + + +def deploy_coredns(cluster): + config = coredns.generate_configmap(cluster.inventory) + + cluster.log.debug('Applying patch...') + cluster.log.debug(coredns.apply_patch(cluster)) + + cluster.log.debug('Applying configmap...') + cluster.log.debug(coredns.apply_configmap(cluster, config)) + + +def deploy_plugins(cluster): + plugins.install(cluster) + + +def deploy_accounts(cluster): + kubernetes_accounts.install(cluster) + + +def overview(cluster): + cluster.log.debug("Retrieving cluster status...") + master = cluster.nodes["master"].get_final_nodes().get_first_member() + cluster.log.debug("\nNAMESPACES:") + master.sudo("kubectl get namespaces", hide=False) + cluster.log.debug("\nNODES:") + master.sudo("kubectl get nodes -o wide", hide=False) + cluster.log.debug("\nPODS:") + master.sudo("kubectl get pods -A -o=wide", hide=False) + cluster.log.debug("\nREPLICA SETS:") + master.sudo("kubectl get rs -A", hide=False) + cluster.log.debug("\nDAEMON SETS:") + master.sudo("kubectl get ds -A", hide=False) + cluster.log.debug("\nSERVICES:") + master.sudo("kubectl get svc -A -o wide", hide=False) + cluster.log.debug("\nINGRESS:") + master.sudo("kubectl get ing -A -o wide", hide=False) + cluster.log.debug("\nDESCRIPTION:") + master.sudo("kubectl describe nodes", hide=False) + cluster.log.debug("\n") + master.sudo("kubectl cluster-info", hide=False, warn=True) + + +tasks = OrderedDict({ + "prepare": { + "check": { + "sudoer": system_prepare_check_sudoer, + "system": system_prepare_check_system, + "cluster_installation": system_prepare_check_cluster_installation + }, + "dns": { + "hostname": system_prepare_dns_hostname, + "etc_hosts": system_prepare_dns_etc_hosts, + "resolv_conf": system_prepare_dns_resolv_conf + }, + "package_manager": { + "configure": system_prepare_package_manager_configure, + "manage_packages": system_prepare_package_manager_manage_packages + }, + "ntp": { + "chrony": system_prepare_system_chrony, + "timesyncd": system_prepare_system_timesyncd + }, + "system": { + "setup_selinux": system_prepare_system_setup_selinux, + "setup_apparmor": system_prepare_system_setup_apparmor, + "disable_firewalld": system_prepare_system_disable_firewalld, + 
"disable_swap": system_prepare_system_disable_swap, + "modprobe": system_prepare_system_modprobe, + "sysctl": system_prepare_system_sysctl, + "audit": system_prepare_audit + }, + "cri": { + "install": system_cri_install, + "configure": system_cri_configure + }, + "thirdparties": system_prepare_thirdparties + }, + "deploy": { + "loadbalancer": { + "haproxy": { + "install": deploy_loadbalancer_haproxy_install, + "configure": deploy_loadbalancer_haproxy_configure, + }, + "keepalived": { + "install": deploy_loadbalancer_keepalived_install, + "configure": deploy_loadbalancer_keepalived_configure, + } + }, + "kubernetes": { + "reset": deploy_kubernetes_reset, + "install": deploy_kubernetes_install, + "prepull_images": deploy_kubernetes_prepull_images, + "init": deploy_kubernetes_init + }, + "psp": psp.install_psp_task, + "coredns": deploy_coredns, + "plugins": deploy_plugins, + "accounts": deploy_accounts + }, + "overview": overview +}) + +cumulative_points = { + + # prepare.system.sysctl requires + # - /proc/sys/net/bridge/bridge-nf-call-iptables + # - /proc/sys/net/bridge/bridge-nf-call-ip6tables + # for the following records: + # - net.ipv4.ip_forward = 1 + # - net.ipv4.ip_nonlocal_bind = 1 + # - net.ipv6.ip_nonlocal_bind = 1 + # - net.ipv6.conf.all.forwarding = 1 + # That is why reboot required BEFORE this task + 'kubetool.system.reboot_nodes': [ + "prepare.system.sysctl" + ], + 'kubetool.system.verify_system': [ + "prepare.system.sysctl" + ] + +} + + +def main(cli_arguments=None): + cli_help = ''' + Script for installing Kubernetes cluster. + + How to use: + + ''' + + parser = flow.new_parser(cli_help) + + parser.add_argument('--tasks', + default='', + help='define comma-separated tasks to be executed') + + parser.add_argument('--exclude', + default='', + help='exclude comma-separated tasks from execution') + + if cli_arguments is None: + args = parser.parse_args() + else: + args = parser.parse_args(cli_arguments) + + defined_tasks = [] + defined_excludes = [] + + if args.tasks != '': + defined_tasks = args.tasks.split(",") + + if args.exclude != '': + defined_excludes = args.exclude.split(",") + + with open(args.config, 'r') as stream: + cluster_yml = yaml.safe_load(stream) + verification_version_result = "" + if (cluster_yml.get("services", {}) + and cluster_yml["services"].get("kubeadm", {}) + and cluster_yml["services"]["kubeadm"].get("kubernetesVersion")): + target_version = cluster_yml["services"]["kubeadm"].get("kubernetesVersion") + verification_version_result = kubernetes.verify_target_version(target_version) + + flow.run( + tasks, + defined_tasks, + defined_excludes, + args.config, + flow.create_context(args, procedure='install'), + cumulative_points=cumulative_points + ) + if verification_version_result: + print(verification_version_result) + + +if __name__ == '__main__': + main() diff --git a/kubetool/procedures/manage_psp.py b/kubetool/procedures/manage_psp.py new file mode 100755 index 000000000..a130b33b5 --- /dev/null +++ b/kubetool/procedures/manage_psp.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python3 + +from collections import OrderedDict + +from kubetool import psp +from kubetool.core import flow + +tasks = OrderedDict({ + "delete_custom": psp.delete_custom_task, + "add_custom": psp.add_custom_task, + "reconfigure_oob": psp.reconfigure_oob_task, + "reconfigure_plugin": psp.reconfigure_plugin_task, + "restart_pods": psp.restart_pods_task +}) + + +def main(cli_arguments=None): + + cli_help = ''' + Script for managing psp on existing Kubernetes cluster. 
+ + How to use: + + ''' + + parser = flow.new_parser(cli_help) + parser.add_argument('--tasks', + default='', + help='define comma-separated tasks to be executed') + + parser.add_argument('--exclude', + default='', + help='exclude comma-separated tasks from execution') + + parser.add_argument('procedure_config', metavar='procedure_config', type=str, + help='config file for add_node procedure') + + if cli_arguments is None: + args = parser.parse_args() + else: + args = parser.parse_args(cli_arguments) + + defined_tasks = [] + defined_excludes = [] + + if args.tasks != '': + defined_tasks = args.tasks.split(",") + + if args.exclude != '': + defined_excludes = args.exclude.split(",") + + context = flow.create_context(args, procedure='manage_psp') + context['inventory_regenerate_required'] = True + + flow.run( + tasks, + defined_tasks, + defined_excludes, + args.config, + context, + procedure_inventory_filepath=args.procedure_config, + ) + + +if __name__ == '__main__': + main() diff --git a/kubetool/procedures/migrate_cri.py b/kubetool/procedures/migrate_cri.py new file mode 100755 index 000000000..d873d8276 --- /dev/null +++ b/kubetool/procedures/migrate_cri.py @@ -0,0 +1,277 @@ +#!/usr/bin/env python3 + +from collections import OrderedDict + +import io +import ruamel.yaml + +from kubetool import kubernetes +from kubetool.core import flow +from kubetool.cri import docker +from kubetool.procedures import install +from kubetool.core.yaml_merger import default_merger +from kubetool import packages + + +def enrich_inventory(inventory, cluster): + if cluster.context.get("initial_procedure") != "migrate_cri": + return inventory + enrichment_functions = [ + _prepare_yum_repos, + _prepare_packages, + _configure_containerd_on_nodes, + _prepare_crictl + ] + for enrichment_fn in enrichment_functions: + cluster.log.verbose('Calling fn "%s"' % enrichment_fn.__qualname__) + inventory = enrichment_fn(cluster, inventory) + return inventory + + +def _prepare_yum_repos(cluster, inventory): + if not cluster.procedure_inventory.get("yum", {}): + cluster.log.debug("Skipped - no yum section defined in procedure config file") + return inventory + + if not cluster.procedure_inventory["yum"].get("repositories", {}): + cluster.log.debug("No repositories will be added on nodes") + return inventory + + if not inventory["services"].get("yum", {}): + inventory["services"]["yum"] = {} + + if inventory["services"]["yum"].get("repositories", {}): + default_merger.merge(inventory["services"]["yum"]["repositories"], + cluster.procedure_inventory["yum"]["repositories"]) + else: + default_merger.merge(inventory["services"]["yum"], + cluster.procedure_inventory["yum"]) + return inventory + + +def _prepare_packages(cluster, inventory): + if not cluster.procedure_inventory.get("packages", {}): + cluster.log.debug("Skipped - no packages defined in procedure config file") + return inventory + + if not cluster.procedure_inventory["packages"].get("associations", {}): + cluster.log.debug("Skipped - no associations defined in procedure config file") + return inventory + + if not inventory["services"].get("packages", {}): + inventory["services"]["packages"] = {} + + if inventory["services"]["packages"].get("associations", {}): + default_merger.merge(inventory["services"]["packages"]["associations"], + cluster.procedure_inventory["packages"]["associations"]) + else: + inventory["services"]["packages"]["associations"] = cluster.procedure_inventory["packages"]["associations"] + return inventory + + +def _prepare_crictl(cluster, inventory): + 
if cluster.procedure_inventory.get("thirdparties", {}) \ + and cluster.procedure_inventory["thirdparties"].get("/usr/bin/crictl.tar.gz", {}): + + if not inventory["services"].get("thirdparties", {}): + inventory["services"]["thirdparties"] = {} + + default_merger.merge(inventory["services"]["thirdparties"], + cluster.procedure_inventory["thirdparties"]) + cluster.log.debug("Third-party crictl added") + return inventory + else: + return inventory + + +def configure_containerd_on_nodes(cluster): + install.system_cri_install(cluster) + install.system_cri_configure(cluster) + install.system_prepare_thirdparties(cluster) + + +def _configure_containerd_on_nodes(cluster, inventory): + if "cri" not in cluster.procedure_inventory or "containerRuntime" not in cluster.procedure_inventory["cri"]: + raise Exception("Please specify mandatory parameter cri.containerRuntime in procedure.yaml") + + if cluster.procedure_inventory["cri"]["containerRuntime"] != "containerd": + raise Exception("Migration could be possible only to containerd") + + if inventory["services"]["cri"]["containerRuntime"] == cluster.procedure_inventory["cri"]["containerRuntime"]: + raise Exception("You already have such cri or you should explicitly specify 'cri.containerRuntime: docker' in cluster.yaml") + + inventory = _merge_containerd(cluster, inventory) + return inventory + + +def _merge_containerd(cluster, inventory): + if not inventory["services"].get("cri", {}): + inventory["services"]["cri"] = {} + + if inventory["services"]["cri"].get("dockerConfig", {}): + del inventory["services"]["cri"]["dockerConfig"] + + default_merger.merge(inventory["services"]["cri"], cluster.procedure_inventory["cri"]) + return inventory + + +def migrate_cri(cluster): + _migrate_cri(cluster, cluster.nodes["master"].get_ordered_members_list(provide_node_configs=True)) + _migrate_cri(cluster, cluster.nodes["worker"].exclude_group(cluster.nodes["master"]) + .get_ordered_members_list(provide_node_configs=True)) + + +def _migrate_cri(cluster, node_group): + """ + Migrate CRI from docker to already installed containerd. + This method works node-by-node, configuring kubelet to use containerd. 
+ :param cluster: main object describing a cluster + :param node_group: group of nodes to migrate + """ + + for node in node_group: + if "master" in node["roles"]: + master = node + else: + master = cluster.nodes["master"].get_first_member(provide_node_configs=True) + + version = cluster.inventory["services"]["kubeadm"]["kubernetesVersion"] + cluster.log.debug("Upgrading \"%s\"" % node["name"]) + disable_eviction = True + drain_cmd = kubernetes.prepare_drain_command(node, version, cluster.globals, disable_eviction, cluster.nodes) + master["connection"].sudo(drain_cmd, is_async=False, hide=False) + + kubeadm_flags_file = "/var/lib/kubelet/kubeadm-flags.env" + kubeadm_flags = node["connection"].sudo(f"cat {kubeadm_flags_file}", + is_async=False).get_simple_out() + + kubeadm_flags = edit_config(kubeadm_flags) + + node["connection"].put(io.StringIO(kubeadm_flags), kubeadm_flags_file, backup=True, sudo=True) + + node["connection"].sudo("systemctl stop kubelet") + docker.prune(node["connection"]) + docker_associations = cluster.get_associations_for_node(node['connect_to'])['docker'] + node["connection"].sudo(f"systemctl disable {docker_associations['service_name']} --now;" + "sudo sh -c 'rm -rf /var/lib/docker/*'") + + # if there is a disk for docker in "/etc/fstab", then use this disk for containerd + docker_disk_result = node["connection"].sudo("cat /etc/fstab | grep ' /var/lib/docker '", warn=True) + docker_disk = list(docker_disk_result.values())[0].stdout.strip() + if docker_disk: + node['connection'].sudo("umount /var/lib/docker && " + "sudo sed -i 's/ \/var\/lib\/docker / \/var\/lib\/containerd /' /etc/fstab && " + "sudo sh -c 'rm -rf /var/lib/containerd/*' && " + "sudo mount -a && " + "sudo systemctl restart containerd") + + # flushing iptables to delete old cri's rules, + # existence of those rules could lead to services unreachable + node["connection"].sudo("sudo iptables -t nat -F && " + "sudo iptables -t raw -F && " + "sudo iptables -t filter -F && " + # start kubelet + "sudo systemctl restart kubelet") + master["connection"].sudo(f"sudo kubectl uncordon {node['name']}", is_async=False, hide=False) + if "master" in node["roles"]: + kubernetes.wait_for_any_pods(cluster, node["connection"], apply_filter=node["name"]) + + packages_list = [] + for package_name in docker_associations['package_name']: + if not package_name.startswith('containerd'): + packages_list.append(package_name) + cluster.log.warning("The following packages will be removed: %s" % packages_list) + packages.remove(node["connection"], include=packages_list) + +def edit_config(kubeadm_flags): + kubeadm_flags = _config_changer(kubeadm_flags, "--container-runtime=remote") + return _config_changer(kubeadm_flags, + "--container-runtime-endpoint=unix:///run/containerd/containerd.sock") + + +def _config_changer(config, word): + equal_pos = word.find("=") + 1 + param_begin_pos = config.find(word[:equal_pos]) + if param_begin_pos != -1: + param_end_pos = config[param_begin_pos:].find(" ") + if param_end_pos == -1: + return config[:param_begin_pos] + word + "\"" + return config[:param_begin_pos] + word + config[param_end_pos + param_begin_pos:] + else: + param_end_pos = config.rfind("\"") + return config[:param_end_pos] + " " + word[:] + "\"" + + +def migrate_cri_finalize_inventory(cluster, inventory_to_finalize): + if cluster.context.get("initial_procedure") != "migrate_cri": + return inventory_to_finalize + finalize_functions = [ + _prepare_yum_repos, + _prepare_packages, + _prepare_crictl, + _merge_containerd + ] + for 
finalize_fn in finalize_functions: + cluster.log.verbose('Calling fn "%s"' % finalize_fn.__qualname__) + inventory_to_finalize = finalize_fn(cluster, inventory_to_finalize) + + return inventory_to_finalize + + +tasks = OrderedDict({ + "add_repos": install.system_prepare_package_manager_configure, + "configure_containerd_on_nodes": configure_containerd_on_nodes, + "apply_new_cri": migrate_cri, +}) + + +def main(cli_arguments=None): + cli_help = ''' + Script for automated migration from docker to containerd. + + How to use: + + ''' + + parser = flow.new_parser(cli_help) + parser.add_argument('--tasks', + default='', + help='define comma-separated tasks to be executed') + + parser.add_argument('--exclude', + default='', + help='exclude comma-separated tasks from execution') + + parser.add_argument('procedure_config', metavar='procedure_config', type=str, + help='config file for upgrade parameters') + + if cli_arguments is None: + args = parser.parse_args() + else: + args = parser.parse_args(cli_arguments) + + defined_tasks = [] + defined_excludes = [] + + if args.tasks != '': + defined_tasks = args.tasks.split(",") + + if args.exclude != '': + defined_excludes = args.exclude.split(",") + + context = flow.create_context(args, procedure="migrate_cri") + context["inventory_regenerate_required"] = True + + flow.run( + tasks, + defined_tasks, + defined_excludes, + args.config, + context, + procedure_inventory_filepath=args.procedure_config, + cumulative_points=install.cumulative_points + ) + + +if __name__ == '__main__': + main() diff --git a/kubetool/procedures/reboot.py b/kubetool/procedures/reboot.py new file mode 100755 index 000000000..009a336ea --- /dev/null +++ b/kubetool/procedures/reboot.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python3 + +from collections import OrderedDict + +from kubetool.core import flow +from kubetool.procedures import install +from kubetool import system + + +def reboot(cluster): + if cluster.context.get('initial_procedure') != 'reboot': + raise ImportError('Invalid reboot.py usage, please use system.reboot_nodes') + + if not cluster.procedure_inventory.get("nodes"): + cluster.log.verbose('No nodes defined in procedure: all nodes will be rebooted') + else: + cluster.log.verbose('There are nodes defined in procedure: only defined will be rebooted') + + nodes = [] + + cluster.log.verbose('The following nodes will be rebooted:') + for node in cluster.procedure_inventory.get("nodes", cluster.nodes['all'].get_ordered_members_list(provide_node_configs=True)): + nodes.append(node['name']) + cluster.log.verbose(' - ' + node['name']) + + system.reboot_nodes(cluster.make_group_from_nodes(nodes), + try_graceful=cluster.procedure_inventory.get("graceful_reboot")) + + +tasks = OrderedDict({ + "reboot": reboot, + "overview": install.overview, +}) + + +def main(cli_arguments=None): + cli_help = ''' + Script for Kubernetes nodes graceful rebooting. 
+ + How to use: + + ''' + + parser = flow.new_parser(cli_help) + parser.add_argument('--tasks', + default='', + help='define comma-separated tasks to be executed') + + parser.add_argument('--exclude', + default='', + help='exclude comma-separated tasks from execution') + + parser.add_argument('procedure_config', nargs='?', metavar='procedure_config', type=str, + help='config file for reboot procedure') + + if cli_arguments is None: + args = parser.parse_args() + else: + args = parser.parse_args(cli_arguments) + + defined_tasks = [] + defined_excludes = [] + + if args.tasks != '': + defined_tasks = args.tasks.split(",") + + if args.exclude != '': + defined_excludes = args.exclude.split(",") + + context = flow.create_context(args, procedure='reboot') + context['inventory_regenerate_required'] = False + + flow.run( + tasks, + defined_tasks, + defined_excludes, + args.config, + context, + procedure_inventory_filepath=args.procedure_config, + cumulative_points=install.cumulative_points + ) + + +if __name__ == '__main__': + main() diff --git a/kubetool/procedures/remove_node.py b/kubetool/procedures/remove_node.py new file mode 100755 index 000000000..11a5c8be3 --- /dev/null +++ b/kubetool/procedures/remove_node.py @@ -0,0 +1,165 @@ +#!/usr/bin/env python3 + +from collections import OrderedDict + +from kubetool import kubernetes, haproxy, keepalived, coredns +from kubetool.core import flow +from kubetool.core.cluster import KubernetesCluster +from kubetool.core.group import NodeGroup +from kubetool.procedures import install + + +def _get_active_nodes(node_type: str, cluster: KubernetesCluster) -> NodeGroup: + all_nodes = None + if cluster.nodes.get(node_type) is not None: + all_nodes = cluster.nodes[node_type].get_nodes_for_removal() + if all_nodes is None or all_nodes.is_empty(): + cluster.log.debug("Skipped - no %s to remove" % node_type) + return + active_nodes = all_nodes.get_online_nodes() + disabled_nodes = all_nodes.exclude_group(active_nodes) + if active_nodes.is_empty(): + cluster.log.debug("Skipped - %s nodes are inactive: %s" % (node_type, ", ".join(disabled_nodes.nodes.keys()))) + return + if not disabled_nodes.is_empty(): + cluster.log.debug("Partly Skipped - several %s nodes are inactive: %s" + % (node_type, ", ".join(disabled_nodes.nodes.keys()))) + return active_nodes + + +def loadbalancer_remove_haproxy(cluster: KubernetesCluster): + nodes = _get_active_nodes("balancer", cluster) + if nodes is None: + return + nodes.call(haproxy.disable) + + +def loadbalancer_remove_keepalived(cluster: KubernetesCluster): + nodes = _get_active_nodes("keepalived", cluster) + if nodes is None: + return + nodes.call(keepalived.disable) + + +def remove_kubernetes_nodes(cluster: KubernetesCluster): + cluster.nodes['master'].include_group(cluster.nodes.get('worker')).get_nodes_for_removal() \ + .call(kubernetes.reset_installation_env) + + +def remove_node_finalize_inventory(cluster: KubernetesCluster, inventory_to_finalize): + if cluster.context.get('initial_procedure') != 'remove_node': + return inventory_to_finalize + + nodes_for_removal = cluster.nodes['all'].get_nodes_for_removal() + final_nodes = cluster.nodes['all'].get_final_nodes() + + # remove nodes from inventory if they in nodes for removal + # todo deletion of elements from collection to iterate over! 
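+    # NOTE: deleting items from a list while enumerating it makes the loop skip
+    # the element that follows each deleted one; rebuilding the list (or
+    # iterating over a copy) would be the safer pattern here.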
+ for i, node in enumerate(inventory_to_finalize['nodes']): + if nodes_for_removal.has_node(node["name"]): + del inventory_to_finalize['nodes'][i] + + # check if there are no more hosts where keepalived installed - remove according vrrp_ips + # todo deletion of elements from collection to iterate over! + for i, item in enumerate(inventory_to_finalize.get('vrrp_ips', [])): + if 'hosts' in item: + hosts = item['hosts'] + else: + from kubetool import keepalived + hosts = keepalived.get_default_node_names(inventory_to_finalize) + + for host in hosts: + if isinstance(host, dict): + host = host['name'] + if final_nodes.get_first_member(apply_filter={"name": host}) is None: + del inventory_to_finalize['vrrp_ips'][i] + + if inventory_to_finalize['services'].get('kubeadm', {}).get('apiServer', {}).get('certSANs'): + for node in nodes_for_removal.get_ordered_members_list(provide_node_configs=True): + hostnames = [node['name'], node['address'], node['internal_address']] + for name in hostnames: + if name in inventory_to_finalize['services']['kubeadm']['apiServer']['certSANs']: + inventory_to_finalize['services']['kubeadm']['apiServer']['certSANs'].remove(name) + + if inventory_to_finalize['services'].get('etc_hosts'): + for node in nodes_for_removal.get_ordered_members_list(provide_node_configs=True): + if inventory_to_finalize['services']['etc_hosts'].get(node['internal_address']): + del inventory_to_finalize['services']['etc_hosts'][node['internal_address']] + if inventory_to_finalize['services']['etc_hosts'].get(node['address']): + del inventory_to_finalize['services']['etc_hosts'][node['address']] + + coredns.enrich_add_hosts_config(inventory_to_finalize, cluster) + + return inventory_to_finalize + + +tasks = OrderedDict({ + "loadbalancer": { + "remove": { + "haproxy": loadbalancer_remove_haproxy, + "keepalived": loadbalancer_remove_keepalived + }, + "configure": { + "haproxy": install.deploy_loadbalancer_haproxy_configure, + "keepalived": install.deploy_loadbalancer_keepalived_configure + }, + }, + "update": { + "etc_hosts": install.system_prepare_dns_etc_hosts, + "coredns": install.deploy_coredns + }, + "remove_kubernetes_nodes": remove_kubernetes_nodes, + "overview": install.overview, +}) + + +def main(cli_arguments=None): + + cli_help = ''' + Script for removing node from Kubernetes cluster. 
+ + How to use: + + ''' + + parser = flow.new_parser(cli_help) + parser.add_argument('--tasks', + default='', + help='define comma-separated tasks to be executed') + + parser.add_argument('--exclude', + default='', + help='exclude comma-separated tasks from execution') + + parser.add_argument('procedure_config', metavar='procedure_config', type=str, + help='config file for remove_node procedure') + + if cli_arguments is None: + args = parser.parse_args() + else: + args = parser.parse_args(cli_arguments) + + defined_tasks = [] + defined_excludes = [] + + if args.tasks != '': + defined_tasks = args.tasks.split(",") + + if args.exclude != '': + defined_excludes = args.exclude.split(",") + + context = flow.create_context(args, procedure='remove_node') + context['inventory_regenerate_required'] = True + + flow.run( + tasks, + defined_tasks, + defined_excludes, + args.config, + context, + procedure_inventory_filepath=args.procedure_config + ) + + +if __name__ == '__main__': + main() diff --git a/kubetool/procedures/restore.py b/kubetool/procedures/restore.py new file mode 100755 index 000000000..577cca854 --- /dev/null +++ b/kubetool/procedures/restore.py @@ -0,0 +1,333 @@ +#!/usr/bin/env python3 + +import io +import json +import os +import re +import tarfile +import time +from collections import OrderedDict +import yaml + +from kubetool.core import utils, flow, defaults +from kubetool.core.cluster import KubernetesCluster +from kubetool.core.group import NodeGroup +from kubetool.procedures import install, backup +from kubetool import system, kubernetes + + +def missing_or_empty(file): + if not os.path.exists(file): + return True + content = open(file, 'r').read() + if re.search(r'^\s*$', content): + return True + + +def replace_config_from_backup_if_needed(procedure_inventory_filepath, config): + if missing_or_empty(config): + print('Config is missing or empty - retrieving config from backup archive...') + with open(utils.get_resource_absolute_path(procedure_inventory_filepath), 'r') as stream: + procedure = yaml.safe_load(stream) + backup_location = procedure.get("backup_location") + if not backup_location: + raise Exception('Backup location is not specified in procedure') + + print('Unpacking cluster.yaml...') + with tarfile.open(backup_location, 'r:gz') as tar: + member = tar.getmember('original_cluster.yaml') + tar.makefile(member, config) + tar.close() + + +def unpack_data(cluster): + backup_tmp_directory = backup.prepare_backup_tmpdir(cluster) + backup_file_source = cluster.procedure_inventory.get('backup_location') + + if not backup_file_source: + raise Exception('Backup source not specified in procedure') + + backup_file_source = utils.get_resource_absolute_path(backup_file_source) + if not os.path.isfile(backup_file_source): + raise FileNotFoundError('Backup file "%s" not found' % backup_file_source) + + cluster.log.debug('Unpacking all data...') + with tarfile.open(backup_file_source, 'r:gz') as tar: + for member in tar: + if member.isdir(): + continue + fname = os.path.join(backup_tmp_directory, member.name) + cluster.log.debug(fname) + fname_parts = fname.split('/') + if len(fname_parts) > 1: + fname_dir = "/".join(fname_parts[:-1]) + if not os.path.isdir(fname_dir): + os.makedirs(fname_dir, exist_ok=True) + tar.makefile(member, fname) + tar.close() + + descriptor_filepath = os.path.join(backup_tmp_directory, 'descriptor.yaml') + if not os.path.isfile(descriptor_filepath): + raise FileNotFoundError('Descriptor not found in backup file') + + with open(descriptor_filepath, 'r') as 
stream: + cluster.context['backup_descriptor'] = yaml.safe_load(stream) + + +def verify_backup_data(cluster): + if not cluster.context['backup_descriptor'].get('kubernetes', {}).get('version'): + cluster.log.debug('Not possible to verify Kubernetes version, because descriptor do not contain such information') + return + + if cluster.context['backup_descriptor']['kubernetes']['version'] != cluster.inventory['services']['kubeadm']['kubernetesVersion']: + cluster.log.warning('Installed kubernetes versions do not match version from backup') + cluster.log.verbose('Cluster re-parse required') + if not cluster.raw_inventory.get('services'): + cluster.raw_inventory['services'] = {} + if not cluster.raw_inventory['services'].get('kubeadm'): + cluster.raw_inventory['services']['kubeadm'] = {} + cluster.raw_inventory['services']['kubeadm']['kubernetesVersion'] = cluster.context['backup_descriptor']['kubernetes']['version'] + cluster._inventory = defaults.enrich_inventory(cluster, cluster.raw_inventory) + else: + cluster.log.debug('Kubernetes version from backup is correct') + + +def stop_cluster(cluster): + cluster.log.debug('Stopping the existing cluster...') + cri_impl = cluster.inventory['services']['cri']['containerRuntime'] + if cri_impl == "docker": + result = cluster.nodes['master'].sudo('systemctl stop kubelet; ' + 'sudo docker kill $(sudo docker ps -q); ' + 'sudo docker rm -f $(sudo docker ps -a -q); ' + 'sudo docker ps -a; ' + 'sudo rm -rf /var/lib/etcd; ' + 'sudo mkdir -p /var/lib/etcd', warn=True) + else: + result = cluster.nodes['master'].sudo('systemctl stop kubelet; ' + 'sudo crictl rm -fa; ' + 'sudo crictl ps -a; ' + 'sudo rm -rf /var/lib/etcd; ' + 'sudo mkdir -p /var/lib/etcd', warn=True) + cluster.log.verbose(result) + + +def restore_thirdparties(cluster): + custom_thirdparties = cluster.procedure_inventory.get('restore_plan', {}).get('thirdparties', {}) + if custom_thirdparties: + for name, value in custom_thirdparties.items(): + cluster.inventory['services']['thirdparties'][name]['source'] = value['source'] + if value.get('sha1'): + cluster.inventory['services']['thirdparties'][name]['sha1'] = value['sha1'] + + install.system_prepare_thirdparties(cluster) + + +def import_nodes(cluster): + for node in cluster.nodes['all'].get_ordered_members_list(provide_node_configs=True): + node['connection'].put(os.path.join(cluster.context['backup_tmpdir'], 'nodes_data', '%s.tar.gz' % node['name']), + '/tmp/kubetools-backup.tar.gz') + cluster.log.debug('Backup \'%s\' uploaded' % node['name']) + + cluster.log.debug('Unpacking backup...') + result = cluster.nodes['all'].sudo( + 'chattr -i /etc/resolv.conf; sudo tar xzvf /tmp/kubetools-backup.tar.gz -C / --overwrite && sudo chattr +i /etc/resolv.conf') + cluster.log.debug(result) + + +def import_etcd(cluster: KubernetesCluster): + etcd_all_certificates = cluster.procedure_inventory.get('restore_plan', {}).get('etcd', {}).get('certificates', {}) + etcd_cert = etcd_all_certificates.get('cert', cluster.globals['etcd']['default_arguments']['cert']) + etcd_key = etcd_all_certificates.get('key', cluster.globals['etcd']['default_arguments']['key']) + etcd_cacert = etcd_all_certificates.get('cacert', cluster.globals['etcd']['default_arguments']['cacert']) + etcd_peer_cert = etcd_all_certificates.get('peer_cert', cluster.globals['etcd']['default_arguments']['peer_cert']) + etcd_peer_key = etcd_all_certificates.get('peer_key', cluster.globals['etcd']['default_arguments']['peer_key']) + etcd_peer_cacert = etcd_all_certificates.get('peer_cacert', + 
cluster.globals['etcd']['default_arguments']['peer_cacert']) + + etcd_image = cluster.procedure_inventory.get('restore_plan', {}).get('etcd', {}).get('image') + if not etcd_image: + etcd_image = cluster.context['backup_descriptor'].get('etcd', {}).get('image') + if not etcd_image: + raise Exception('Unknown ETCD image to restore from') + cluster.log.verbose('ETCD will be restored from the following image: ' + etcd_image) + + cluster.log.debug('Uploading ETCD snapshot...') + snap_name = '/var/lib/etcd/etcd-snapshot%s.db' % int(round(time.time() * 1000)) + cluster.nodes['master'].put(os.path.join(cluster.context['backup_tmpdir'], 'etcd.db'), snap_name, sudo=True) + + initial_cluster_list = [] + initial_cluster_list_without_names = [] + for master in cluster.nodes['master'].get_ordered_members_list(provide_node_configs=True): + initial_cluster_list.append(master['name'] + '=https://' + master["internal_address"] + ":2380") + initial_cluster_list_without_names.append(master["internal_address"] + ":2379") + initial_cluster = ','.join(initial_cluster_list) + + if "docker" == cluster.inventory['services']['cri']['containerRuntime']: + cont_runtime = "docker" + else: + cont_runtime = "podman" + + etcd_instances = 0 + for master in cluster.nodes['master'].get_ordered_members_list(provide_node_configs=True): + cluster.log.debug('Restoring ETCD member ' + master['name']) + master_conn: NodeGroup = master['connection'] + master_conn.sudo( + f'chmod 777 {snap_name} && ' + f'sudo ls -la {snap_name} && ' + f'sudo etcdctl snapshot restore {snap_name} ' + f'--name={master["name"]} ' + f'--data-dir=/var/lib/etcd/snapshot ' + f'--initial-cluster={initial_cluster} ' + f'--initial-advertise-peer-urls=https://{master["internal_address"]}:2380', + hide=False) + + etcd_id = master_conn.sudo( + f'mv /var/lib/etcd/snapshot/member /var/lib/etcd/member && ' + f'sudo rm -rf /var/lib/etcd/snapshot {snap_name} && ' + f'sudo {cont_runtime} run -d --network host -p 2379:2379 -p 2380:2380 ' + f'-e ETCDCTL_API=3 ' + f'-v /var/lib/etcd:/var/lib/etcd ' + f'-v /etc/kubernetes/pki:/etc/kubernetes/pki ' + f'{etcd_image} etcd ' + f'--advertise-client-urls=https://{master["internal_address"]}:2379 ' + f'--cert-file={etcd_cert} ' + f'--key-file={etcd_key} ' + f'--trusted-ca-file={etcd_cacert} ' + f'--client-cert-auth=true ' + f'--data-dir=/var/lib/etcd ' + f'--initial-advertise-peer-urls=https://{master["internal_address"]}:2380 ' + f'--initial-cluster={initial_cluster} ' + f'--listen-client-urls=https://127.0.0.1:2379,https://{master["internal_address"]}:2379 ' + f'--listen-peer-urls=https://{master["internal_address"]}:2380 ' + f'--name={master["name"]} ' + f'--peer-client-cert-auth=true ' + f'--peer-cert-file={etcd_peer_cert} ' + f'--peer-key-file={etcd_peer_key} ' + f'--peer-trusted-ca-file={etcd_peer_cacert} ' + ).get_simple_out().strip() + + master_conn.sudo(f'{cont_runtime} logs {etcd_id}', hide=False) + etcd_instances += 1 + + # After restore check db size equal, cluster health and leader elected + master_conn = cluster.nodes['master'].get_first_member() + cluster_health_raw = master_conn.sudo(f'etcdctl endpoint health --cluster -w json').get_simple_out() + cluster.log.verbose(cluster_health_raw) + cluster_status_raw = master_conn.sudo(f'etcdctl endpoint status --cluster -w json').get_simple_out() + cluster.log.verbose(cluster_status_raw) + cluster.nodes['master'].sudo(f"{cont_runtime} rm -f $(sudo {cont_runtime} ps -aq)") # delete containers after all + cluster_health = json.load(io.StringIO(cluster_health_raw.strip())) + 
cluster_status = json.load(io.StringIO(cluster_status_raw.lower().strip()))
+
+    # Check all members are healthy
+    if len(cluster_health) != etcd_instances:
+        raise Exception('Some ETCD members are not healthy')
+    for item in cluster_health:
+        if not item.get('health'):
+            raise Exception('ETCD member "%s" is not healthy' % item.get('endpoint'))
+    cluster.log.debug('All ETCD members are healthy!')
+
+    # Check leader elected
+    elected_leader = None
+    for item in cluster_status:
+        leader = item.get('status', {}).get('leader')
+        if not leader:
+            raise Exception('ETCD member "%s" does not have a leader' % item.get('endpoint'))
+        if not elected_leader:
+            elected_leader = leader
+        elif elected_leader != leader:
+            raise Exception('ETCD leaders are not the same')
+    cluster.log.debug('Leader "%s" elected' % elected_leader)
+
+    # Check DB size is correct
+    backup_source = cluster.context['backup_descriptor'].get('etcd', {}).get('source')
+    etcd_statuses_from_descriptor = cluster.context['backup_descriptor'].get('etcd', {}).get('status', {})
+    if backup_source and etcd_statuses_from_descriptor and etcd_statuses_from_descriptor.get(backup_source, {}).get('status', {}).get('dbsize'):
+        expected_dbsize = int(etcd_statuses_from_descriptor.get(backup_source, {}).get('status', {}).get('dbsize'))
+        for item in cluster_status:
+            real_dbsize = int(item.get('status', {}).get('dbsize'))
+            if not real_dbsize:
+                raise Exception('ETCD member "%s" does not have a DB size' % item.get('endpoint'))
+            cluster.log.verbose('Endpoint "%s" DB real size %s, expected size %s' % (item.get('endpoint'), real_dbsize, expected_dbsize))
+            # the restored DB should have an equal or greater size
+            if expected_dbsize > real_dbsize:
+                raise Exception('ETCD member "%s" has invalid DB size' % item.get('endpoint'))
+        cluster.log.debug('DB size "%s" is correct' % expected_dbsize)
+    else:
+        cluster.log.verbose('It is not possible to verify DB size - the descriptor does not contain such information')
+
+
+def reboot(cluster):
+    system.reboot_nodes(cluster.nodes['all'], try_graceful=False)
+    kubernetes.wait_for_nodes(cluster.nodes['master'])
+
+
+tasks = OrderedDict({
+    "prepare": {
+        "unpack": unpack_data,
+        "verify_backup_data": verify_backup_data,
+        "stop_cluster": stop_cluster,
+    },
+    "restore": {
+        "thirdparties": restore_thirdparties,
+    },
+    "import": {
+        "nodes": import_nodes,
+        "etcd": import_etcd
+    },
+    "reboot": reboot
+})
+
+
+def main(cli_arguments=None):
+    cli_help = '''
+    Script for restoring Kubernetes resources and node contents from a backup file.
+ + How to use: + + ''' + + parser = flow.new_parser(cli_help) + parser.add_argument('--tasks', + default='', + help='define comma-separated tasks to be executed') + + parser.add_argument('--exclude', + default='', + help='exclude comma-separated tasks from execution') + + parser.add_argument('procedure_config', metavar='procedure_config', type=str, + help='config file for restore procedure') + + if cli_arguments is None: + args = parser.parse_args() + else: + args = parser.parse_args(cli_arguments) + + defined_tasks = [] + defined_excludes = [] + + if args.tasks != '': + defined_tasks = args.tasks.split(",") + + if args.exclude != '': + defined_excludes = args.exclude.split(",") + + context = flow.create_context(args, procedure='restore') + context['inventory_regenerate_required'] = False + + replace_config_from_backup_if_needed(args.procedure_config, args.config) + + flow.run( + tasks, + defined_tasks, + defined_excludes, + args.config, + context, + procedure_inventory_filepath=args.procedure_config, + cumulative_points=install.cumulative_points + ) + + +if __name__ == '__main__': + main() diff --git a/kubetool/procedures/upgrade.py b/kubetool/procedures/upgrade.py new file mode 100755 index 000000000..b64661b7e --- /dev/null +++ b/kubetool/procedures/upgrade.py @@ -0,0 +1,221 @@ +#!/usr/bin/env python3 + +from collections import OrderedDict + +import yaml + +from kubetool.core.flow import load_inventory +from kubetool.core.yaml_merger import default_merger +from kubetool.core import flow +from kubetool.procedures import install +from kubetool import kubernetes, plugins, system +from itertools import chain + + +def system_prepare_thirdparties(cluster): + if not cluster.inventory['services'].get('thirdparties', {}): + cluster.log.debug("Skipped - no thirdparties defined in config file") + return + + install.system_prepare_thirdparties(cluster) + + +def prepull_images(cluster): + cluster.log.debug("Prepulling Kubernetes images...") + upgrade_group = kubernetes.get_group_for_upgrade(cluster) + upgrade_group.call(kubernetes.images_grouped_prepull) + + +def kubernetes_upgrade(cluster): + version = cluster.inventory["services"]["kubeadm"]["kubernetesVersion"] + upgrade_group = kubernetes.get_group_for_upgrade(cluster) + + drain_timeout = cluster.procedure_inventory.get('drain_timeout') + grace_period = cluster.procedure_inventory.get('grace_period') + + kubernetes.upgrade_first_master(version, upgrade_group, cluster, + drain_timeout=drain_timeout, grace_period=grace_period) + + # After first master upgrade is finished we may loose our CoreDNS changes. + # Thus, we need to re-apply our CoreDNS changes immediately after first master upgrade. 
+ install.deploy_coredns(cluster) + + kubernetes.upgrade_other_masters(version, upgrade_group, cluster, + drain_timeout=drain_timeout, grace_period=grace_period) + if cluster.nodes.get('worker', []): + kubernetes.upgrade_workers(version, upgrade_group, cluster, + drain_timeout=drain_timeout, grace_period=grace_period) + + cluster.nodes['master'].get_first_member().sudo('rm -f /etc/kubernetes/nodes-k8s-versions.txt') + cluster.context['cached_nodes_versions_cleaned'] = True + + +def kubernetes_cleanup_nodes_versions(cluster): + if not cluster.context.get('cached_nodes_versions_cleaned', False): + cluster.log.verbose('Cached nodes versions required') + cluster.nodes['master'].get_first_member().sudo('rm -f /etc/kubernetes/nodes-k8s-versions.txt') + else: + cluster.log.verbose('Cached nodes versions already cleaned') + + +def upgrade_packages(cluster): + upgrade_version = cluster.context["upgrade_version"] + + packages = cluster.procedure_inventory.get(upgrade_version, {}).get("packages", {}) + if packages.get("install") or packages.get("upgrade") or packages.get("remove"): + install.system_prepare_package_manager_manage_packages(cluster) + + +def upgrade_plugins(cluster): + upgrade_version = cluster.context["upgrade_version"] + + # upgrade_candidates is a source of upgradeable plugins, not list of plugins to upgrade. + # Some plugins from upgrade_candidates will not be upgraded, because they have "install: false" + upgrade_candidates = {} + defined_plugins = cluster.procedure_inventory.get(upgrade_version, {}).get("plugins", {}).keys() + for plugin in chain(defined_plugins, plugins.oob_plugins): + # todo: check for OOB plugins that the version has changed, so that we do not perform redundant installations + upgrade_candidates[plugin] = cluster.inventory["plugins"][plugin] + + plugins.install(cluster, upgrade_candidates) + + +tasks = OrderedDict({ + "verify_upgrade_versions": kubernetes.verify_upgrade_versions, + "thirdparties": system_prepare_thirdparties, + "prepull_images": prepull_images, + "kubernetes": kubernetes_upgrade, + "kubernetes_cleanup": kubernetes_cleanup_nodes_versions, + "packages": upgrade_packages, + "plugins": upgrade_plugins, + "overview": install.overview +}) + + +def upgrade_finalize_inventory(cluster, inventory): + if cluster.context.get("initial_procedure") != "upgrade": + return inventory + upgrade_version = cluster.context.get("upgrade_version") + + if not inventory['services'].get('kubeadm'): + inventory['services']['kubeadm'] = {} + inventory['services']['kubeadm']['kubernetesVersion'] = upgrade_version + + # if thirdparties was not defined in procedure.yaml, + # then no need to forcibly place them: user may want to use default + if cluster.procedure_inventory.get(upgrade_version, {}).get('thirdparties'): + inventory['services']['thirdparties'] = cluster.procedure_inventory[upgrade_version]['thirdparties'] + + if cluster.procedure_inventory.get(upgrade_version, {}).get("plugins"): + if not inventory.get("plugins"): + inventory["plugins"] = {} + default_merger.merge(inventory["plugins"], cluster.procedure_inventory[upgrade_version]["plugins"]) + + if cluster.procedure_inventory.get(upgrade_version, {}).get("packages"): + if not inventory.get("services"): + inventory["services"] = {} + if not inventory["services"].get("packages"): + inventory["services"]["packages"] = {} + packages = cluster.procedure_inventory[upgrade_version]["packages"] + default_merger.merge(inventory["services"]["packages"], packages) + + return inventory + + +def main(cli_arguments=None): + 
cli_help = ''' + Script for automated upgrade of the entire Kubernetes cluster to a new version. + + How to use: + + ''' + + parser = flow.new_parser(cli_help) + parser.add_argument('--tasks', + default='', + help='define comma-separated tasks to be executed') + + parser.add_argument('--exclude', + default='', + help='exclude comma-separated tasks from execution') + + parser.add_argument('procedure_config', metavar='procedure_config', type=str, + help='config file for upgrade parameters') + + if cli_arguments is None: + args = parser.parse_args() + else: + args = parser.parse_args(cli_arguments) + + defined_tasks = [] + defined_excludes = [] + + if args.tasks != '': + defined_tasks = args.tasks.split(",") + + if args.exclude != '': + defined_excludes = args.exclude.split(",") + + with open(args.procedure_config, 'r') as stream: + procedure_config = yaml.safe_load(stream) + + os_family = preload_os_family(args.config) + upgrade_plan = verify_upgrade_plan(procedure_config.get('upgrade_plan')) + verification_version_result = kubernetes.verify_target_version(upgrade_plan[-1]) + + if (args.tasks or args.exclude) and len(upgrade_plan) > 1: + raise Exception("Usage of '--tasks' and '--exclude' is not allowed when upgrading to more than one version") + + # We need to save dumps for all iterations, so we forcefully disable dump cleanup after first iteration onwards. + disable_dump_cleanup = False + for version in upgrade_plan: + + # reset context from previous installation + context = flow.create_context(args, procedure='upgrade') + context['inventory_regenerate_required'] = True + context['upgrade_version'] = version + context['dump_filename_prefix'] = version + context['os'] = os_family + if disable_dump_cleanup: + context['execution_arguments']['disable_dump_cleanup'] = True + + flow.run( + tasks, + defined_tasks, + defined_excludes, + args.config, + context, + procedure_inventory_filepath=args.procedure_config, + cumulative_points=install.cumulative_points + ) + + disable_dump_cleanup = True + if verification_version_result: + print(verification_version_result) + + +def verify_upgrade_plan(upgrade_plan): + if not upgrade_plan: + raise Exception('Upgrade plan is not specified or empty') + + upgrade_plan.sort() + + previous_version = None + for i in range(0, len(upgrade_plan)): + version = upgrade_plan[i] + if previous_version is not None: + kubernetes.test_version_upgrade_possible(previous_version, version) + previous_version = version + + print('Loaded upgrade plan: current ⭢', ' ⭢ '.join(upgrade_plan)) + + return upgrade_plan + + +def preload_os_family(inventory_filepath): + cluster = load_inventory(inventory_filepath, flow.create_context({'disable_dump': True})) + return system.get_os_family(cluster) + + +if __name__ == '__main__': + main() diff --git a/kubetool/psp.py b/kubetool/psp.py new file mode 100644 index 000000000..5550536b3 --- /dev/null +++ b/kubetool/psp.py @@ -0,0 +1,459 @@ +import io +import os +import uuid + +import ruamel.yaml +import yaml + +from kubetool import kubernetes +from kubetool.core import utils +from kubetool.core.group import NodeGroup + +privileged_policy_filename = "privileged.yaml" +policies_file_path = "./resources/psp/" +tmp_filepath_pattern = "/tmp/%s" + +psp_list_option = "psp-list" +roles_list_option = "roles-list" +bindings_list_option = "bindings-list" + +valid_flags = ["enabled", "disabled"] +provided_oob_policies = ["default", "host-network", "anyuid"] + +loaded_oob_policies = {} + + +def enrich_inventory(inventory, _): + global loaded_oob_policies + 
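+    # The out-of-the-box policy manifests (default, host-network, anyuid) are read
+    # once from resources/psp/ and cached in the module-level dict below, so later
+    # tasks can resolve them via resolve_oob_scope() without re-reading the files.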
loaded_oob_policies = load_oob_policies_files() + + # check flags + verify_flag("pod-security", inventory["rbac"]["psp"]["pod-security"]) + for oob_name in provided_oob_policies: + verify_flag("oob-policies", inventory["rbac"]["psp"]["oob-policies"][oob_name]) + + # validate custom + custom_policies = inventory["rbac"]["psp"]["custom-policies"] + verify_custom(custom_policies) + + # do not perform enrichment if security disabled + if not is_security_enabled(inventory): + return inventory + + # if security enabled, then add PodSecurityPolicy admission plugin + enabled_admissions = inventory["services"]["kubeadm"]["apiServer"]["extraArgs"]["enable-admission-plugins"] + if 'PodSecurityPolicy' not in enabled_admissions: + enabled_admissions = "%s,PodSecurityPolicy" % enabled_admissions + inventory["services"]["kubeadm"]["apiServer"]["extraArgs"]["enable-admission-plugins"] = enabled_admissions + + return inventory + + +def manage_psp_enrichment(inventory, cluster): + if cluster.context.get('initial_procedure') != 'manage_psp': + return inventory + if "psp" not in cluster.procedure_inventory: + raise Exception("'manage_psp' config should have 'psp' in its root") + + procedure_config = cluster.procedure_inventory["psp"] + current_config = cluster.inventory["rbac"]["psp"] + + # check flags + if "pod-security" in procedure_config: + verify_flag("pod-security", procedure_config["pod-security"]) + if "oob-policies" in procedure_config: + for oob_policy in provided_oob_policies: + if oob_policy in procedure_config["oob-policies"]: + verify_flag("oob-policy", procedure_config["oob-policies"][oob_policy]) + + # validate added custom + custom_add_policies = procedure_config.get("add-policies", {}) + verify_custom(custom_add_policies) + + # validate deleted custom + custom_delete_policies = procedure_config.get("delete-policies", {}) + verify_custom(custom_delete_policies) + + # forbid managing OOB if security will be disabled + current_security_state = current_config["pod-security"] + final_security_state = procedure_config.get("pod-security", current_security_state) + if final_security_state == "disabled" and procedure_config.get("oob-policies"): + raise Exception("OOB policies can not be configured when security is disabled") + + # forbid defining 'custom-policies' in procedure inventory + if "custom-policies" in procedure_config: + raise Exception("'manage_psp' procedure should not be configured using 'custom-policies', " + "use 'add-policies' or 'delete-policies' instead") + + return inventory + + +def verify_flag(owner, value): + if value not in valid_flags: + raise Exception("incorrect value for %s, valid values: %s" % (owner, valid_flags)) + + +def verify_custom(custom_scope): + psp_list = custom_scope.get(psp_list_option, None) + if psp_list: + verify_custom_list(psp_list, "PSP", ["PodSecurityPolicy"]) + + roles_list = custom_scope.get(roles_list_option, None) + if roles_list: + verify_custom_list(roles_list, "role", ["Role", "ClusterRole"]) + + bindings_list = custom_scope.get(bindings_list_option, None) + if bindings_list: + verify_custom_list(bindings_list, "binding", ["RoleBinding", "ClusterRoleBinding"]) + + +def verify_custom_list(custom_list, type, supported_kinds): + for item in custom_list: + if item["kind"] not in supported_kinds: + raise Exception("Type %s should have %s kind" % (type, supported_kinds)) + # forbid using 'oob-' prefix in order to avoid conflicts of our policies and users policies + if item["metadata"]["name"].startswith("oob-"): + raise Exception("Name %s is not 
allowed for custom %s" % (item["metadata"]["name"], type)) + + +def finalize_inventory(cluster, inventory_to_finalize): + if cluster.context.get('initial_procedure') != 'manage_psp': + return inventory_to_finalize + procedure_config = cluster.procedure_inventory["psp"] + + if "rbac" not in inventory_to_finalize: + inventory_to_finalize["rbac"] = {} + if "psp" not in inventory_to_finalize["rbac"]: + inventory_to_finalize["rbac"]["psp"] = {} + current_config = inventory_to_finalize["rbac"]["psp"] + + # Perform custom-policies lists changes. + # Perform changes only if there are any "custom-policies" or "add-policies" in inventory, + # do not perform changes if only "delete-policies" defined, there is nothing to delete from inventory in this case. + adding_custom_policies = procedure_config.get("add-policies", {}) + deleting_custom_policies = procedure_config.get("delete-policies", {}) + existing_custom_policies = current_config.get("custom-policies", {}) + if existing_custom_policies or adding_custom_policies: + # if custom policies are not defined in inventory, then we need to create custom policies ourselves + if not existing_custom_policies: + current_config["custom-policies"] = {} + current_config["custom-policies"] = merge_custom_policies(existing_custom_policies, + adding_custom_policies, + deleting_custom_policies) + + # merge flags from procedure config and cluster config + current_config["pod-security"] = procedure_config.get("pod-security", current_config.get("pod-security", "enabled")) + if "oob-policies" in procedure_config: + if "oob-policies" not in current_config: + current_config["oob-policies"] = procedure_config["oob-policies"] + else: + for oob_policy in procedure_config["oob-policies"]: + current_config["oob-policies"][oob_policy] = procedure_config["oob-policies"][oob_policy] + + return inventory_to_finalize + + +def merge_custom_policies(old_policies, added_policies, deleted_policies): + return { + psp_list_option: merge_policy_lists(old_policies.get(psp_list_option, []), + added_policies.get(psp_list_option, []), + deleted_policies.get(psp_list_option, [])), + roles_list_option: merge_policy_lists(old_policies.get(roles_list_option, []), + added_policies.get(roles_list_option, []), + deleted_policies.get(roles_list_option, [])), + bindings_list_option: merge_policy_lists(old_policies.get(bindings_list_option, []), + added_policies.get(bindings_list_option, []), + deleted_policies.get(bindings_list_option, [])) + } + + +def merge_policy_lists(old_list, added_list, deleted_list): + resulting_list = added_list + added_names_list = [item["metadata"]["name"] for item in added_list] + deleted_names_list = [item["metadata"]["name"] for item in deleted_list] + for old_item in old_list: + old_item_name = old_item["metadata"]["name"] + if old_item_name in added_names_list or old_item_name in deleted_names_list: + # skip old item, since it was either deleted, replaced by new item, or deleted and then replaced + continue + # old item is nor deleted, nor updated, then we need to preserve it in resulting list + resulting_list.append(old_item) + + return resulting_list + + +def install_psp_task(cluster): + if not is_security_enabled(cluster.inventory): + cluster.log.debug("Pod security disabled, skipping policies installation...") + return + + first_master = cluster.nodes["master"].get_first_member() + + cluster.log.debug("Installing OOB policies...") + first_master.call(manage_policies, + manage_type="apply", + 
manage_scope=resolve_oob_scope(cluster.inventory["rbac"]["psp"]["oob-policies"], "enabled")) + + cluster.log.debug("Installing custom policies...") + first_master.call(manage_policies, + manage_type="apply", + manage_scope=cluster.inventory["rbac"]["psp"]["custom-policies"]) + + +def delete_custom_task(cluster): + if "delete-policies" not in cluster.procedure_inventory["psp"]: + cluster.log.debug("No 'delete-policies' specified, skipping...") + return + + cluster.log.debug("Deleting custom 'delete-policies'") + first_master = cluster.nodes["master"].get_first_member() + first_master.call(manage_policies, + manage_type="delete", + manage_scope=cluster.procedure_inventory["psp"]["delete-policies"]) + + +def add_custom_task(cluster): + if "add-policies" not in cluster.procedure_inventory["psp"]: + cluster.log.debug("No 'add-policies' specified, skipping...") + return + + cluster.log.debug("Applying custom 'add-policies'") + first_master = cluster.nodes["master"].get_first_member() + first_master.call(manage_policies, + manage_type="apply", + manage_scope=cluster.procedure_inventory["psp"]["add-policies"]) + + +def reconfigure_oob_task(cluster): + target_security_state = cluster.procedure_inventory["psp"].get("pod-security") + oob_policies = cluster.procedure_inventory["psp"].get("oob-policies") + + # reconfigure OOB only if state will be changed, or OOB configuration was changed + if not target_security_state and not oob_policies: + cluster.log.debug("No need to reconfigure OOB policies, skipping...") + return + + first_master = cluster.nodes["master"].get_first_member() + + cluster.log.debug("Deleting all OOB policies...") + first_master.call(delete_privileged_policy) + first_master.call(manage_policies, manage_type="delete", manage_scope=resolve_oob_scope(loaded_oob_policies, "all")) + + if target_security_state == "disabled": + cluster.log.debug("Security disabled, OOB will not be recreated") + return + + cluster.log.debug("Recreating all OOB policies...") + policies_to_recreate = {} + procedure_config = cluster.procedure_inventory["psp"].get("oob-policies", {}) + current_config = cluster.inventory["rbac"]["psp"]["oob-policies"] + for policy in provided_oob_policies: + if procedure_config.get(policy, current_config[policy]) == "enabled": + policies_to_recreate[policy] = True + first_master.call(apply_privileged_policy) + first_master.call(manage_policies, manage_type="apply", manage_scope=resolve_oob_scope(policies_to_recreate, "all")) + + +def reconfigure_plugin_task(cluster): + target_state = cluster.procedure_inventory["psp"].get("pod-security") + + if not target_state: + cluster.log.debug("Security plugin will not be reconfigured") + return + + first_master = cluster.nodes["master"].get_first_member() + + cluster.log.debug("Updating kubeadm config map") + result = first_master.call(update_kubeadm_configmap, target_state=target_state) + final_admission_plugins_list = list(result.values())[0] + + # update api-server config on all masters + cluster.log.debug("Updating kube-apiserver configs on masters") + cluster.nodes["master"].call(update_kubeapi_config, plugins_list=final_admission_plugins_list) + + +def restart_pods_task(cluster, disable_eviction=False): + first_master = cluster.nodes["master"].get_first_member() + + cluster.log.debug("Drain-Uncordon all nodes to restart pods") + kube_nodes = cluster.nodes["master"].include_group(cluster.nodes["worker"]) + for node in kube_nodes.get_ordered_members_list(provide_node_configs=True): + first_master.sudo( + 
kubernetes.prepare_drain_command(node, cluster.inventory['services']['kubeadm']['kubernetesVersion'], + cluster.globals, disable_eviction, cluster.nodes), hide=False) + first_master.sudo("kubectl uncordon %s" % node["name"], hide=False) + + cluster.log.debug("Restarting daemon-sets...") + daemon_sets = yaml.safe_load(list(first_master.sudo("kubectl get ds -A -o yaml").values())[0].stdout) + for ds in daemon_sets["items"]: + first_master.sudo("kubectl rollout restart ds %s -n %s" % (ds["metadata"]["name"], ds["metadata"]["namespace"])) + + # we do not know to wait for, only for system pods maybe + cluster.log.debug("Waiting for system pods...") + first_master.call(kubernetes.wait_for_any_pods, connection=None) + + +def update_kubeadm_configmap(first_master, target_state): + yaml = ruamel.yaml.YAML() + + # load kubeadm config map and retrieve cluster config + result = first_master.sudo("kubectl get cm kubeadm-config -n kube-system -o yaml") + kubeadm_cm = yaml.load(list(result.values())[0].stdout) + cluster_config = yaml.load(kubeadm_cm["data"]["ClusterConfiguration"]) + + # resolve resulting admission plugins list + final_plugins_string = resolve_final_plugins_list(cluster_config, target_state) + + # update kubeadm config map with updated plugins list + cluster_config["apiServer"]["extraArgs"]["enable-admission-plugins"] = final_plugins_string + buf = io.StringIO() + yaml.dump(cluster_config, buf) + kubeadm_cm["data"]["ClusterConfiguration"] = buf.getvalue() + + # apply updated kubeadm config map + buf = io.StringIO() + yaml.dump(kubeadm_cm, buf) + filename = uuid.uuid4().hex + first_master.put(buf, "/tmp/%s.yaml" % filename) + first_master.sudo("kubectl apply -f /tmp/%s.yaml" % filename) + first_master.sudo("rm -f /tmp/%s.yaml" % filename) + + return final_plugins_string + + +def update_kubeapi_config(masters, plugins_list): + yaml = ruamel.yaml.YAML() + + for master in masters.get_ordered_members_list(): + result = master.sudo("cat /etc/kubernetes/manifests/kube-apiserver.yaml") + + # update kube-apiserver config with updated plugins list + conf = yaml.load(list(result.values())[0].stdout) + new_command = [cmd for cmd in conf["spec"]["containers"][0]["command"] if "enable-admission-plugins" not in cmd] + new_command.append("--enable-admission-plugins=%s" % plugins_list) + conf["spec"]["containers"][0]["command"] = new_command + + # place updated config on master + buf = io.StringIO() + yaml.dump(conf, buf) + master.put(buf, "/etc/kubernetes/manifests/kube-apiserver.yaml", sudo=True) + + # force kube-apiserver pod restart, then wait for api to become available + masters.get_first_member().call(utils.wait_command_successful, + command="kubectl delete pod -n kube-system " + "$(sudo kubectl get pod -n kube-system " + "| grep 'kube-apiserver' | awk '{ print $1 }')") + masters.get_first_member().call(utils.wait_command_successful, command="kubectl get pod -A") + + +def is_security_enabled(inventory): + return inventory["rbac"]["psp"]["pod-security"] == "enabled" + + +def apply_privileged_policy(group): + return manage_privileged_from_file(group, privileged_policy_filename, "apply") + + +def delete_privileged_policy(group): + return manage_privileged_from_file(group, privileged_policy_filename, "delete") + + +def manage_privileged_from_file(group: NodeGroup, filename, manage_type): + if manage_type not in ["apply", "delete"]: + raise Exception("unexpected manage type for privileged policy") + local_path = utils.get_resource_absolute_path(os.path.join(policies_file_path, filename), 
script_relative=True) + remote_path = tmp_filepath_pattern % filename + group.put(local_path, remote_path, backup=True, sudo=True, binary=False) + + return group.sudo("kubectl %s -f %s" % (manage_type, remote_path), warn=True) + + +def resolve_oob_scope(oob_policies_conf, selector): + result = { + psp_list_option: [], + roles_list_option: [], + bindings_list_option: [] + } + + for key, value in oob_policies_conf.items(): + if key not in provided_oob_policies: + raise Exception("Unknown oob policy configured") + if value == selector or selector == "all": + policy = loaded_oob_policies[key] + if "psp" in policy: + result[psp_list_option].append(policy["psp"]) + if "role" in policy: + result[roles_list_option].append(policy["role"]) + if "binding" in policy: + result[bindings_list_option].append(policy["binding"]) + + return result + + +def load_oob_policies_files(): + oob_policies = {} + for oob_name in provided_oob_policies: + local_path = utils.get_resource_absolute_path(os.path.join(policies_file_path, "%s.yaml" % oob_name), + script_relative=True) + with open(local_path) as stream: + oob_policies[oob_name] = yaml.safe_load(stream) + + return oob_policies + + +def manage_policies(group, manage_type, manage_scope): + psp_to_manage = manage_scope.get(psp_list_option, None) + roles_to_manage = manage_scope.get(roles_list_option, None) + bindings_to_manage = manage_scope.get(bindings_list_option, None) + + if not psp_to_manage and not roles_to_manage and not bindings_to_manage: + group.cluster.log.verbose("No policies to %s" % manage_type) + return + + template = collect_policies_template(psp_to_manage, roles_to_manage, bindings_to_manage) + filename = uuid.uuid4().hex + remote_path = tmp_filepath_pattern % filename + group.put(io.StringIO(template), remote_path, backup=True, sudo=True) + result = group.sudo("kubectl %s -f %s" % (manage_type, remote_path), warn=True) + group.sudo("rm -f %s" % remote_path) + return result + + +def collect_policies_template(psp_list, roles_list, bindings_list): + yaml = ruamel.yaml.YAML() + + buf = io.StringIO() + if psp_list: + for psp in psp_list: + yaml.dump(psp, buf) + buf.write("\n---\n") + if roles_list: + for role in roles_list: + yaml.dump(role, buf) + buf.write("\n---\n") + if bindings_list: + for binding in bindings_list: + yaml.dump(binding, buf) + buf.write("\n---\n") + return buf.getvalue() + + +def resolve_final_plugins_list(cluster_config, target_state): + if "enable-admission-plugins" not in cluster_config["apiServer"]["extraArgs"]: + if target_state == "enabled": + return "PodSecurityPolicy" + else: + return None + else: + current_plugins = cluster_config["apiServer"]["extraArgs"]["enable-admission-plugins"] + if "PodSecurityPolicy" not in current_plugins: + if target_state == "enabled": + resulting_list = "%s,%s" % (current_plugins, "PodSecurityPolicy") + else: + resulting_list = current_plugins + elif target_state == "disabled": + resulting_list = current_plugins.replace("PodSecurityPolicy", "") + else: + resulting_list = current_plugins + + return resulting_list.replace(",,", ",").strip(",") diff --git a/kubetool/resources/__init__.py b/kubetool/resources/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/kubetool/resources/configurations/__init__.py b/kubetool/resources/configurations/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/kubetool/resources/configurations/defaults.yaml b/kubetool/resources/configurations/defaults.yaml new file mode 100644 index 000000000..e55c8bab4 --- /dev/null +++ 
b/kubetool/resources/configurations/defaults.yaml @@ -0,0 +1,538 @@ +vrrp_ips: [] + +node_defaults: {} + +nodes: [] + +public_cluster_ip: '{{ control_plain["external"] }}' + +services: + kubeadm_kubelet: + apiVersion: kubelet.config.k8s.io/v1beta1 + kind: KubeletConfiguration + readOnlyPort: 0 + protectKernelDefaults: true + podPidsLimit: 4096 + kubeadm: + apiVersion: kubeadm.k8s.io/v1beta2 + kind: ClusterConfiguration + kubernetesVersion: v1.20.2 + controlPlaneEndpoint: '{{ cluster_name }}:6443' + networking: + podSubnet: '{% if nodes[0]["internal_address"]|isipv4 %}10.128.0.0/14{% else %}fd02::/80{% endif %}' + serviceSubnet: '{% if nodes[0]["internal_address"]|isipv4 %}172.30.0.0/16{% else %}fd03::/112{% endif %}' + apiServer: + certSANs: [] + extraArgs: + enable-admission-plugins: NodeRestriction + profiling: "false" + audit-log-path: /var/log/apiserver/audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "100" + scheduler: + extraArgs: + profiling: "false" + controllerManager: + extraArgs: + profiling: "false" + terminated-pod-gc-threshold: "1000" + + ntp: + chrony: + makestep: 5 -1 + rtcsync: True + timesyncd: + Time: + RootDistanceMaxSec: 5 + PollIntervalMinSec: 32 + PollIntervalMaxSec: 2048 + + kernel_security: + selinux: + state: enforcing + policy: targeted + permissive: + - haproxy_t + - container_t + - keepalived_t + + thirdparties: + /usr/bin/etcdctl: + source: 'resources/scripts/etcdctl.sh' + group: master + binary: false + /usr/bin/kubeadm: + source: 'https://storage.googleapis.com/kubernetes-release/release/{{ services.kubeadm.kubernetesVersion }}/bin/linux/amd64/kubeadm' + sha1: '{{ globals.compatibility_map.software.kubeadm[services.kubeadm.kubernetesVersion].sha1 }}' + /usr/bin/kubelet: + source: 'https://storage.googleapis.com/kubernetes-release/release/{{ services.kubeadm.kubernetesVersion }}/bin/linux/amd64/kubelet' + sha1: '{{ globals.compatibility_map.software.kubelet[services.kubeadm.kubernetesVersion].sha1 }}' + /usr/bin/kubectl: + source: 'https://storage.googleapis.com/kubernetes-release/release/{{ services.kubeadm.kubernetesVersion }}/bin/linux/amd64/kubectl' + sha1: '{{ globals.compatibility_map.software.kubectl[services.kubeadm.kubernetesVersion].sha1 }}' + group: master + /usr/bin/calicoctl: + source: 'https://github.com/projectcalico/calicoctl/releases/download/{{ plugins.calico.version }}/calicoctl-linux-amd64' + sha1: '{{ globals.compatibility_map.software.calico[services.kubeadm.kubernetesVersion|minorversion].sha1 }}' + group: master + # "crictl" is installed by default ONLY if "containerRuntime != docker", otherwise it is removed programmatically + /usr/bin/crictl.tar.gz: + source: 'https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ globals.compatibility_map.software.crictl[services.kubeadm.kubernetesVersion].version }}/crictl-{{ globals.compatibility_map.software.crictl[services.kubeadm.kubernetesVersion].version }}-linux-amd64.tar.gz' + sha1: '{{ globals.compatibility_map.software.crictl[services.kubeadm.kubernetesVersion].sha1 }}' + unpack: /usr/bin/ + + cri: + containerRuntime: containerd + containerdConfig: + version: 2 + dockerConfig: + ipv6: False + log-driver: json-file + log-opts: + max-size: 64m + max-file: "3" + exec-opts: + - native.cgroupdriver=systemd + icc: False + live-restore: True + userland-proxy: False + + modprobe: + - br_netfilter + - ip_vs + - ip_vs_rr + - ip_vs_wrr + - ip_vs_sh + - '{% if not nodes[0]["internal_address"]|isipv4 %}ip6table_filter{% endif %}' + - '{% if not 
nodes[0]["internal_address"]|isipv4 %}nf_conntrack_ipv6{% endif %}' + - '{% if not nodes[0]["internal_address"]|isipv4 %}nf_nat_masquerade_ipv6{% endif %}' + - '{% if not nodes[0]["internal_address"]|isipv4 %}nf_reject_ipv6{% endif %}' + - '{% if not nodes[0]["internal_address"]|isipv4 %}nf_defrag_ipv6{% endif %}' + + sysctl: + net.bridge.bridge-nf-call-iptables: 1 + net.ipv4.ip_forward: 1 + net.ipv4.ip_nonlocal_bind: 1 + net.ipv4.conf.all.route_localnet: 1 + net.bridge.bridge-nf-call-ip6tables: '{% if not nodes[0]["internal_address"]|isipv4 %}1{% endif %}' + net.ipv6.conf.all.forwarding: '{% if not nodes[0]["internal_address"]|isipv4 %}1{% endif %}' + net.ipv6.ip_nonlocal_bind: '{% if not nodes[0]["internal_address"]|isipv4 %}1{% endif %}' + kernel.panic: 10 + vm.overcommit_memory: 1 + kernel.panic_on_oops: 1 + + etc_hosts: + 127.0.0.1: + - localhost + - localhost.localdomain + '::1': + - '{% if not nodes[0]["internal_address"]|isipv4 %}localhost{% endif %}' + - '{% if not nodes[0]["internal_address"]|isipv4 %}localhost.localdomain{% endif %}' + + audit: + rules: + - -w /var/lib/docker -k docker + - -w /etc/docker -k docker + - -w /usr/lib/systemd/system/docker.service -k docker + - -w /usr/lib/systemd/system/docker.socket -k docker + - -w /etc/default/docker -k docker + - -w /etc/docker/daemon.json -k docker + - -w /usr/bin/containerd -k docker + - -w /usr/sbin/runc -k dockerks + - -w /usr/bin/dockerd -k docker + + coredns: + deployment: + spec: + template: + spec: + volumes: + - configMap: + defaultMode: 420 + items: + - key: Corefile + path: Corefile + - key: Hosts + path: Hosts + name: coredns + name: config-volume + nodeSelector: + node-role.kubernetes.io/worker: worker + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + configmap: + Corefile: + '.:53': + errors: True + health: True + ready: True + prometheus: :9153 + cache: 30 + loop: True + reload: True + loadbalance: True + hosts: + default: + priority: 1 + file: /etc/coredns/Hosts + data: + fallthrough: '' + kubernetes: + default: + priority: 1 + zone: + - cluster.local + - in-addr.arpa + - ip6.arpa + data: + pods: insecure + fallthrough: + - in-addr.arpa + - ip6.arpa + ttl: 30 + template: + default: + priority: 1 + class: IN + type: A + zone: '{{ cluster_name }}' + data: + match: '^(.*\.)?{{ cluster_name }}\.$' + answer: '{% raw %}{{ .Name }}{% endraw %} 3600 IN A {{ control_plain["internal"] }}' + reject-aaaa: + enabled: '{{ nodes[0]["internal_address"]|isipv4 }}' + priority: 999 + class: IN + type: AAAA + data: + authority: '{% raw %}{{ .Name }}{% endraw %} 3600 IN SOA coredns.kube-system.svc.cluster.local. hostmaster.coredns.kube-system.svc.cluster.local. (3600 3600 3600 3600 3600)' + forward: + - . 
+ - /etc/resolv.conf + + loadbalancer: + haproxy: + config: + defaults: + timeout_connect: '10s' + timeout_client: '1m' + timeout_server: '1m' + timeout_tunnel: '60m' + timeout_client_fin: '1m' + maxconn: 10000 + + packages: + package_manager: + replace-repositories: false + associations: + debian: + docker: + executable_name: 'docker' + package_name: + - 'docker-ce={{ globals.compatibility_map.software.docker[services.kubeadm.kubernetesVersion].version_debian }}' + - 'docker-ce-cli={{ globals.compatibility_map.software.docker[services.kubeadm.kubernetesVersion].version_debian }}' + - 'containerd.io={{ globals.compatibility_map.software.containerd[services.kubeadm.kubernetesVersion].version_debian }}' + service_name: 'docker' + config_location: '/etc/docker/daemon.json' + containerd: + executable_name: 'containerd' + package_name: + - 'containerd.io={{ globals.compatibility_map.software.containerd[services.kubeadm.kubernetesVersion].version_debian }}' + - 'podman={{ globals.compatibility_map.software.podman[services.kubeadm.kubernetesVersion].version_debian }}' + service_name: 'containerd' + config_location: '/etc/containerd/config.toml' + haproxy: + executable_name: '/usr/sbin/haproxy' + package_name: 'haproxy={{ globals.compatibility_map.software.haproxy[services.kubeadm.kubernetesVersion].version_debian }}' + service_name: 'haproxy' + config_location: '/etc/haproxy/haproxy.cfg' + keepalived: + executable_name: 'keepalived' + package_name: 'keepalived={{ globals.compatibility_map.software.keepalived[services.kubeadm.kubernetesVersion].version_debian }}' + service_name: 'keepalived' + config_location: '/etc/keepalived/keepalived.conf' + rhel: + docker: + executable_name: 'docker' + package_name: + - 'docker-ce-{{ globals.compatibility_map.software.docker[services.kubeadm.kubernetesVersion].version_rhel }}' + - 'docker-ce-cli-{{ globals.compatibility_map.software.docker[services.kubeadm.kubernetesVersion].version_rhel }}' + - 'containerd.io-{{ globals.compatibility_map.software.containerd[services.kubeadm.kubernetesVersion].version_rhel }}' + service_name: 'docker' + config_location: '/etc/docker/daemon.json' + containerd: + executable_name: 'containerd' + package_name: + - 'containerd.io-{{ globals.compatibility_map.software.containerd[services.kubeadm.kubernetesVersion].version_rhel }}' + - 'podman-{{ globals.compatibility_map.software.podman[services.kubeadm.kubernetesVersion].version_rhel }}' + service_name: 'containerd' + config_location: '/etc/containerd/config.toml' + haproxy: + executable_name: '/opt/rh/rh-haproxy18/root/usr/sbin/haproxy' + package_name: 'rh-haproxy18-haproxy-{{ globals.compatibility_map.software.haproxy[services.kubeadm.kubernetesVersion].version_rhel }}' + service_name: 'rh-haproxy18-haproxy' + config_location: '/etc/haproxy/haproxy.cfg' + keepalived: + executable_name: 'keepalived' + package_name: 'keepalived-{{ globals.compatibility_map.software.keepalived[services.kubeadm.kubernetesVersion].version_rhel }}' + service_name: 'keepalived' + config_location: '/etc/keepalived/keepalived.conf' + rhel8: + docker: + executable_name: 'docker' + package_name: + - 'docker-ce-{{ globals.compatibility_map.software.docker[services.kubeadm.kubernetesVersion].version_rhel8 }}' + - 'docker-ce-cli-{{ globals.compatibility_map.software.docker[services.kubeadm.kubernetesVersion].version_rhel8 }}' + - 'containerd.io-{{ globals.compatibility_map.software.containerd[services.kubeadm.kubernetesVersion].version_rhel8 }}' + service_name: 'docker' + config_location: 
'/etc/docker/daemon.json' + containerd: + executable_name: 'containerd' + package_name: + - 'containerd.io-{{ globals.compatibility_map.software.containerd[services.kubeadm.kubernetesVersion].version_rhel8 }}' + - 'podman-{{ globals.compatibility_map.software.podman[services.kubeadm.kubernetesVersion].version_rhel8 }}' + service_name: 'containerd' + config_location: '/etc/containerd/config.toml' + haproxy: + executable_name: '/usr/sbin/haproxy' + package_name: 'haproxy-{{ globals.compatibility_map.software.haproxy[services.kubeadm.kubernetesVersion].version_rhel8 }}' + service_name: 'haproxy' + config_location: '/etc/haproxy/haproxy.cfg' + keepalived: + executable_name: 'keepalived' + package_name: 'keepalived-{{ globals.compatibility_map.software.keepalived[services.kubeadm.kubernetesVersion].version_rhel8 }}' + service_name: 'keepalived' + config_location: '/etc/keepalived/keepalived.conf' + + +plugin_defaults: + installation: {} + +plugins: + + calico: + version: '{{ globals.compatibility_map.software["calico"][services.kubeadm.kubernetesVersion|minorversion].version }}' + installation: + priority: 0 + procedures: + - template: 'templates/plugins/calico-{{ plugins.calico.version|minorversion }}.yaml.j2' + - expect: + pods: + - coredns + - calico-kube-controllers + - calico-node + - thirdparty: /usr/bin/calicoctl + - shell: + command: mkdir -p /etc/calico + groups: ['master'] + sudo: true + - template: + source: templates/plugins/calicoctl.cfg.j2 + destination: /etc/calico/calicoctl.cfg + apply_required: false + - template: + source: templates/plugins/calico-ippool.yaml.j2 + destination: /etc/calico/ippool.yaml + apply_command: 'calicoctl apply -f /etc/calico/ippool.yaml' + mode: ipip + crossSubnet: true + natOutgoing: true + mtu: 1440 + typha: + enabled: false + replicas: '{{ (((nodes|length)/50) + 1) | round(1) | int }}' + image: 'calico/typha:{{ plugins.calico.version }}' + nodeSelector: + kubernetes.io/os: linux + env: + DATASTORE_TYPE: kubernetes + WAIT_FOR_DATASTORE: true + CLUSTER_TYPE: k8s,bgp + CALICO_ROUTER_ID: '' + IP: '{% if services.kubeadm.networking.podSubnet|isipv4 %}autodetect{% else %}none{% endif %}' + IP_AUTODETECTION_METHOD: first-found + CALICO_IPV4POOL_IPIP: '{% if plugins.calico.mode | default("vxlan") == "ipip" and services.kubeadm.networking.podSubnet|isipv4 %}Always{% else %}Never{% endif %}' + CALICO_IPV4POOL_VXLAN: '{% if plugins.calico.mode | default("vxlan") == "vxlan" and services.kubeadm.networking.podSubnet|isipv4 %}Always{% else %}Never{% endif %}' + CALICO_IPV4POOL_CIDR: '{{ plugins["calico"]["cni"]["ipam"]["ipv4_pools"][0] }}' + CALICO_IPV6POOL_CIDR: '{{ plugins["calico"]["cni"]["ipam"]["ipv6_pools"][0] }}' + IP6: '{% if not services.kubeadm.networking.podSubnet|isipv4 %}autodetect{% else %}none{% endif %}' + IP6_AUTODETECTION_METHOD: first-found + FELIX_IPV6SUPPORT: '{% if not services.kubeadm.networking.podSubnet|isipv4 %}true{% else %}false{% endif %}' + CALICO_IPV6POOL_IPIP: '{% if plugins.calico.mode | default("vxlan") == "ipip" and not services.kubeadm.networking.podSubnet|isipv4 %}Always{% else %}Never{% endif %}' + CALICO_IPV6POOL_VXLAN: '{% if plugins.calico.mode | default("vxlan") == "vxlan" and not services.kubeadm.networking.podSubnet|isipv4 %}Always{% else %}Never{% endif %}' + CALICO_DISABLE_FILE_LOGGING: true + FELIX_DEFAULTENDPOINTTOHOSTACTION: ACCEPT + FELIX_LOGSEVERITYSCREEN: info + FELIX_HEALTHENABLED: true + cni: + image: 'calico/cni:{{ plugins.calico.version }}' + ipam: + assign_ipv4: '{% if 
services.kubeadm.networking.podSubnet|isipv4 %}true{% else %}false{% endif %}' + assign_ipv6: '{% if not services.kubeadm.networking.podSubnet|isipv4 %}true{% else %}false{% endif %}' + ipv4_pools: + - '{% if services.kubeadm.networking.podSubnet|isipv4 %}{{ services.kubeadm.networking.podSubnet }}{% else %}192.168.0.0/16{% endif %}' + - default-ipv4-ippool + ipv6_pools: + - '{% if not services.kubeadm.networking.podSubnet|isipv4 %}{{ services.kubeadm.networking.podSubnet }}{% else %}fd02::/80{% endif %}' + - default-ipv6-ippool + type: calico-ipam + node: + image: 'calico/node:{{ plugins.calico.version }}' + kube-controllers: + image: 'calico/kube-controllers:{{ plugins.calico.version }}' + nodeSelector: + kubernetes.io/os: linux + flexvol: + image: 'calico/pod2daemon-flexvol:{{ plugins.calico.version }}' + + flannel: + installation: + priority: 0 + procedures: + - template: templates/plugins/flannel.yaml.j2 + - expect: + pods: + - coredns + - kube-flannel-ds-amd64 + image: quay.io/coreos/flannel:v0.11.0-amd64 + + nginx-ingress-controller: + version: '{{ globals.compatibility_map.software["nginx-ingress-controller"][services.kubeadm.kubernetesVersion|minorversion].version }}' + install: true + installation: + priority: 1 + procedures: + - python: + module: plugins/nginx_ingress.py + method: manage_custom_certificate + - template: 'templates/plugins/nginx-ingress-controller-{{ plugins["nginx-ingress-controller"].version|minorversion }}.yaml.j2' + - expect: + pods: + - '{{ globals.compatibility_map.software["nginx-ingress-controller"][services.kubeadm.kubernetesVersion|minorversion]["pod-name"] }}' + controller: + image: '{{ globals.compatibility_map.software["nginx-ingress-controller"][services.kubeadm.kubernetesVersion|minorversion]["image-name"] }}:{{ plugins["nginx-ingress-controller"].version }}' + ssl: + enableSslPassthrough: false + nodeSelector: + kubernetes.io/os: linux + ports: + - name: http + containerPort: 80 + hostPort: 80 + protocol: TCP + - name: https + containerPort: 443 + hostPort: 443 + protocol: TCP + + # todo: support hostPort for haproxy-ingress + haproxy-ingress-controller: + install: false + installation: + priority: 1 + procedures: + - template: templates/plugins/haproxy-ingress-controller.yaml.j2 + - expect: + pods: + - haproxy-ingress + - python: + module: plugins/haproxy_ingress.py + method: override_priviledged_ports + arguments: + service: haproxy-ingress + namespace: haproxy-controller + controller: + image: haproxytech/kubernetes-ingress:1.2.7 + nodeSelector: + kubernetes.io/os: linux + backend: + image: k8s.gcr.io/defaultbackend:1.0 + nodeSelector: + kubernetes.io/os: linux + + kubernetes-dashboard: + version: '{{ globals.compatibility_map.software["kubernetes-dashboard"][services.kubeadm.kubernetesVersion|minorversion].version }}' + install: false + installation: + priority: 2 + procedures: + - template: 'templates/plugins/dashboard-{{ plugins["kubernetes-dashboard"].version|minorversion }}.yaml.j2' + - expect: + pods: + - kubernetes-dashboard + - dashboard-metrics-scraper + - template: templates/plugins/dashboard-ingress.yaml.j2 + hostname: 'dashboard.{{ cluster_name }}' + dashboard: + image: 'kubernetesui/dashboard:{{ plugins["kubernetes-dashboard"].version }}' + nodeSelector: + kubernetes.io/os: linux + metrics-scraper: + image: 'kubernetesui/metrics-scraper:{{ globals.compatibility_map.software["kubernetes-dashboard"][services.kubeadm.kubernetesVersion|minorversion]["metrics-scraper-version"] }}' + nodeSelector: + kubernetes.io/os: linux + ingress: + 
metadata: + name: kubernetes-dashboard + namespace: kubernetes-dashboard + annotations: + nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + spec: + tls: + # this section enables tls and ssl-redirect for dashboard + # since certificate is not provided here, default controller certificate will be used + - hosts: + - '{{ plugins["kubernetes-dashboard"].hostname }}' + rules: + - host: '{{ plugins["kubernetes-dashboard"].hostname }}' + http: + paths: + - path: / + backend: + serviceName: kubernetes-dashboard + servicePort: 443 + + local-path-provisioner: + install: false + installation: + priority: 2 + procedures: + - template: 'templates/plugins/local-path-provisioner.yaml.j2' + - expect: + pods: + - local-path-provisioner + storage-class: + name: local-path + is-default: "false" + volume-dir: /opt/local-path-provisioner + image: rancher/local-path-provisioner:v0.0.19 + helper-pod-image: busybox:1.29.3 + +rbac: + account_defaults: + namespace: kube-system + configs: + - apiVersion: v1 + kind: ServiceAccount + metadata: {} + - apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: {} + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + subjects: + - kind: ServiceAccount + psp: + pod-security: enabled + oob-policies: + default: enabled + host-network: enabled + anyuid: enabled + custom-policies: + psp-list: [] + roles-list: [] + bindings-list: [] diff --git a/kubetool/resources/configurations/globals.yaml b/kubetool/resources/configurations/globals.yaml new file mode 100644 index 000000000..17a6e1023 --- /dev/null +++ b/kubetool/resources/configurations/globals.yaml @@ -0,0 +1,468 @@ +kubernetes_versions: + v1.16: + supported: false + v1.17: + supported: false + v1.18: + supported: true + v1.19: + supported: false + v1.20: + supported: true + v1.21: + supported: true + +connection: + defaults: + port: 22 + username: root + timeout: 10 + bad_connection_exceptions: + - Unable to connect to port + - timed out + - Network is unreachable + - Error reading SSH protocol banner + - Connection reset by peer + - Connect fail + - No existing session + - encountered RSA key + - Socket is closed + - WinError 10060 +etcd: + default_arguments: + cert: /etc/kubernetes/pki/etcd/server.crt + key: /etc/kubernetes/pki/etcd/server.key + cacert: /etc/kubernetes/pki/etcd/ca.crt + peer_cert: /etc/kubernetes/pki/etcd/peer.crt + peer_key: /etc/kubernetes/pki/etcd/peer.key + peer_cacert: /etc/kubernetes/pki/etcd/ca.crt + temporary_exceptions: + - leader changed + - "etcdserver: request timed out" +kubernetes: + temporary_exceptions: + - has prevented the request from succeeding +pods: + allowed_failures: 10 + expect: + kubernetes: + timeout: 5 + retries: 30 + plugins: + timeout: 5 + retries: 30 + critical_states: + - Error + - ErrImagePull + - ImagePullBackOff + - RunContainerError + - InvalidImageName + - CrashLoopBackOff + - CreateContainerConfigError +nodes: + expect: + kubernetes_version: + timeout: 10 + retries: 30 + ready: + retries: 15 + timeout: 5 + boot: + reboot_command: 'reboot 2>/dev/null >/dev/null &' + defaults: + timeout: 180 + delay_period: 5 + drain: + timeout: 10 + grace_period: 60 + remove: + check_active_timeout: 30 +error_handling: + failure_message: > + An unexpected error occurred. It is failed to solve the problem automatically. + Follow the instructions from the Troubleshooting Guide available to you. + If it is impossible to solve the problem, provide the dump and the technical information above to the support team. 
+ + You can restart the script from the last task with the following command: + %s --tasks="%s" +keepalived: + restart_wait: 5 + defaults: + priority: + max_value: 255 + step: 1 + label_size: 10 + password_size: 8 +haproxy: + restart_wait: 5 +workaround: + retries: 10 + delay_period: 5 + +compatibility_map: + software: + docker: + v1.18.4: + version_rhel: 19.03* + version_debian: 5:19.03.* + v1.18.8: + version_rhel: 19.03* + version_debian: 5:19.03.* + v1.18.10: + version_rhel: 19.03* + version_debian: 5:19.03.* + v1.18.18: + version_rhel: 19.03* + version_rhel8: 19.03* + version_debian: 5:19.03.* + v1.19.3: + version_rhel: 19.03* + version_rhel8: 19.03* + version_debian: 5:19.03.* + v1.20.2: + version_rhel: 19.03* + version_rhel8: 19.03* + version_debian: 5:19.03.* + v1.21.2: + version_rhel: 19.03* + version_rhel8: 19.03* + version_debian: 5:19.03.* + containerd: + v1.18.4: + version_rhel: 1.4.6* + version_debian: 1.4.6* + v1.18.8: + version_rhel: 1.4.6* + version_debian: 1.4.6* + v1.18.10: + version_rhel: 1.4.6* + version_debian: 1.4.6* + v1.18.18: + version_rhel: 1.4.6* + version_rhel8: 1.4.8* + version_debian: 1.4.6* + v1.19.3: + version_rhel: 1.4.6* + version_rhel8: 1.4.8* + version_debian: 1.4.6* + v1.20.2: + version_rhel: 1.4.6* + version_rhel8: 1.4.8* + version_debian: 1.4.6* + v1.21.2: + version_rhel: 1.4.6* + version_rhel8: 1.4.8* + version_debian: 1.4.6* + podman: + v1.18.4: + version_rhel: 1.6.4* + version_debian: 100:3.1.2* + v1.18.8: + version_rhel: 1.6.4* + version_debian: 100:3.1.2* + v1.18.10: + version_rhel: 1.6.4* + version_debian: 100:3.1.2* + v1.18.18: + version_rhel: 1.6.4* + version_rhel8: 3.0.1* + version_debian: 100:3.1.2* + v1.19.3: + version_rhel: 1.6.4* + version_rhel8: 3.0.1* + version_debian: 100:3.1.2* + v1.20.2: + version_rhel: 1.6.4* + version_rhel8: 3.0.1* + version_debian: 100:3.1.2* + v1.21.2: + version_rhel: 1.6.4* + version_rhel8: 3.0.1* + version_debian: 100:3.1.2* + haproxy: + v1.18.4: + version_rhel: 1.8* + version_debian: 2.0.* + v1.18.8: + version_rhel: 1.8* + version_debian: 2.0.* + v1.18.10: + version_rhel: 1.8* + version_debian: 2.0.* + v1.18.18: + version_rhel: 1.8* + version_rhel8: 1.8* + version_debian: 2.0.* + v1.19.3: + version_rhel: 1.8* + version_rhel8: 1.8* + version_debian: 2.0.* + v1.20.2: + version_rhel: 1.8* + version_rhel8: 1.8* + version_debian: 2.0.* + v1.21.2: + version_rhel: 1.8* + version_rhel8: 1.8* + version_debian: 2.0.* + keepalived: + v1.18.4: + version_rhel: 1.3* + version_debian: 1:2.0.* + v1.18.8: + version_rhel: 1.3* + version_debian: 1:2.0.* + v1.18.10: + version_rhel: 1.3* + version_debian: 1:2.0.* + v1.18.18: + version_rhel: 1.3* + version_rhel8: 2.1* + version_debian: 1:2.0.* + v1.19.3: + version_rhel: 1.3* + version_rhel8: 2.1* + version_debian: 1:2.0.* + v1.20.2: + version_rhel: 1.3* + version_rhel8: 2.1* + version_debian: 1:2.0.* + v1.21.2: + version_rhel: 1.3* + version_rhel8: 2.1* + version_debian: 1:2.0.* + crictl: + v1.18.4: + version: v1.20.0 + sha1: eaf4ffa1cfac5c69ec522d9562c8ee6ddd873f3e + v1.18.8: + version: v1.20.0 + sha1: eaf4ffa1cfac5c69ec522d9562c8ee6ddd873f3e + v1.18.10: + version: v1.20.0 + sha1: eaf4ffa1cfac5c69ec522d9562c8ee6ddd873f3e + v1.18.18: + version: v1.20.0 + sha1: eaf4ffa1cfac5c69ec522d9562c8ee6ddd873f3e + v1.19.3: + version: v1.20.0 + sha1: eaf4ffa1cfac5c69ec522d9562c8ee6ddd873f3e + v1.20.2: + version: v1.20.0 + sha1: eaf4ffa1cfac5c69ec522d9562c8ee6ddd873f3e + v1.21.2: + version: v1.20.0 + sha1: eaf4ffa1cfac5c69ec522d9562c8ee6ddd873f3e + kubeadm: + v1.18.4: + sha1: 
d17b585fe64fcb0bd21fad724a9f899d214e665f + v1.18.8: + sha1: f99912e8cf617f75011aa930078397115940dd7e + v1.18.10: + sha1: 9b4163970ea1236091927a528079e823e1f09843 + v1.18.18: + sha1: fd7cc264070a71834bafbb39838ee24d26c5f0c5 + v1.19.3: + sha1: 0db8a6e2493b8956259ea2d5f2d431cf5d38d971 + v1.20.2: + sha1: 4c025ebf29eb7aa32012a1c8f81e7b85df2bf92f + v1.21.2: + sha1: cbb07d380de4ef73d43d594a1055839fa9753138 + kubelet: + v1.18.4: + sha1: 5ca1b807bf7a4a8c8737c57437354cf7299eaa90 + v1.18.8: + sha1: d77df7122b0669f152aa141bb839c34fc070c71f + v1.18.10: + sha1: 26bc249f9ac38a6ceb202a54e2cc1281bea41e5d + v1.18.18: + sha1: f8d0ba8941176cd7f9b263b3f533a0fd5ab17a2d + v1.19.3: + sha1: 4c6ff5ffa4b2e6f3f886f319c977c57905186f72 + v1.20.2: + sha1: 25ca655cce261cdbeb7c3337185f669ee0b53cc3 + v1.21.2: + sha1: 024e458aa0f74cba6b773401b779590437812fc6 + kubectl: + v1.18.4: + sha1: 1351db2b0b92bb0c46d6e18eda7b3063e7e044bf + v1.18.8: + sha1: 74c282c9d8b7e4c59250f4775bff388eaa596878 + v1.18.10: + sha1: 6bcc1d8434e4eba2680ee7399acaea5ad1008629 + v1.18.18: + sha1: 6bcbf6f2cb7998e736d177bbd1f95f1305921d6c + v1.19.3: + sha1: 8790862689bd3b022ba012228c1734316c968d6a + v1.20.2: + sha1: 202e00c35fa2a4085135061e5d0965ebbffed19c + v1.21.2: + sha1: 2c7a7de9fff41ac49f7c2546a9b1aff2c1d9c468 + calico: + v1.18: + version: v3.16.1 + sha1: 04a53f7bc08d7cb488a9939144ee5bfd606c6fff + v1.19: + version: v3.16.1 + sha1: 04a53f7bc08d7cb488a9939144ee5bfd606c6fff + v1.20: + version: v3.19.1 + sha1: dde3851a977280f7c0d54538526bb9459fa7a7ac + v1.21: + version: v3.19.1 + sha1: dde3851a977280f7c0d54538526bb9459fa7a7ac + nginx-ingress-controller: + v1.18: + image-name: k8s-artifacts-prod/ingress-nginx/controller + version: v0.34.1 + pod-name: ingress-nginx-controller + v1.19: + image-name: k8s.gcr.io/ingress-nginx/controller + version: v0.35.0 + pod-name: ingress-nginx-controller + v1.20: + image-name: k8s.gcr.io/ingress-nginx/controller + version: v0.43.0 + pod-name: ingress-nginx-controller + v1.21: + image-name: k8s.gcr.io/ingress-nginx/controller + version: v0.48.1 + pod-name: ingress-nginx-controller + kubernetes-dashboard: + v1.18: + version: v2.0.1 + metrics-scraper-version: v1.0.4 + v1.19: + version: v2.0.4 + metrics-scraper-version: v1.0.4 + v1.20: + version: v2.1.0 + metrics-scraper-version: v1.0.6 + v1.21: + version: v2.3.1 + metrics-scraper-version: v1.0.6 + local-path-provisioner: + v1.18: + version: v0.0.19 + v1.19: + version: v0.0.19 + v1.20: + version: v0.0.19 + v1.21: + version: v0.0.19 + hardware: + minimal: + balancer: + amount: 1 + vcpu: 1 + ram: 1 + master: + amount: 3 + vcpu: 2 + ram: 2 + worker: + amount: 3 + vcpu: 4 + ram: 4 + vip: + amount: 0 + all: + amount: 3 + recommended: + balancer: + amount: 2 + vcpu: 2 + ram: 1 + master: + amount: 3 + vcpu: 4 + ram: 4 + worker: + amount: 3 + vcpu: 8 + ram: 8 + vip: + amount: 1 + all: + amount: 8 + + distributives: + centos: + - os_family: 'rhel' + versions: + - '7.5' + - '7.6' + - '7.7' + - '7.8' + - '7.9' + - os_family: 'rhel8' + versions: + - '8.4' + rhel: + - os_family: 'rhel' + versions: + - '7.5' + - '7.6' + - '7.7' + - '7.8' + - '7.9' + - os_family: 'rhel8' + versions: + - '8.4' + ubuntu: + - os_family: 'debian' + versions: + - '20.04' + debian: + - os_family: 'debian' + versions: + - '10.9' + - '10.10' + ol: + - os_family: 'rhel' + versions: + - '7.5' + - '7.6' + - '7.7' + - '7.8' + - '7.9' + - os_family: 'rhel8' + versions: + - '8.4' + + network: + connection: + latency: + single: + critical: 10000 + recommended: 1000 + multi: + critical: 15000 + recommended: 2000 + ports: + 
internal: + - 80 + - 443 + - 6443 + - 2379 + - 2380 + - 10250 + - 10251 + - 10252 + - [30000, 32767] + external: + - 80 + - 443 +logging: + default_targets: + stdout: + level: debug + colorize: True + correct_newlines: True + dump: + level: verbose + format: "%(asctime)s %(process)s %(thread)s %(name)s %(levelname)s [%(module)s.%(funcName)s] %(message)s" + colorize: False + correct_newlines: True + +prepull_group_size: 20 \ No newline at end of file diff --git a/kubetool/resources/drop_ins/__init__.py b/kubetool/resources/drop_ins/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/kubetool/resources/drop_ins/haproxy.conf b/kubetool/resources/drop_ins/haproxy.conf new file mode 100644 index 000000000..8a764e347 --- /dev/null +++ b/kubetool/resources/drop_ins/haproxy.conf @@ -0,0 +1,2 @@ +[Service] +Restart=always diff --git a/kubetool/resources/drop_ins/keepalived.conf b/kubetool/resources/drop_ins/keepalived.conf new file mode 100644 index 000000000..8a764e347 --- /dev/null +++ b/kubetool/resources/drop_ins/keepalived.conf @@ -0,0 +1,2 @@ +[Service] +Restart=always diff --git a/kubetool/resources/psp/__init__.py b/kubetool/resources/psp/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/kubetool/resources/psp/anyuid.yaml b/kubetool/resources/psp/anyuid.yaml new file mode 100644 index 000000000..bef286989 --- /dev/null +++ b/kubetool/resources/psp/anyuid.yaml @@ -0,0 +1,61 @@ +psp: + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: oob-anyuid-psp + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + spec: + privileged: false + # Allow core volume types. + hostPID: false + hostIPC: false + hostNetwork: false + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + # Assume that persistentVolumes set up by the cluster admin are safe to use. + - 'persistentVolumeClaim' + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: false + runAsUser: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + runAsGroup: + rule: 'RunAsAny' + allowPrivilegeEscalation: true + seLinux: + rule: 'RunAsAny' + # Default docker capabilities + allowedCapabilities: + - SETPCAP + - MKNOD + - AUDIT_WRITE + - CHOWN + - NET_RAW + - DAC_OVERRIDE + - FOWNER + - FSETID + - KILL + - SETGID + - SETUID + - NET_BIND_SERVICE + - SYS_CHROOT + - SETFCAP +role: + kind: ClusterRole + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: oob-anyuid-psp-cr + rules: + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - oob-anyuid-psp diff --git a/kubetool/resources/psp/default.yaml b/kubetool/resources/psp/default.yaml new file mode 100644 index 000000000..c7c3c65d4 --- /dev/null +++ b/kubetool/resources/psp/default.yaml @@ -0,0 +1,70 @@ +psp: + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: oob-default-psp + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + requiredDropCapabilities: + - ALL + # Allow core volume types. 
+ volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + # Assume that persistentVolumes set up by the cluster admin are safe to use. + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +role: + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: oob-default-psp-cr + rules: + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - oob-default-psp +binding: + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: oob-default-psp-crb + roleRef: + kind: ClusterRole + name: oob-default-psp-cr + apiGroup: rbac.authorization.k8s.io + subjects: + - kind: Group + name: system:authenticated + apiGroup: rbac.authorization.k8s.io diff --git a/kubetool/resources/psp/host-network.yaml b/kubetool/resources/psp/host-network.yaml new file mode 100644 index 000000000..fb960d633 --- /dev/null +++ b/kubetool/resources/psp/host-network.yaml @@ -0,0 +1,65 @@ +psp: + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: oob-host-network-psp + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + spec: + privileged: false + # Allow core volume types. + hostPID: false + hostIPC: false + hostNetwork: true + hostPorts: + - min: 80 + max: 65535 + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + # Assume that persistentVolumes set up by the cluster admin are safe to use. + - 'persistentVolumeClaim' + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + runAsUser: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + runAsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + allowPrivilegeEscalation: true + seLinux: + rule: 'RunAsAny' + allowedCapabilities: + - NET_BIND_SERVICE +role: + apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: ClusterRole + metadata: + name: oob-host-network-psp-cr + rules: + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - oob-host-network-psp diff --git a/kubetool/resources/psp/privileged.yaml b/kubetool/resources/psp/privileged.yaml new file mode 100644 index 000000000..5ca65b59c --- /dev/null +++ b/kubetool/resources/psp/privileged.yaml @@ -0,0 +1,58 @@ +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: oob-privileged-psp + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' +spec: + privileged: true + allowPrivilegeEscalation: true + allowedCapabilities: + - '*' + volumes: + - '*' + hostNetwork: true + hostPorts: + - min: 0 + max: 65535 + hostIPC: true + hostPID: true + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: oob-privileged-psp-cr +rules: + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - oob-privileged-psp +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: oob-privileged-psp-crb +roleRef: + kind: ClusterRole + name: oob-privileged-psp-cr + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: Group + name: system:nodes + apiGroup: rbac.authorization.k8s.io + - kind: ServiceAccount + name: kube-proxy + namespace: kube-system + - kind: ServiceAccount + name: coredns + namespace: kube-system \ No newline at end of file diff --git a/kubetool/resources/reports/__init__.py b/kubetool/resources/reports/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/kubetool/resources/reports/check_report.css b/kubetool/resources/reports/check_report.css new file mode 100644 index 000000000..30c9559f1 --- /dev/null +++ b/kubetool/resources/reports/check_report.css @@ -0,0 +1,73 @@ + body { + font-size: 14px; + font-family: Arial; + } + table { + position: absolute; + width: 1000px; + left: 50%; + margin-left: -500px; + margin-top: 100px; + border:none; + border-collapse: collapse; + font-size: 14px; + } + table tr { + height: 40px; + } + table > tbody > tr:nth-child(odd) { + background: #FAFAFA; + } + table > thead > tr { + height: 60px; + opacity: .5; + } + table > tbody > tr > td:nth-child(2) > div, #stats > div { + height: 26px; + color: #fff; + text-align: center; + line-height: 27px; + text-transform: uppercase; + font-size: 10px; + border-radius: 8px; + padding: 0 1px; + margin: 0 10px 0 0; + } + table > tbody > tr.ok > td:nth-child(2) > div, #stats > div.succeeded{ + background: #4CAF50; + } + table > tbody > tr.fail > td:nth-child(2) > div, #stats > div.failed { + background: #f44336; + } + table > tbody > tr.warning > td:nth-child(2) > div, #stats > div.warned { + background: #FF9800; + } + table > tbody > tr.exception > td:nth-child(2) > div, #stats > div.excepted { + background: #607D8B; + } + h1 { + width: 1000px; + position: absolute; + left: 50%; + margin-left: -500px; + margin-top: 50px; + } + #date { + position: absolute; + margin-top: 30px; + left: 50%; + width: 1000px; + margin-left: -500px; + } + #stats { + position: absolute; + margin-top: 50px; + margin-left: -500px; + left: 50%; + width: 1000px; + } + #stats > 
div { + float: right; + padding: 2px 10px; + margin: 0 0 0 10px; + } diff --git a/kubetool/resources/scripts/__init__.py b/kubetool/resources/scripts/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/kubetool/resources/scripts/check_haproxy.sh b/kubetool/resources/scripts/check_haproxy.sh new file mode 100755 index 000000000..6c9938c83 --- /dev/null +++ b/kubetool/resources/scripts/check_haproxy.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +# Script simply checks that the haproxy process listens on ports 80, 443 and 6443 + +HAPROXY_PROC_NAME='haproxy' + +for port in 80 443 6443; do + if ! ss -ntpl sport = :${port} | grep ${HAPROXY_PROC_NAME}; then + echo "Haproxy does not listen on port $port" + exit 1 + fi +done \ No newline at end of file diff --git a/kubetool/resources/scripts/etcdctl.sh b/kubetool/resources/scripts/etcdctl.sh new file mode 100755 index 000000000..f862fd659 --- /dev/null +++ b/kubetool/resources/scripts/etcdctl.sh @@ -0,0 +1,78 @@ +#!/bin/bash + +if [[ $EUID -ne 0 ]]; then + echo "Root access required" + exit 1 +fi + +if podman --version &> /dev/null; then + CONT_RUNTIME="podman" +elif systemctl is-active --quiet docker; then + CONT_RUNTIME="docker" +else + echo "Neither podman nor docker is available to run the container, exiting with error..." + exit 1 +fi + +# Try to read pod yaml from kubernetes +ETCD_POD_NAME=$(kubectl get pods -n kube-system | tac | grep Running | grep '1/1' | grep etcd | awk '{print $1; exit}') +if [ -n "${ETCD_POD_NAME}" ] && [ "$?" -eq '0' ]; then + ETCD_POD_CONFIG=$(kubectl get pod "${ETCD_POD_NAME}" -n kube-system -o yaml) +fi + +# If failed to get configuration from kubernetes +if [ -z "${ETCD_POD_CONFIG}" ] || [ "$?" -ne '0' ]; then + # Try to read pod yaml config from local dir + if [ -f "/etc/kubernetes/manifests/etcd.yaml" ]; then + ETCD_POD_CONFIG=$(cat /etc/kubernetes/manifests/etcd.yaml) + else + echo "Unable to find etcd configuration either in kubernetes or on the host, exiting with error..." + exit 1 + fi +fi + +# If any pod configuration detected +if [ -n "${ETCD_POD_CONFIG}" ]; then + ETCD_IMAGE=$(echo "${ETCD_POD_CONFIG}" | grep ' image:' | awk '{print $2; exit}') + ETCD_MOUNTS="" + ETCD_MOUNTS_RAW=$(echo "${ETCD_POD_CONFIG}" | grep ' mountPath: ') + ETCD_CERT=$(echo "${ETCD_POD_CONFIG}" | grep '\- --cert-file' | sed s/=/\\n/g | sed -n 2p) + ETCD_KEY=$(echo "${ETCD_POD_CONFIG}" | grep '\- --key-file' | sed s/=/\\n/g | sed -n 2p) + ETCD_CA=$(echo "${ETCD_POD_CONFIG}" | grep '\- --trusted-ca-file' | sed s/=/\\n/g | sed -n 2p) + ETCD_ENDPOINTS=$(echo "${ETCD_POD_CONFIG}" | grep '\- --initial-cluster=' | sed -e 's/\s*- --initial-cluster=//g' -e "s/[a-zA-Z0-9-]*=//g" -e "s/2380/2379/g") + while IFS= read -r line; do + volume=$(echo "${line}" | awk '{print $3; exit}') + ETCD_MOUNTS="${ETCD_MOUNTS} -v ${volume}:${volume}" + done <<< "${ETCD_MOUNTS_RAW}" + + # User can override some of our "default" etcdctl args (see cases). + # If the user passed their own arg, then our "default" arg will be NULLed. + USER_ARGS=("$@") + opts=$(getopt --quiet --longoptions "endpoints:," "$@") + eval set --$opts + while [[ $# -gt 0 ]]; do + case "$1" in + --endpoints) + ETCD_ENDPOINTS="" + shift 2 + ;; + *) + # skip unknown options + shift 1 + ;; + esac + done + # If our default arg is not NULLed, then the user did not provide their own flag and we should append our default. + # Otherwise the arg is already provided by the user and thus our default arg should not be appended.
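+    #
+    # Illustrative usage (not part of the original logic; assumes the script is installed
+    # as /usr/bin/etcdctl, as configured in defaults.yaml):
+    #   etcdctl member list                                    -> the discovered --endpoints default is appended
+    #   etcdctl --endpoints=https://127.0.0.1:2379 member list -> only the user-provided endpoints are used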
+ if [ -n "$ETCD_ENDPOINTS" ]; then + USER_ARGS+=("--endpoints=$ETCD_ENDPOINTS") + fi + + if [ "$CONT_RUNTIME" == "podman" ]; then + podman pull ${ETCD_IMAGE} &> /dev/null + podman run --rm ${ETCD_MOUNTS} -e ETCDCTL_API=3 ${ETCD_IMAGE} etcdctl --cert=${ETCD_CERT} --key=${ETCD_KEY} --cacert=${ETCD_CA} "${USER_ARGS[@]}" + else + docker run --rm ${ETCD_MOUNTS} -e ETCDCTL_API=3 ${ETCD_IMAGE} etcdctl --cert=${ETCD_CERT} --key=${ETCD_KEY} --cacert=${ETCD_CA} "${USER_ARGS[@]}" + fi + exit $? +fi \ No newline at end of file diff --git a/kubetool/selinux.py b/kubetool/selinux.py new file mode 100644 index 000000000..0de78b04f --- /dev/null +++ b/kubetool/selinux.py @@ -0,0 +1,213 @@ +import io +import re + +from kubetool import system +from kubetool.core import utils + + +# Common regexp should support the following schemes: +# SELinux status: enabled +# SELinuxfs mount: /selinux +# Policy version: 23 + +# Common regexp +common_regexp = "%s:\\s*([\\w/\\d]*)" + +# Structure with names to parse and keys to put the data to parsed map: +parsed_names_map = { + re.compile(common_regexp % 'SELinux status', re.M): 'status', + re.compile(common_regexp % 'SELinuxfs mount', re.M): 'mount', + re.compile(common_regexp % 'SELinux root directory', re.M): 'root_directory', + re.compile(common_regexp % 'Current mode', re.M): 'mode', + re.compile(common_regexp % 'Mode from config file', re.M): 'mode_from_file', + re.compile(common_regexp % 'Loaded policy name', re.M): 'policy', + re.compile(common_regexp % 'Policy from config file', re.M): 'policy_from_file', + re.compile(common_regexp % 'Policy MLS status', re.M): 'policy_mls', + re.compile(common_regexp % 'Policy deny_unknown status', re.M): 'policy_deny_unknown', + re.compile(common_regexp % 'Policy version', re.M): 'policy_version', + re.compile(common_regexp % 'Max kernel policy version', re.M): 'policy_version_max' +} + +# Customized permissive types regexp +# It should cut the types from the following: +# +# > Customized Permissive Types +# > +# > keepalived_t +# > something_else_t +# > +# > Builtin Permissive Types +# > +# > and_another_one +# +# Cut only the following: +# +# > keepalived_t +# > something_else_t +# +permissive_types_regex = re.compile("Customized Permissive Types\\s*([\\w_\\s]*)\\s*", re.M) + + +def get_expected_state(inventory): + return inventory['services']['kernel_security'].get('selinux', {}).get('state', 'enforcing') + + +def get_expected_policy(inventory): + return inventory['services']['kernel_security'].get('selinux', {}).get('policy', 'targeted') + + +def get_expected_permissive(inventory): + return inventory['services']['kernel_security'].get('selinux', {}).get('permissive', []) + + +def verify_inventory(inventory, cluster): + + expected_selinux_states = ['enforcing', 'permissive', 'disabled'] + if inventory['services']['kernel_security'].get('selinux', {}).get('state') and \ + inventory['services']['kernel_security']['selinux']['state'] not in expected_selinux_states: + raise Exception('Unknown selinux state found in configfile. Expected %s, but \'%s\' found.' + % (expected_selinux_states, inventory['services']['kernel_security']['selinux']['state'])) + + expected_selinux_policies = ['targeted', 'strict'] + if inventory['services']['kernel_security'].get('selinux', {}).get('policy') and \ + inventory['services']['kernel_security']['selinux']['policy'] not in expected_selinux_policies: + raise Exception('Unknown selinux policy found in configfile. Expected %s, but \'%s\' found.'
+ % (expected_selinux_policies, inventory['services']['kernel_security']['selinux']['policy'])) + + return inventory + + +def parse_selinux_status(log, stdout): + result = {} + if stdout is not None and stdout.strip() != '': + for regex, key in parsed_names_map.items(): + matches = re.findall(regex, stdout) + if matches: + result[key] = matches[0].strip() + log.verbose('Parsed status: %s' % result) + return result + + +def parse_selinux_permissive_types(log, stdout): + if stdout is None or stdout.strip() == '': + log.verbose('Permissive types pattern not found - presented stdout is empty') + return [] + + matches = re.findall(permissive_types_regex, stdout) + if not matches: + log.verbose('Permissive types pattern not found') + return [] + + types_string = matches[0] + if types_string.strip() == '': + log.verbose('Permissive types pattern found, but value is empty') + return [] + + result = types_string.split('\n') + log.verbose('Permissive types parsed: %s' % result) + return result + + +def get_selinux_status(group): + log = group.cluster.log + + # 2 commands in 1 run + # Seems not good at all, but: + # 1) This will reduce connections across all nodes + # 2) This will show in log all selinux settings in a single return + result = group.sudo("sestatus && sudo semanage permissive -l") + + parsed_result = {} + for connection, node_result in result.items(): + log.verbose('Parsing status for %s...' % connection.host) + parsed_result[connection] = parse_selinux_status(log, node_result.stdout) + parsed_result[connection]['permissive_types'] = parse_selinux_permissive_types(log, node_result.stdout) + log.verbose("Parsed remote sestatus summary:\n%s" % parsed_result) + return result, parsed_result + + +def is_config_valid(group, state=None, policy=None, permissive=None): + log = group.cluster.log + + log.verbose('Verifying selinux configs...') + + if state is None: + state = get_expected_state(group.cluster.inventory) + + if policy is None: + policy = get_expected_policy(group.cluster.inventory) + + if permissive is None: + permissive = get_expected_permissive(group.cluster.inventory) + + result, parsed_result = get_selinux_status(group) + valid = True + + for connection, selinux_status in parsed_result.items(): + + if selinux_status['status'] == 'disabled' and state == 'disabled': + continue + + # for some different selinux versions some statuses may be absent + # that is why such construction was made - when no status, then cause true + if state != selinux_status.get('mode', state) or \ + state != selinux_status.get('mode_from_file', state) or \ + policy != selinux_status.get('policy_from_file', policy) or \ + policy != selinux_status.get('policy', policy): + valid = False + log.verbose('Selinux configs are not matched at %s' % connection.host) + break + + if permissive: + for permissive_type in permissive: + if permissive_type not in selinux_status['permissive_types']: + valid = False + log.verbose('Permissive type %s not found in types %s at %s ' + % (permissive_type, selinux_status['permissive_types'], connection.host)) + break + # if no break was called in previous for loop, then else called and no break will be called in current loop + else: + continue + # if break was called in previous for loop, then do break in current loop + break + + return valid, result, parsed_result + + +def setup_selinux(group): + log = group.cluster.log + + if system.get_os_family(group.cluster) == 'debian': + log.debug("Skipped - selinux is not supported on Ubuntu/Debian os family") + return + + expected_state = 
get_expected_state(group.cluster.inventory) + expected_policy = get_expected_policy(group.cluster.inventory) + expected_permissive = get_expected_permissive(group.cluster.inventory) + + valid, result, parsed_result = is_config_valid(group, + state=expected_state, + policy=expected_policy, + permissive=expected_permissive) + + if valid: + log.debug("Skipped - selinux already correctly configured") + return result + + config = io.StringIO('SELINUX=%s\nSELINUXTYPE=%s\n' % (expected_state, expected_policy)) + + log.debug("Uploading selinux config...") + utils.dump_file(group.cluster, config, 'selinux_config') + group.put(config, '/etc/selinux/config', backup=True, sudo=True) + + semanage_commands = '' + for item in expected_permissive: + if semanage_commands != '': + semanage_commands = semanage_commands + ' && sudo ' + semanage_commands = semanage_commands + 'semanage permissive -a %s' % item + log.verbose("The following command will be executed to configure permissive:\n%s" % semanage_commands) + + group.sudo(semanage_commands) + + group.cluster.schedule_cumulative_point(system.reboot_nodes) + group.cluster.schedule_cumulative_point(system.verify_system) diff --git a/kubetool/sysctl.py b/kubetool/sysctl.py new file mode 100644 index 000000000..152b92946 --- /dev/null +++ b/kubetool/sysctl.py @@ -0,0 +1,71 @@ +""" +This module works with sysctl on remote systems. +Using this module you can generate new sysctl configs, install and apply them. +""" + +import io + +from kubetool.core import utils +from kubetool.core.group import NodeGroup, NodeGroupResult + + +def make_config(cluster): + """ + Converts parameters from inventory['services']['sysctl'] to a string in the format of sysctl.conf. + """ + config = "" + if cluster.inventory['services'].get('sysctl') is not None: + for key, value in cluster.inventory['services']['sysctl'].items(): + if isinstance(value, str): + value = value.strip() + if value is not None and value != '': + if key == "kernel.pid_max": + required_pid_max = get_pid_max(cluster.inventory) + if value > 2 ** 22: + raise Exception( + "The 'kernel.pid_max' value = '%s' is greater than the maximum allowable '%s'" + % (value, 2 ** 22)) + if value < required_pid_max: + raise Exception( + "The 'kernel.pid_max' value = '%s' is lower than " + "the minimum required for kubelet configuration = '%s'" + % (value, required_pid_max)) + if value < 32768: + cluster.log.warning("The 'kernel.pid_max' value = '%s' is lower than " + "default system value = '32768'" % value) + config += "%s = %s\n" % (key, value) + if not cluster.inventory['services']['sysctl'].get("kernel.pid_max"): + pid_max = get_pid_max(cluster.inventory) + if pid_max < 32768: + cluster.log.warning("The 'kernel.pid_max' value = '%s' is lower than " + "default system value = '32768'" % pid_max) + if pid_max > 2**22: + raise Exception("Calculated 'pid_max' value = '%s' is greater than the maximum allowable '%s'" + % (pid_max, 2**22)) + config += "%s = %s\n" % ("kernel.pid_max", pid_max) + return config + + +def configure(group: NodeGroup) -> NodeGroupResult: + """ + Generates and uploads sysctl configuration to the group. + The configuration will be placed in the sysctl daemon directory.
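+
+    For example, with the defaults from defaults.yaml the uploaded file contains plain
+    "key = value" lines such as (illustrative values; the real ones come from
+    services.sysctl and services.kubeadm_kubelet in the inventory):
+
+        net.ipv4.ip_forward = 1
+        kernel.pid_max = 452608
+
+    where kernel.pid_max is derived as maxPods (110) * podPidsLimit (4096) + 2048.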
+ """ + config = make_config(group.cluster) + utils.dump_file(group.cluster, config, '98-kubetools-sysctl.conf') + group.put(io.StringIO(config), '/etc/sysctl.d/98-kubetools-sysctl.conf', backup=True, sudo=True) + return group.sudo('ls -la /etc/sysctl.d/98-kubetools-sysctl.conf') + + +def reload(group: NodeGroup) -> NodeGroupResult: + """ + Reloads sysctl configuration in the specified group. + """ + return group.sudo('sysctl -p /etc/sysctl.d/98-kubetools-sysctl.conf') + + +def get_pid_max(inventory): + max_pods = inventory["services"]["kubeadm_kubelet"].get("maxPods", 110) + pod_pids_limit = inventory["services"]["kubeadm_kubelet"].get("podPidsLimit", 4096) + return max_pods * pod_pids_limit + 2048 + diff --git a/kubetool/system.py b/kubetool/system.py new file mode 100644 index 000000000..6e4b92edd --- /dev/null +++ b/kubetool/system.py @@ -0,0 +1,684 @@ +import configparser +import io +import re +import time +from copy import deepcopy + +import fabric +import yaml + +from kubetool import selinux, kubernetes, packages +from kubetool.core import utils +from kubetool.core.cluster import KubernetesCluster +from kubetool.core.executor import RemoteExecutor +from kubetool.core.group import NodeGroupResult, NodeGroup +from kubetool.core.yaml_merger import default_merger + + +def verify_inventory(inventory, cluster): + + if cluster.inventory['services']['ntp'].get('chrony', {}).get('servers') \ + and (cluster.inventory['services']['ntp'].get('timesyncd', {}).get('Time', {}).get('NTP') or + cluster.inventory['services']['ntp'].get('timesyncd', {}).get('Time', {}).get('FallbackNTP')): + raise Exception('chrony and timesyncd configured both at the same time') + + # TODO: verify selinux and apparmor are not enabled at the same time + + return inventory + + +def enrich_inventory(inventory, cluster): + if inventory['services'].get('packages'): + for _type in ['install', 'upgrade', 'remove']: + if inventory['services']['packages'].get(_type) is not None: + if isinstance(inventory['services']['packages'][_type], list): + inventory['services']['packages'][_type] = { + 'include': inventory['services']['packages'][_type] + } + for __type in ['include', 'exclude']: + if inventory['services']['packages'][_type].get(__type) is not None: + if not isinstance(inventory['services']['packages'][_type][__type], list): + raise Exception('Packages %s section in configfile has invalid type. ' + 'Expected \'list\', but found \'%s\'' + % (__type, type(inventory['services']['packages'][_type][__type]))) + if not inventory['services']['packages'][_type][__type]: + raise Exception('Packages %s section contains empty \'%s\' definition. 
' % (__type, __type)) + elif __type == 'include': + if _type != 'install': + inventory['services']['packages'][_type]['include'] = ['*'] + else: + raise Exception('Definition \'include\' is missing in \'install\' packages section, ' + 'but should be specified.') + + if inventory['services'].get('etc_hosts'): + + control_plain = inventory['control_plain']['internal'] + + control_plain_names = inventory['services']['etc_hosts'].get(control_plain, []) + control_plain_names.append(cluster.inventory['cluster_name']) + control_plain_names.append('control-plain') + inventory['services']['etc_hosts'][control_plain] = control_plain_names + + for node in cluster.inventory['nodes']: + if 'remove_node' in node['roles']: + continue + + internal_node_ip_names = inventory['services']['etc_hosts'].get(node['internal_address'], []) + internal_node_ip_names.append("%s.%s" % (node['name'], cluster.inventory['cluster_name'])) + internal_node_ip_names.append(node['name']) + inventory['services']['etc_hosts'][node['internal_address']] = internal_node_ip_names + + if node.get('address'): + external_node_ip_names = inventory['services']['etc_hosts'].get(node['address'], []) + external_node_ip_names.append("%s-external.%s" % (node['name'], cluster.inventory['cluster_name'])) + external_node_ip_names.append(node['name'] + "-external") + inventory['services']['etc_hosts'][node['address']] = external_node_ip_names + + uniq_node_hostnames = list(set(inventory['services']['etc_hosts'][node['address']])) + inventory['services']['etc_hosts'][node['address']] = uniq_node_hostnames + + + return inventory + + +def enrich_upgrade_inventory(inventory, cluster): + if cluster.context.get("initial_procedure") != "upgrade": + return inventory + + # validate all packages sections in procedure inventory + with open(utils.get_resource_absolute_path('resources/configurations/defaults.yaml', script_relative=True), 'r') \ + as stream: + base_associations = yaml.safe_load(stream)["services"]["packages"]["associations"][get_os_family(cluster)] + + cluster_associations = deepcopy(cluster.inventory["services"]["packages"]["associations"]) + previous_ver = cluster.context["initial_kubernetes_version"] + upgrade_plan = cluster.procedure_inventory.get('upgrade_plan') + for version in upgrade_plan: + upgrade_associations = cluster.procedure_inventory.get(version, {}).get("packages", {}).get("associations", {}) + for package in get_system_packages(cluster): + if base_associations[package]["package_name"] != cluster_associations[package]["package_name"] \ + and not upgrade_associations.get(package, {}).get("package_name"): + raise Exception(f"Associations are redefined for {package} in cluster.yaml for version {previous_ver}, " + f"but not present in procedure inventory for version {version}. 
" + f"Please, specify required associations explicitly in procedure inventory " + f"for all versions since {previous_ver}.") + if upgrade_associations.get(package, {}).get("package_name"): + cluster_associations[package]["package_name"] = upgrade_associations[package]["package_name"] + previous_ver = version + + upgrade_required = get_system_packages_for_upgrade(cluster) + cluster.context["packages"] = {"upgrade_required": upgrade_required} + + upgrade_ver = cluster.context["upgrade_version"] + packages_section = cluster.procedure_inventory.get(upgrade_ver, {}).get("packages") + if packages_section: + default_merger.merge(inventory["services"]["packages"], packages_section) + + return inventory + + +def get_system_packages_for_upgrade(cluster): + upgrade_ver = cluster.context["upgrade_version"] + previous_ver = cluster.context["initial_kubernetes_version"] + compatibility = cluster.globals["compatibility_map"]["software"] + + # handle special cases in which upgrade is not required for particular package + cluster_associations = cluster.inventory["services"]["packages"]["associations"] + upgrade_associations = cluster.procedure_inventory.get(upgrade_ver, {}).get("packages", {}).get("associations", {}) + system_packages = get_system_packages(cluster) + upgrade_required = list(system_packages) + for package in system_packages: + defined_association = upgrade_associations.get(package, {}).get("package_name") + if defined_association and defined_association == cluster_associations[package]['package_name']: + # case 1: package_name is defined in upgrade inventory but is equal to one already defined in cluster.yaml + upgrade_required.remove(package) + elif compatibility.get(package) and compatibility[package][upgrade_ver] == compatibility[package][previous_ver] \ + and not defined_association: + # case 2: package_name is not defined in upgrade inventory and default versions are equal + upgrade_required.remove(package) + + # all other packages should be updated + return upgrade_required + + +def get_system_packages(cluster): + return ["haproxy", "keepalived", cluster.inventory['services']['cri']['containerRuntime']] + + +def detect_os_family(cluster, suppress_exceptions=False): + group = cluster.nodes['all'].get_online_nodes() + if cluster.context.get("initial_procedure") == "remove_node": + # TODO: get rid of this construction + active_timeout = int(cluster.globals["nodes"]["remove"]["check_active_timeout"]) + group = cluster.nodes['all'].wait_active_nodes(timeout=active_timeout) + + detected_os_family = None + ''' + For Red Hat, CentOS, Oracle Linux, and Ubuntu information in /etc/os-release /etc/redhat-release is sufficient but, + Debian stores the full version in a special file. 
sed transforms version string, eg 10.10 becomes DEBIAN_VERSION="10.10" + ''' + results = group.run("cat /etc/*elease; cat /etc/debian_version 2> /dev/null | sed 's/\\(.\\+\\)/DEBIAN_VERSION=\"\\1\"/' || true") + + for connection, result in results.items(): + stdout = result.stdout.lower() + + version = None + versions = [] + lines = '' + + version_regex = re.compile("\\s\\d*\\.\\d*", re.M) + for line in stdout.split("\n"): + if 'centos' in line or 'rhel' in line: + # CentOS and Red Hat have a major version in VERSION_ID string + matches = re.findall(version_regex, line) + if matches: + version = matches[0].strip() + if '=' in line: + lines += line + "\n" + + os_release = configparser.ConfigParser() + os_release.read_string("[system]\n" + lines) + name = os_release.get("system", "id").replace('"', '') + if version is None: + if name == 'debian': + version = os_release.get("system", "debian_version").replace('"', '') + else: + # Oracle Linux and Ubuntu have full version in VERSION_ID string + version = os_release.get("system", "version_id").replace('"', '') + + cluster.log.debug("Distribution: %s; Version: %s" % (name, version)) + + if name in cluster.globals["compatibility_map"]["distributives"]: + os_family_list = cluster.globals["compatibility_map"]["distributives"][name] + for os_family_item in os_family_list: + versions.extend(os_family_item["versions"]) + if version in versions: + os_family = os_family_item["os_family"] + versions = [] + break + else: + os_family = 'unknown' + else: + os_family = 'unsupported' + + cluster.log.debug("OS family: %s" % os_family) + + group.cluster.context["nodes"][connection.host]["os"] = { + 'name': name, + 'version': version, + 'family': os_family + } + + # todo: this is not good, we need to know if "old" nodes have different OS family + # maybe we should not use global static OS and use group-wise calculated OS? + for node in group.get_new_nodes_or_self().get_ordered_members_list(provide_node_configs=True): + os_family = group.cluster.context["nodes"][node['connect_to']]["os"]['family'] + if os_family == 'unknown' and not suppress_exceptions: + raise Exception('OS family is unknown') + if not detected_os_family: + detected_os_family = os_family + elif detected_os_family != os_family: + detected_os_family = 'multiple' + if not suppress_exceptions: + raise Exception('OS families differ: detected %s and %s in same cluster' % (detected_os_family, os_family)) + + group.cluster.context["os"] = detected_os_family + + return results + + +def get_os_family(cluster): + if not is_os_detected(cluster): + detect_os_family(cluster) + return cluster.context.get("os") + + +def get_compatibility_version_key(cluster: KubernetesCluster) -> str or None: + """ + Get os-specific version key to be used in software compatibility map. + :param cluster: Cluster object for which to resolve compatibility version key. + :return: String to use as version key. None if OS is unknown or multiple OS present. + """ + """ + Return os-specific version compatibility key. + If OS is unknown or multiple OS, then returns None. 
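    As an aside, a minimal illustration of the os-release parsing used in detect_os_family() above
    (the sample content is hypothetical): /etc/os-release is plain key=value text, so prefixing a
    synthetic [system] section header makes it readable with configparser:

        import configparser

        sample = 'ID="ubuntu"\nVERSION_ID="20.04"\n'   # hypothetical /etc/os-release content
        parser = configparser.ConfigParser()
        parser.read_string("[system]\n" + sample)
        assert parser.get("system", "id").replace('"', '') == "ubuntu"
        assert parser.get("system", "version_id").replace('"', '') == "20.04"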
+ """ + os = get_os_family(cluster) + if os == "rhel": + return "version_rhel" + elif os == "rhel8": + return "version_rhel8" + elif os == "debian": + return "version_debian" + else: + return None + + +def is_multiple_os_detected(cluster): + return get_os_family(cluster) == 'multiple' + + +def update_resolv_conf(group, config=None): + if config is None: + raise Exception("Data can't be empty") + + # TODO: use jinja template + buffer = io.StringIO() + if config.get("search") is not None: + buffer.write("search %s\n" % config["search"]) + if config.get("nameservers") is not None: + for address in config.get("nameservers"): + buffer.write("nameserver %s\n" % address) + + utils.dump_file(group.cluster, buffer, 'resolv.conf') + + group.put(buffer, "/etc/resolv.conf", backup=True, immutable=True, sudo=True, hide=True) + + +def generate_etc_hosts_config(inventory, cluster=None): + result = "" + + max_len_ip = 0 + + ignore_ips = [] + if cluster and cluster.context['initial_procedure'] == 'remove_node': + for removal_node in cluster.procedure_inventory.get("nodes"): + if isinstance(removal_node, str): + removal_node_name = removal_node + elif isinstance(removal_node, dict) and removal_node.get('name'): + removal_node_name = removal_node['name'] + else: + raise Exception('Invalid node specification in procedure.yaml') + for node in inventory['nodes']: + if node['name'] == removal_node_name: + if node.get('address'): + ignore_ips.append(node['address']) + if node.get('internal_address'): + ignore_ips.append(node['internal_address']) + + ignore_ips = list(set(ignore_ips)) + + for ip in list(inventory['services']['etc_hosts'].keys()): + if len(ip) > max_len_ip: + max_len_ip = len(ip) + + for ip, names in inventory['services']['etc_hosts'].items(): + if isinstance(names, list): + # remove records with empty values from list + names = list(filter(len, names)) + # if list is empty, then skip + if not names: + continue + names = " ".join(names) + if ip not in ignore_ips: + result += "%s%s %s\n" % (ip, " " * (max_len_ip - len(ip)), names) + + return result + + +def update_etc_hosts(group, config=None): + if config is None: + raise Exception("Data can't be empty") + utils.dump_file(group.cluster, config, 'etc_hosts') + group.put(io.StringIO(config), "/etc/hosts", backup=True, sudo=True, hide=True) + + +def is_os_detected(cluster): + return bool(cluster.context.get("os")) + + +def restart_service(group, name=None): + if name is None: + raise Exception("Service name can't be empty") + return group.sudo('systemctl restart %s' % name) + + +def enable_service(group, name=None, now=True): + if name is None: + raise Exception("Service name can't be empty") + + cmd = 'systemctl enable %s' % name + if now: + cmd = cmd + " --now" + return group.sudo(cmd) + + +def disable_service(group, name=None, now=True): + if name is None: + raise Exception("Service name can't be empty") + + cmd = 'systemctl disable %s' % name + if now: + cmd = cmd + " --now" + return group.sudo(cmd) + + +def patch_systemd_service(group: NodeGroup, service_name, patch_source): + group.sudo(f"mkdir -p /etc/systemd/system/{service_name}.service.d") + group.put(patch_source, f"/etc/systemd/system/{service_name}.service.d/{service_name}.conf", + sudo=True, binary=False) + group.sudo("systemctl daemon-reload") + + +def is_firewalld_disabled(group): + result = group.sudo("systemctl status firewalld", warn=True) + disabled_status = True + + for node_result in list(result.values()): + if node_result.return_code != 4 and "disabled" not in 
node_result.stdout: + disabled_status = False + + return disabled_status, result + + +def disable_firewalld(group): + log = group.cluster.log + + already_disabled, result = is_firewalld_disabled(group) + + if already_disabled: + log.debug("Skipped - FirewallD already disabled or not installed") + return result + + log.verbose("Trying to stop and disable FirewallD...") + + result = disable_service(group, name='firewalld', now=True) + + group.cluster.schedule_cumulative_point(reboot_nodes) + group.cluster.schedule_cumulative_point(verify_system) + + return result + + +def is_swap_disabled(group): + result = group.sudo("cat /proc/swaps", warn=True) + disabled_status = True + + for node_result in list(result.values()): + # is there any other lines excluding first head line? + if node_result.stdout.strip().split('\n')[1:]: + disabled_status = False + + return disabled_status, result + + +def disable_swap(group): + + + group.cluster.schedule_cumulative_point(reboot_nodes) + group.cluster.schedule_cumulative_point(verify_system) + + log = group.cluster.log + + already_disabled, result = is_swap_disabled(group) + + if already_disabled: + log.debug("Skipped - swap already disabled") + return result + + log.verbose("Switching swap off...") + + group.sudo('swapoff -a', warn=True) + group.sudo('sed -i.bak \'/swap/d\' /etc/fstab', warn=True) + + group.cluster.schedule_cumulative_point(reboot_nodes) + group.cluster.schedule_cumulative_point(verify_system) + + +def reboot_nodes(group, try_graceful=None, cordone_on_graceful=True): + log = group.cluster.log + + if try_graceful is None: + if 'controlplain_uri' not in group.cluster.context.keys(): + kubernetes.is_cluster_installed(group.cluster) + + graceful_reboot = try_graceful is True or \ + (try_graceful is None and group.cluster.context['controlplain_uri'] is not None) + + if not graceful_reboot: + return perform_group_reboot(group) + + log.verbose('Graceful reboot required') + + first_master = group.cluster.nodes['master'].get_first_member() + results = NodeGroupResult() + + for node in group.get_ordered_members_list(provide_node_configs=True): + cordon_required = cordone_on_graceful and ('master' in node['roles'] or 'worker' in node['roles']) + if cordon_required: + res = first_master.sudo( + kubernetes.prepare_drain_command(node, group.cluster.inventory['services']['kubeadm']['kubernetesVersion'], + group.cluster.globals, False, group.cluster.nodes), warn=True) + log.verbose(res) + log.debug(f'Rebooting node "{node["name"]}"') + raw_results = perform_group_reboot(node['connection']) + if cordon_required: + res = first_master.sudo(f'kubectl uncordon {node["name"]}', warn=True) + log.verbose(res) + results.update(raw_results) + + return results + + +def get_reboot_history(group: NodeGroup): + return group.run('last reboot') + + +def perform_group_reboot(group: NodeGroup): + log = group.cluster.log + + initial_boot_history = get_reboot_history(group) + result = group.sudo(group.cluster.globals['nodes']['boot']['reboot_command'], warn=True) + log.debug("Waiting for boot up...") + group.wait_for_reboot(initial_boot_history) + return result + + +def reload_systemctl(group): + return group.sudo('systemctl daemon-reload') + + +def add_to_path(group, string): + # TODO: write to .bash_profile + group.sudo("export PATH=$PATH:%s" % string) + + +def configure_chronyd(group, retries=60): + log = group.cluster.log + chronyd_config = '' + + for server in group.cluster.inventory['services']['ntp']['chrony']['servers']: + chronyd_config += "server " + server + "\n" + 
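    # For illustration, with a hypothetical inventory such as
    #   services.ntp.chrony:
    #     servers: ['ntp1.example.com iburst', 'ntp2.example.com iburst']
    #     makestep: '5 -1'
    #     rtcsync: true
    # the config assembled here ends up as:
    #   server ntp1.example.com iburst
    #   server ntp2.example.com iburst
    #
    #   makestep 5 -1
    #   rtcsync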
+ if group.cluster.inventory['services']['ntp']['chrony'].get('makestep'): + chronyd_config += "\nmakestep " + group.cluster.inventory['services']['ntp']['chrony']['makestep'] + + if group.cluster.inventory['services']['ntp']['chrony'].get('rtcsync', False): + chronyd_config += "\nrtcsync" + + utils.dump_file(group.cluster, chronyd_config, 'chrony.conf') + group.put(io.StringIO(chronyd_config), '/etc/chrony.conf', backup=True, sudo=True) + group.sudo('systemctl restart chronyd') + while retries > 0: + log.debug("Waiting for time sync, retries left: %s" % retries) + result = group.sudo('chronyc tracking && sudo chronyc sources') + if "Normal" in list(result.values())[0].stdout: + log.debug("Time synced!") + return result + else: + log.debug("Time is not synced yet") + log.debug(result) + time.sleep(1) + retries -= 1 + + raise Exception("Time not synced, but timeout is reached") + + +def configure_timesyncd(group, retries=120): + log = group.cluster.log + timesyncd_config = '' + + for section, options in group.cluster.inventory['services']['ntp']['timesyncd'].items(): + timesyncd_config += '[%s]' % section + for option_name, option_value in options.items(): + if isinstance(option_value, list): + option_value_str = " ".join(option_value) + else: + option_value_str = str(option_value) + timesyncd_config += '\n%s=%s' % (option_name, option_value_str) + timesyncd_config += '\n\n' + + utils.dump_file(group.cluster, timesyncd_config, 'timesyncd.conf') + group.put(io.StringIO(timesyncd_config), '/etc/systemd/timesyncd.conf', backup=True, sudo=True) + res = group.sudo('timedatectl set-ntp true ' + '&& sudo systemctl enable --now systemd-timesyncd.service ' + '&& sudo systemctl restart systemd-timesyncd.service ' + '&& sudo systemctl status systemd-timesyncd.service') + log.verbose(res) + while retries > 0: + log.debug("Waiting for time sync, retries left: %s" % retries) + result = group.sudo('timedatectl timesync-status && sudo timedatectl status') + if "synchronized: yes" in list(result.values())[0].stdout: + log.debug("Time synced!") + return result + else: + log.debug("Time is not synced yet") + log.debug(result) + time.sleep(1) + retries -= 1 + + raise Exception("Time not synced, but timeout is reached") + + +def setup_modprobe(group): + log = group.cluster.log + + if group.cluster.inventory['services'].get('modprobe') is None \ + or not group.cluster.inventory['services']['modprobe']: + log.debug('Skipped - no modprobe configs in inventory') + return + + is_valid, result = is_modprobe_valid(group) + + if is_valid: + log.debug("Skipped - all necessary kernel modules are presented") + return result + + config = '' + for module_name in group.cluster.inventory['services']['modprobe']: + module_name = module_name.strip() + if module_name is not None and module_name != '': + config += module_name + "\n" + + log.debug("Uploading config...") + utils.dump_file(group.cluster, config, 'modprobe_predefined.conf') + group.put(io.StringIO(config), "/etc/modules-load.d/predefined.conf", backup=True, sudo=True, hide=True) + + group.cluster.schedule_cumulative_point(reboot_nodes) + group.cluster.schedule_cumulative_point(verify_system) + + +def is_modprobe_valid(group): + log = group.cluster.log + + verify_results = group.sudo("lsmod", warn=True) + is_valid = True + + for module_name in group.cluster.inventory['services']['modprobe']: + for conn, result in verify_results.items(): + if module_name not in result.stdout: + log.verbose('Kernel module %s not found at %s' % (module_name, conn.host)) + is_valid = 
False + + return is_valid, verify_results + + +def verify_system(group): + log = group.cluster.log + + if group.cluster.is_task_completed('prepare.system.setup_selinux'): + log.debug("Verifying Selinux...") + selinux_configured, selinux_result, selinux_parsed_result = \ + selinux.is_config_valid(group, + state=selinux.get_expected_state(group.cluster.inventory), + policy=selinux.get_expected_policy(group.cluster.inventory), + permissive=selinux.get_expected_permissive(group.cluster.inventory)) + log.debug(selinux_result) + if not selinux_configured: + raise Exception("Selinux is still not configured") + else: + log.verbose('Selinux verification skipped - origin task was not completed') + + if group.cluster.is_task_completed('prepare.system.setup_apparmor'): + log.debug("Verifying Apparmor...") + # TODO + # if not apparmor_configured: + # raise Exception("Selinux is still not configured") + else: + log.verbose('Apparmor verification skipped - origin task was not completed') + + if group.cluster.is_task_completed('prepare.system.disable_firewalld'): + log.debug("Verifying FirewallD...") + firewalld_disabled, firewalld_result = is_firewalld_disabled(group) + log.debug(firewalld_result) + if not firewalld_disabled: + raise Exception("FirewallD is still enabled") + else: + log.verbose('FirewallD verification skipped - origin disable task was not completed') + + if group.cluster.is_task_completed('prepare.system.disable_swap'): + log.debug("Verifying swap...") + swap_disabled, swap_result = is_swap_disabled(group) + log.debug(swap_result) + if not swap_disabled: + raise Exception("Swap is still enabled") + else: + log.verbose('Swap verification skipped - origin disable task was not completed') + + if group.cluster.is_task_completed('prepare.system.modprobe'): + log.debug("Verifying modprobe...") + modprobe_valid, swap_result = is_modprobe_valid(group) + log.debug(swap_result) + if not modprobe_valid: + raise Exception("Required kernel modules are not presented") + else: + log.verbose('Modprobe verification skipped - origin setup task was not completed') + + +def detect_active_interface(group: NodeGroup): + with RemoteExecutor(group.cluster.log) as exe: + for node in group.get_ordered_members_list(provide_node_configs=True): + detect_interface_by_address(node['connection'], node['internal_address']) + for host, host_results in exe.get_last_results().items(): + try: + interface = list(host_results.values())[0].stdout.strip() + except Exception: + interface = None + group.cluster.context['nodes'][host]['online'] = True + group.cluster.context['nodes'][host]['active_interface'] = interface + + return exe.get_last_results_str() + + +def detect_interface_by_address(connection: fabric.connection.Connection, address: str): + return connection.sudo("sudo ip -o a | grep %s | awk '{print $2}'" % address) + + +def whoami(group: NodeGroup) -> NodeGroupResult: + ''' + Determines which nodes are enabled and which ones are disabled + ''' + if group.cluster.context['initial_procedure'] == 'remove_node': + online_nodes = group.wait_active_nodes() + else: + online_nodes = group + offline_nodes = group.exclude_group(online_nodes) + results = online_nodes.sudo("whoami") + for connection, result in results.items(): + group.cluster.context['nodes'][connection.host]['online'] = True + group.cluster.context['nodes'][connection.host]['hasroot'] = result.stdout.strip() == "root" + if not offline_nodes.is_empty(): + for node in offline_nodes.get_ordered_members_list(provide_node_configs=True): + 
group.cluster.context['nodes'][node['connect_to']]['online'] = False + group.cluster.context['nodes'][node['connect_to']]['hasroot'] = False + return results diff --git a/kubetool/templates/__init__.py b/kubetool/templates/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/kubetool/templates/haproxy.cfg.j2 b/kubetool/templates/haproxy.cfg.j2 new file mode 100644 index 000000000..a9e53c799 --- /dev/null +++ b/kubetool/templates/haproxy.cfg.j2 @@ -0,0 +1,69 @@ +global + log 127.0.0.1 local2 debug + +defaults + log global + option dontlognull + timeout connect {{ config_options['config']['defaults']['timeout_connect'] }} + timeout client {{ config_options['config']['defaults']['timeout_client'] }} + timeout server {{ config_options['config']['defaults']['timeout_server'] }} + timeout tunnel {{ config_options['config']['defaults']['timeout_tunnel'] }} + timeout client-fin {{ config_options['config']['defaults']['timeout_client_fin'] }} + maxconn {{ config_options['config']['defaults']['maxconn'] }} + +frontend http +{%- for binding in bindings %} + bind {{ binding }}:80 +{%- endfor %} + option tcplog + mode tcp + default_backend http_backend + +backend http_backend + mode tcp + balance source + option tcp-check + default-server inter 2s fall 2 rise 3 +{%- for node in nodes -%} +{% if 'worker' in node['roles'] %} + server {{ node['name'] }} {{ node['internal_address'] }}:80 check port 80 +{%- endif %} +{%- endfor %} + +frontend https +{%- for binding in bindings %} + bind {{ binding }}:443 +{%- endfor %} + option tcplog + mode tcp + default_backend https_backend + +backend https_backend + mode tcp + balance source + option tcp-check + default-server inter 2s fall 2 rise 3 +{%- for node in nodes -%} +{% if 'worker' in node['roles'] %} + server {{ node['name'] }} {{ node['internal_address'] }}:443 check port 443 +{%- endif %} +{%- endfor %} + +frontend kubernetes_api +{%- for binding in bindings %} + bind {{ binding }}:6443 +{%- endfor %} + option tcplog + mode tcp + default_backend kubernetes_master_nodes + +backend kubernetes_master_nodes + mode tcp + balance roundrobin + option tcp-check + default-server inter 2s fall 2 rise 3 +{%- for node in nodes -%} +{% if 'master' in node['roles'] %} + server {{ node['name'] }} {{ node['internal_address'] }}:6443 check port 6443 +{%- endif %} +{%- endfor %} diff --git a/kubetool/templates/keepalived.conf.j2 b/kubetool/templates/keepalived.conf.j2 new file mode 100644 index 000000000..281176732 --- /dev/null +++ b/kubetool/templates/keepalived.conf.j2 @@ -0,0 +1,35 @@ +vrrp_script script_{{ item['id'] }} { + script "/usr/local/bin/check_haproxy.sh" + interval 2 + fall 2 + rise 2 +} + + +vrrp_instance balancer_{{ item['id'] }} { + state BACKUP + interface {{ interface }} + virtual_router_id {{ item['router_id'] }} + priority {{ priority }} + nopreempt + virtual_ipaddress { + {{ item['ip'] }} dev {{ interface }} label vip_{{ item['id'] }} + } + + track_script { + script_{{ item['id'] }} + } + + authentication { + auth_type PASS + auth_pass {{ item['password'] }} + } +{%- if peers | length > 0 %} + unicast_src_ip {{ source }} + unicast_peer { +{%- for ip in peers %} + {{ ip }} +{%- endfor %} + } +{%- endif %} +} diff --git a/kubetool/templates/kubelet.service.j2 b/kubetool/templates/kubelet.service.j2 new file mode 100644 index 000000000..a7da61bb4 --- /dev/null +++ b/kubetool/templates/kubelet.service.j2 @@ -0,0 +1,18 @@ +# Note: This dropin only works with kubeadm and kubelet v1.11+ +[Service] 
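# This drop-in overrides the stock kubelet unit: the empty "ExecStart=" line below is the
# systemd idiom for clearing the previously defined command before redefining it, and
# {{ hostname }} is filled in with the node's own name when the template is rendered.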
+Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf" +Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml" +# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically +EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env +# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use +# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file. +EnvironmentFile=-/etc/sysconfig/kubelet +ExecStart= +ExecStart=/usr/bin/kubelet --runtime-cgroups=/systemd/system.slice --kubelet-cgroups=/systemd/system.slice --hostname-override={{ hostname }} $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS +Restart=on-failure +RestartSec=5 +CPUAccounting=true +MemoryAccounting=true + +[Install] +WantedBy=multi-user.target diff --git a/kubetool/templates/plugins/__init__.py b/kubetool/templates/plugins/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/kubetool/templates/plugins/calico-ippool.yaml.j2 b/kubetool/templates/plugins/calico-ippool.yaml.j2 new file mode 100644 index 000000000..8bdb50f4b --- /dev/null +++ b/kubetool/templates/plugins/calico-ippool.yaml.j2 @@ -0,0 +1,12 @@ +apiVersion: projectcalico.org/v3 +kind: IPPool +metadata: + name: {% if services.kubeadm.networking.podSubnet|isipv4 %}default-ipv4-ippool{% else %}default-ipv6-ippool{% endif %} +spec: + cidr: {% if services.kubeadm.networking.podSubnet|isipv4 %}{{ plugins['calico']['cni']['ipam']['ipv4_pools'][0] }}{% else %}{{ plugins['calico']['cni']['ipam']['ipv6_pools'][0] }}{% endif %} + natOutgoing: {{ plugins['calico']['natOutgoing'] }} + nodeSelector: all() +{% if services.kubeadm.networking.podSubnet|isipv4 %} + vxlanMode: {% if plugins.calico.mode == 'vxlan' and plugins['calico']['crossSubnet'] %}CrossSubnet{% elif plugins.calico.mode == 'vxlan' %}Always{% else %}Never{% endif %} + ipipMode: {% if plugins.calico.mode == 'ipip' and plugins['calico']['crossSubnet'] %}CrossSubnet{% elif plugins.calico.mode == 'ipip' %}Always{% else %}Never{% endif %} +{% endif %} diff --git a/kubetool/templates/plugins/calico-v3.16.yaml.j2 b/kubetool/templates/plugins/calico-v3.16.yaml.j2 new file mode 100644 index 000000000..aa02cfb9f --- /dev/null +++ b/kubetool/templates/plugins/calico-v3.16.yaml.j2 @@ -0,0 +1,4001 @@ +--- +# Source: calico/templates/calico-config.yaml +# This ConfigMap is used to configure a self-hosted Calico installation. +kind: ConfigMap +apiVersion: v1 +metadata: + name: calico-config + namespace: kube-system +data: + typha_service_name: "{% if plugins.calico.typha.enabled | default(false) == true %}calico-typha{% else %}none{% endif %}" + # Configure the backend to use. + calico_backend: "bird" + + # Configure the MTU to use for workload interfaces and the + # tunnels. For IPIP, set to your network MTU - 20; for VXLAN + # set to your network MTU - 50. + veth_mtu: "{{ plugins['calico']['mtu'] }}" + + # The CNI network configuration to install on each node. The special + # values in this config will be automatically populated. 
+ cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "calico", + "log_level": "info", + "log_file_path": "/var/log/calico/cni/cni.log", + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + "mtu": __CNI_MTU__, + "ipam": {{ plugins['calico']['cni']['ipam']|tojson|safe }}, + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + }, + { + "type": "portmap", + "snat": true, + "capabilities": {"portMappings": true} + }, + { + "type": "bandwidth", + "capabilities": {"bandwidth": true} + } + ] + } + +--- +# Source: calico/templates/kdd-crds.yaml + + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: bgpconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPConfiguration + listKind: BGPConfigurationList + plural: bgpconfigurations + singular: bgpconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: BGPConfiguration contains the configuration for any BGP routing. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPConfigurationSpec contains the values of the BGP configuration. + properties: + asNumber: + description: 'ASNumber is the default AS number used by a node. [Default: + 64512]' + format: int32 + type: integer + communities: + description: Communities is a list of BGP community values and their + arbitrary names for tagging routes. + items: + description: Community contains standard or large community value + and its name. + properties: + name: + description: Name given to community value. + type: string + value: + description: Value must be of format `aa:nn` or `aa:nn:mm`. + For standard community use `aa:nn` format, where `aa` and + `nn` are 16 bit number. For large community use `aa:nn:mm` + format, where `aa`, `nn` and `mm` are 32 bit number. Where, + `aa` is an AS Number, `nn` and `mm` are per-AS identifier. + pattern: ^(\d+):(\d+)$|^(\d+):(\d+):(\d+)$ + type: string + type: object + type: array + listenPort: + description: ListenPort is the port where BGP protocol should listen. + Defaults to 179 + maximum: 65535 + minimum: 1 + type: integer + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: INFO]' + type: string + nodeToNodeMeshEnabled: + description: 'NodeToNodeMeshEnabled sets whether full node to node + BGP mesh is enabled. [Default: true]' + type: boolean + prefixAdvertisements: + description: PrefixAdvertisements contains per-prefix advertisement + configuration. + items: + description: PrefixAdvertisement configures advertisement properties + for the specified CIDR. 
+ properties: + cidr: + description: CIDR for which properties should be advertised. + type: string + communities: + description: Communities can be list of either community names + already defined in `Specs.Communities` or community value + of format `aa:nn` or `aa:nn:mm`. For standard community use + `aa:nn` format, where `aa` and `nn` are 16 bit number. For + large community use `aa:nn:mm` format, where `aa`, `nn` and + `mm` are 32 bit number. Where,`aa` is an AS Number, `nn` and + `mm` are per-AS identifier. + items: + type: string + type: array + type: object + type: array + serviceClusterIPs: + description: ServiceClusterIPs are the CIDR blocks from which service + cluster IPs are allocated. If specified, Calico will advertise these + blocks, as well as any cluster IPs within them. + items: + description: ServiceClusterIPBlock represents a single allowed ClusterIP + CIDR block. + properties: + cidr: + type: string + type: object + type: array + serviceExternalIPs: + description: ServiceExternalIPs are the CIDR blocks for Kubernetes + Service External IPs. Kubernetes Service ExternalIPs will only be + advertised if they are within one of these blocks. + items: + description: ServiceExternalIPBlock represents a single allowed + External IP CIDR block. + properties: + cidr: + type: string + type: object + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: bgppeers.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPPeer + listKind: BGPPeerList + plural: bgppeers + singular: bgppeer + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPPeerSpec contains the specification for a BGPPeer resource. + properties: + asNumber: + description: The AS Number of the peer. + format: int32 + type: integer + keepOriginalNextHop: + description: Option to keep the original nexthop field when routes + are sent to a BGP Peer. Setting "true" configures the selected BGP + Peers node to use the "next hop keep;" instead of "next hop self;"(default) + in the specific branch of the Node on "bird.cfg". + type: boolean + node: + description: The node name identifying the Calico node instance that + is peering with this peer. If this is not set, this represents a + global peer, i.e. a peer that peers with every node in the deployment. + type: string + nodeSelector: + description: Selector for the nodes that should have this peering. When + this is set, the Node field must be empty. 
+ type: string + peerIP: + description: The IP address of the peer followed by an optional port + number to peer with. If port number is given, format should be `[]:port` + or `:` for IPv4. If optional port number is not set, + and this peer IP and ASNumber belongs to a calico/node with ListenPort + set in BGPConfiguration, then we use that port to peer. + type: string + peerSelector: + description: Selector for the remote nodes to peer with. When this + is set, the PeerIP and ASNumber fields must be empty. For each + peering between the local node and selected remote nodes, we configure + an IPv4 peering if both ends have NodeBGPSpec.IPv4Address specified, + and an IPv6 peering if both ends have NodeBGPSpec.IPv6Address specified. The + remote AS number comes from the remote nodes NodeBGPSpec.ASNumber, + or the global default if that is not set. + type: string + required: + - asNumber + - peerIP + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: blockaffinities.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BlockAffinity + listKind: BlockAffinityList + plural: blockaffinities + singular: blockaffinity + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BlockAffinitySpec contains the specification for a BlockAffinity + resource. + properties: + cidr: + type: string + deleted: + description: Deleted indicates that this block affinity is being deleted. + This field is a string for compatibility with older releases that + mistakenly treat this field as a string. + type: string + node: + type: string + state: + type: string + required: + - cidr + - deleted + - node + - state + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: clusterinformations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: ClusterInformation + listKind: ClusterInformationList + plural: clusterinformations + singular: clusterinformation + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ClusterInformation contains the cluster specific information. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterInformationSpec contains the values of describing + the cluster. + properties: + calicoVersion: + description: CalicoVersion is the version of Calico that the cluster + is running + type: string + clusterGUID: + description: ClusterGUID is the GUID of the cluster + type: string + clusterType: + description: ClusterType describes the type of the cluster + type: string + datastoreReady: + description: DatastoreReady is used during significant datastore migrations + to signal to components such as Felix that it should wait before + accessing the datastore. + type: boolean + variant: + description: Variant declares which variant of Calico should be active. + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: felixconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: FelixConfiguration + listKind: FelixConfigurationList + plural: felixconfigurations + singular: felixconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Felix Configuration contains the configuration for Felix. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: FelixConfigurationSpec contains the values of the Felix configuration. + properties: + allowIPIPPacketsFromWorkloads: + description: 'AllowIPIPPacketsFromWorkloads controls whether Felix + will add a rule to drop IPIP encapsulated traffic from workloads + [Default: false]' + type: boolean + allowVXLANPacketsFromWorkloads: + description: 'AllowVXLANPacketsFromWorkloads controls whether Felix + will add a rule to drop VXLAN encapsulated traffic from workloads + [Default: false]' + type: boolean + awsSrcDstCheck: + description: 'Set source-destination-check on AWS EC2 instances. Accepted + value must be one of "DoNothing", "Enabled" or "Disabled". 
[Default: + DoNothing]' + enum: + - DoNothing + - Enable + - Disable + type: string + bpfConnectTimeLoadBalancingEnabled: + description: 'BPFConnectTimeLoadBalancingEnabled when in BPF mode, + controls whether Felix installs the connection-time load balancer. The + connect-time load balancer is required for the host to be able to + reach Kubernetes services and it improves the performance of pod-to-service + connections. The only reason to disable it is for debugging purposes. [Default: + true]' + type: boolean + bpfDataIfacePattern: + description: 'BPFDataIfacePattern is a regular expression that controls + which interfaces Felix should attach BPF programs to in order to + catch traffic to/from the network. This needs to match the interfaces + that Calico workload traffic flows over as well as any interfaces + that handle incoming traffic to nodeports and services from outside + the cluster. It should not match the workload interfaces (usually + named cali...). [Default: ^(en.*|eth.*|tunl0$)]' + type: string + bpfDisableUnprivileged: + description: 'BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled + sysctl to disable unprivileged use of BPF. This ensures that unprivileged + users cannot access Calico''s BPF maps and cannot insert their own + BPF programs to interfere with Calico''s. [Default: true]' + type: boolean + bpfEnabled: + description: 'BPFEnabled, if enabled Felix will use the BPF dataplane. + [Default: false]' + type: boolean + bpfExternalServiceMode: + description: 'BPFExternalServiceMode in BPF mode, controls how connections + from outside the cluster to services (node ports and cluster IPs) + are forwarded to remote workloads. If set to "Tunnel" then both + request and response traffic is tunneled to the remote node. If + set to "DSR", the request traffic is tunneled but the response traffic + is sent directly from the remote node. In "DSR" mode, the remote + node appears to use the IP of the ingress node; this requires a + permissive L2 network. [Default: Tunnel]' + type: string + bpfKubeProxyEndpointSlicesEnabled: + description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls + whether Felix's embedded kube-proxy accepts EndpointSlices or not. + type: boolean + bpfKubeProxyIptablesCleanupEnabled: + description: 'BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF + mode, Felix will proactively clean up the upstream Kubernetes kube-proxy''s + iptables chains. Should only be enabled if kube-proxy is not running. [Default: + true]' + type: boolean + bpfKubeProxyMinSyncPeriod: + description: 'BPFKubeProxyMinSyncPeriod, in BPF mode, controls the + minimum time between updates to the dataplane for Felix''s embedded + kube-proxy. Lower values give reduced set-up latency. Higher values + reduce Felix CPU usage by batching up more work. [Default: 1s]' + type: string + bpfLogLevel: + description: 'BPFLogLevel controls the log level of the BPF programs + when in BPF dataplane mode. One of "Off", "Info", or "Debug". The + logs are emitted to the BPF trace pipe, accessible with the command + `tc exec bpf debug`. [Default: Off].' + type: string + chainInsertMode: + description: 'ChainInsertMode controls whether Felix hooks the kernels + top-level iptables chains by inserting a rule at the top of the + chain or by appending a rule at the bottom. insert is the safe default + since it prevents Calicos rules from being bypassed. 
If you switch + to append mode, be sure that the other rules in the chains signal + acceptance by falling through to the Calico rules, otherwise the + Calico policy will be bypassed. [Default: insert]' + type: string + dataplaneDriver: + type: string + debugDisableLogDropping: + type: boolean + debugMemoryProfilePath: + type: string + debugSimulateCalcGraphHangAfter: + type: string + debugSimulateDataplaneHangAfter: + type: string + defaultEndpointToHostAction: + description: 'DefaultEndpointToHostAction controls what happens to + traffic that goes from a workload endpoint to the host itself (after + the traffic hits the endpoint egress policy). By default Calico + blocks traffic from workload endpoints to the host itself with an + iptables DROP action. If you want to allow some or all traffic + from endpoint to host, set this parameter to RETURN or ACCEPT. Use + RETURN if you have your own rules in the iptables INPUT chain; + Calico will insert its rules at the top of that chain, then RETURN + packets to the INPUT chain once it has completed processing workload + endpoint egress policy. Use ACCEPT to unconditionally accept packets + from workloads after processing workload endpoint egress policy. + [Default: Drop]' + type: string + deviceRouteProtocol: + description: This defines the route protocol added to programmed device + routes, by default this will be RTPROT_BOOT when left blank. + type: integer + deviceRouteSourceAddress: + description: This is the source address to use on programmed device + routes. By default the source address is left blank, leaving the + kernel to choose the source address used. + type: string + disableConntrackInvalidCheck: + type: boolean + endpointReportingDelay: + type: string + endpointReportingEnabled: + type: boolean + externalNodesList: + description: ExternalNodesCIDRList is a list of CIDR's of external-non-calico-nodes + which may source tunnel traffic and have the tunneled traffic be + accepted at calico nodes. + items: + type: string + type: array + failsafeInboundHostPorts: + description: 'FailsafeInboundHostPorts is a comma-delimited list of + UDP/TCP ports that Felix will allow incoming traffic to host endpoints + on irrespective of the security policy. This is useful to avoid + accidentally cutting off a host with incorrect configuration. Each + port should be specified as tcp: or udp:. + For back-compatibility, if the protocol is not specified, it defaults + to tcp. To disable all inbound host ports, use the value none. + The default value allows ssh access and DHCP. [Default: tcp:22, + udp:68, tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667]' + items: + description: ProtoPort is combination of protocol and port, both + must be specified. + properties: + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + failsafeOutboundHostPorts: + description: 'FailsafeOutboundHostPorts is a comma-delimited list + of UDP/TCP ports that Felix will allow outgoing traffic from host + endpoints to irrespective of the security policy. This is useful + to avoid accidentally cutting off a host with incorrect configuration. + Each port should be specified as tcp: or udp:. + For back-compatibility, if the protocol is not specified, it defaults + to tcp. To disable all outbound host ports, use the value none. + The default value opens etcds standard ports to ensure that Felix + does not get cut off from etcd as well as allowing DHCP and DNS. 
+ [Default: tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667, + udp:53, udp:67]' + items: + description: ProtoPort is combination of protocol and port, both + must be specified. + properties: + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + featureDetectOverride: + description: FeatureDetectOverride is used to override the feature + detection. Values are specified in a comma separated list with no + spaces, example; "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". + "true" or "false" will force the feature, empty or omitted values + are auto-detected. + type: string + genericXDPEnabled: + description: 'GenericXDPEnabled enables Generic XDP so network cards + that don''t support XDP offload or driver modes can use XDP. This + is not recommended since it doesn''t provide better performance + than iptables. [Default: false]' + type: boolean + healthEnabled: + type: boolean + healthHost: + type: string + healthPort: + type: integer + interfaceExclude: + description: 'InterfaceExclude is a comma-separated list of interfaces + that Felix should exclude when monitoring for host endpoints. The + default value ensures that Felix ignores Kubernetes'' IPVS dummy + interface, which is used internally by kube-proxy. If you want to + exclude multiple interface names using a single value, the list + supports regular expressions. For regular expressions you must wrap + the value with ''/''. For example having values ''/^kube/,veth1'' + will exclude all interfaces that begin with ''kube'' and also the + interface ''veth1''. [Default: kube-ipvs0]' + type: string + interfacePrefix: + description: 'InterfacePrefix is the interface name prefix that identifies + workload endpoints and so distinguishes them from host endpoint + interfaces. Note: in environments other than bare metal, the orchestrators + configure this appropriately. For example our Kubernetes and Docker + integrations set the cali value, and our OpenStack integration + sets the tap value. [Default: cali]' + type: string + interfaceRefreshInterval: + description: InterfaceRefreshInterval is the period at which Felix + rescans local interfaces to verify their state. The rescan can be + disabled by setting the interval to 0. + type: string + ipipEnabled: + type: boolean + ipipMTU: + description: 'IPIPMTU is the MTU to set on the tunnel device. See + Configuring MTU [Default: 1440]' + type: integer + ipsetsRefreshInterval: + description: 'IpsetsRefreshInterval is the period at which Felix re-checks + all iptables state to ensure that no other process has accidentally + broken Calicos rules. Set to 0 to disable iptables refresh. [Default: + 90s]' + type: string + iptablesBackend: + description: IptablesBackend specifies which backend of iptables will + be used. The default is legacy. + type: string + iptablesFilterAllowAction: + type: string + iptablesLockFilePath: + description: 'IptablesLockFilePath is the location of the iptables + lock file. You may need to change this if the lock file is not in + its standard location (for example if you have mapped it into Felixs + container at a different path). [Default: /run/xtables.lock]' + type: string + iptablesLockProbeInterval: + description: 'IptablesLockProbeInterval is the time that Felix will + wait between attempts to acquire the iptables lock if it is not + available. Lower values make Felix more responsive when the lock + is contended, but use more CPU. 
[Default: 50ms]' + type: string + iptablesLockTimeout: + description: 'IptablesLockTimeout is the time that Felix will wait + for the iptables lock, or 0, to disable. To use this feature, Felix + must share the iptables lock file with all other processes that + also take the lock. When running Felix inside a container, this + requires the /run directory of the host to be mounted into the calico/node + or calico/felix container. [Default: 0s disabled]' + type: string + iptablesMangleAllowAction: + type: string + iptablesMarkMask: + description: 'IptablesMarkMask is the mask that Felix selects its + IPTables Mark bits from. Should be a 32 bit hexadecimal number with + at least 8 bits set, none of which clash with any other mark bits + in use on the system. [Default: 0xff000000]' + format: int32 + type: integer + iptablesNATOutgoingInterfaceFilter: + type: string + iptablesPostWriteCheckInterval: + description: 'IptablesPostWriteCheckInterval is the period after Felix + has done a write to the dataplane that it schedules an extra read + back in order to check the write was not clobbered by another process. + This should only occur if another application on the system doesnt + respect the iptables lock. [Default: 1s]' + type: string + iptablesRefreshInterval: + description: 'IptablesRefreshInterval is the period at which Felix + re-checks the IP sets in the dataplane to ensure that no other process + has accidentally broken Calicos rules. Set to 0 to disable IP sets + refresh. Note: the default for this value is lower than the other + refresh intervals as a workaround for a Linux kernel bug that was + fixed in kernel version 4.11. If you are using v4.11 or greater + you may want to set this to, a higher value to reduce Felix CPU + usage. [Default: 10s]' + type: string + ipv6Support: + type: boolean + kubeNodePortRanges: + description: 'KubeNodePortRanges holds list of port ranges used for + service node ports. Only used if felix detects kube-proxy running + in ipvs mode. Felix uses these ranges to separate host and workload + traffic. [Default: 30000:32767].' + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + logFilePath: + description: 'LogFilePath is the full path to the Felix log. Set to + none to disable file logging. [Default: /var/log/calico/felix.log]' + type: string + logPrefix: + description: 'LogPrefix is the log prefix that Felix uses when rendering + LOG rules. [Default: calico-packet]' + type: string + logSeverityFile: + description: 'LogSeverityFile is the log severity above which logs + are sent to the log file. [Default: Info]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]' + type: string + logSeveritySys: + description: 'LogSeveritySys is the log severity above which logs + are sent to the syslog. Set to None for no logging to syslog. [Default: + Info]' + type: string + maxIpsetSize: + type: integer + metadataAddr: + description: 'MetadataAddr is the IP address or domain name of the + server that can answer VM queries for cloud-init metadata. In OpenStack, + this corresponds to the machine running nova-api (or in Ubuntu, + nova-api-metadata). A value of none (case insensitive) means that + Felix should not set up any NAT rule for the metadata path. [Default: + 127.0.0.1]' + type: string + metadataPort: + description: 'MetadataPort is the port of the metadata server. 
This, + combined with global.MetadataAddr (if not None), is used to set + up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort. + In most cases this should not need to be changed [Default: 8775].' + type: integer + natOutgoingAddress: + description: NATOutgoingAddress specifies an address to use when performing + source NAT for traffic in a natOutgoing pool that is leaving the + network. By default the address used is an address on the interface + the traffic is leaving on (ie it uses the iptables MASQUERADE target) + type: string + natPortRange: + anyOf: + - type: integer + - type: string + description: NATPortRange specifies the range of ports that is used + for port mapping when doing outgoing NAT. When unset the default + behavior of the network stack is used. + pattern: ^.* + x-kubernetes-int-or-string: true + netlinkTimeout: + type: string + openstackRegion: + description: 'OpenstackRegion is the name of the region that a particular + Felix belongs to. In a multi-region Calico/OpenStack deployment, + this must be configured somehow for each Felix (here in the datamodel, + or in felix.cfg or the environment on each compute node), and must + match the [calico] openstack_region value configured in neutron.conf + on each node. [Default: Empty]' + type: string + policySyncPathPrefix: + description: 'PolicySyncPathPrefix is used to by Felix to communicate + policy changes to external services, like Application layer policy. + [Default: Empty]' + type: string + prometheusGoMetricsEnabled: + description: 'PrometheusGoMetricsEnabled disables Go runtime metrics + collection, which the Prometheus client does by default, when set + to false. This reduces the number of metrics reported, reducing + Prometheus load. [Default: true]' + type: boolean + prometheusMetricsEnabled: + description: 'PrometheusMetricsEnabled enables the Prometheus metrics + server in Felix if set to true. [Default: false]' + type: boolean + prometheusMetricsHost: + description: 'PrometheusMetricsHost is the host that the Prometheus + metrics server should bind to. [Default: empty]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. [Default: 9091]' + type: integer + prometheusProcessMetricsEnabled: + description: 'PrometheusProcessMetricsEnabled disables process metrics + collection, which the Prometheus client does by default, when set + to false. This reduces the number of metrics reported, reducing + Prometheus load. [Default: true]' + type: boolean + removeExternalRoutes: + description: Whether or not to remove device routes that have not + been programmed by Felix. Disabling this will allow external applications + to also add device routes. This is enabled by default which means + we will remove externally added routes. + type: boolean + reportingInterval: + description: 'ReportingInterval is the interval at which Felix reports + its status into the datastore or 0 to disable. Must be non-zero + in OpenStack deployments. [Default: 30s]' + type: string + reportingTTL: + description: 'ReportingTTL is the time-to-live setting for process-wide + status reports. [Default: 90s]' + type: string + routeRefreshInterval: + description: 'RouterefreshInterval is the period at which Felix re-checks + the routes in the dataplane to ensure that no other process has + accidentally broken Calicos rules. Set to 0 to disable route refresh. 
+ [Default: 90s]' + type: string + routeSource: + description: 'RouteSource configures where Felix gets its routing + information. - WorkloadIPs: use workload endpoints to construct + routes. - CalicoIPAM: the default - use IPAM data to construct routes.' + type: string + routeTableRange: + description: Calico programs additional Linux route tables for various + purposes. RouteTableRange specifies the indices of the route tables + that Calico should use. + properties: + max: + type: integer + min: + type: integer + required: + - max + - min + type: object + sidecarAccelerationEnabled: + description: 'SidecarAccelerationEnabled enables experimental sidecar + acceleration [Default: false]' + type: boolean + usageReportingEnabled: + description: 'UsageReportingEnabled reports anonymous Calico version + number and cluster size to projectcalico.org. Logs warnings returned + by the usage server. For example, if a significant security vulnerability + has been discovered in the version of Calico being used. [Default: + true]' + type: boolean + usageReportingInitialDelay: + description: 'UsageReportingInitialDelay controls the minimum delay + before Felix makes a report. [Default: 300s]' + type: string + usageReportingInterval: + description: 'UsageReportingInterval controls the interval at which + Felix makes reports. [Default: 86400s]' + type: string + useInternalDataplaneDriver: + type: boolean + vxlanEnabled: + type: boolean + vxlanMTU: + description: 'VXLANMTU is the MTU to set on the tunnel device. See + Configuring MTU [Default: 1440]' + type: integer + vxlanPort: + type: integer + vxlanVNI: + type: integer + wireguardEnabled: + description: 'WireguardEnabled controls whether Wireguard is enabled. + [Default: false]' + type: boolean + wireguardInterfaceName: + description: 'WireguardInterfaceName specifies the name to use for + the Wireguard interface. [Default: wg.calico]' + type: string + wireguardListeningPort: + description: 'WireguardListeningPort controls the listening port used + by Wireguard. [Default: 51820]' + type: integer + wireguardMTU: + description: 'WireguardMTU controls the MTU on the Wireguard interface. + See Configuring MTU [Default: 1420]' + type: integer + wireguardRoutingRulePriority: + description: 'WireguardRoutingRulePriority controls the priority value + to use for the Wireguard routing rule. [Default: 99]' + type: integer + xdpEnabled: + description: 'XDPEnabled enables XDP acceleration for suitable untracked + incoming deny rules. [Default: true]' + type: boolean + xdpRefreshInterval: + description: 'XDPRefreshInterval is the period at which Felix re-checks + all XDP state to ensure that no other process has accidentally broken + Calico''s BPF maps or attached programs. Set to 0 to disable XDP + refresh. 
[Default: 90s]' + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: globalnetworkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkPolicy + listKind: GlobalNetworkPolicyList + plural: globalnetworkpolicies + singular: globalnetworkpolicy + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + applyOnForward: + description: ApplyOnForward indicates to apply the rules in this policy + on forward traffic. + type: boolean + doNotTrack: + description: DoNotTrack indicates whether packets matched by the rules + in this policy should go through the data plane's connection tracking, + such as Linux conntrack. If True, the rules in this policy are + applied before any data plane connection tracking, and packets allowed + by this policy are marked as not to be tracked. + type: boolean + egress: + description: The ordered set of egress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with Not. All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. 
\n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label my_label. \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label my_label. + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. 
GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernels iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernels iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. 
\n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label my_label. \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label my_label. + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. 
If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: The ordered set of ingress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with Not. All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. 
\n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label my_label. \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label my_label. + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernels iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. 
+ type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernels iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. 
\n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label my_label. \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label my_label. + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + required: + - action + type: object + type: array + namespaceSelector: + description: NamespaceSelector is an optional field for an expression + used to select a pod based on namespaces. + type: string + order: + description: Order is an optional field that specifies the order in + which the policy is applied. Policies with higher "order" are applied + after those with lower order. If the order is omitted, it may be + considered to be "infinite" - i.e. the policy will be applied last. Policies + with identical order will be applied in alphanumerical order based + on the Policy "Name". + type: number + preDNAT: + description: PreDNAT indicates to apply the rules in this policy before + any DNAT. + type: boolean + selector: + description: "The selector is an expression used to pick pick out + the endpoints that the policy should be applied to. \n Selector + expressions follow this syntax: \n \tlabel == \"string_literal\" + \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" + \ -> not equal; also matches if label is not present \tlabel in + { \"a\", \"b\", \"c\", ... } -> true if the value of label X is + one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", + ... } -> true if the value of label X is not one of \"a\", \"b\", + \"c\" \thas(label_name) -> True if that label is present \t! 
expr + -> negation of expr \texpr && expr -> Short-circuit and \texpr + || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() + or the empty selector -> matches all endpoints. \n Label names are + allowed to contain alphanumerics, -, _ and /. String literals are + more permissive but they do not support escape characters. \n Examples + (with made-up labels): \n \ttype == \"webserver\" && deployment + == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != + \"dev\" \t! has(label_name)" + type: string + serviceAccountSelector: + description: ServiceAccountSelector is an optional field for an expression + used to select a pod based on service accounts. + type: string + types: + description: "Types indicates whether this policy applies to ingress, + or to egress, or to both. When not explicitly specified (and so + the value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress rules are present in the policy. The + default is: \n - [ PolicyTypeIngress ], if there are no Egress rules + (including the case where there are also no Ingress rules) \n + - [ PolicyTypeEgress ], if there are Egress rules but no Ingress + rules \n - [ PolicyTypeIngress, PolicyTypeEgress ], if there are + both Ingress and Egress rules. \n When the policy is read back again, + Types will always be one of these values, never empty or nil." + items: + description: PolicyType enumerates the possible values of the PolicySpec + Types field. + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: globalnetworksets.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkSet + listKind: GlobalNetworkSetList + plural: globalnetworksets + singular: globalnetworkset + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs + that share labels to allow rules to refer to them via selectors. The labels + of GlobalNetworkSet are not namespaced. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: GlobalNetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. 
+ items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: hostendpoints.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: HostEndpoint + listKind: HostEndpointList + plural: hostendpoints + singular: hostendpoint + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: HostEndpointSpec contains the specification for a HostEndpoint + resource. + properties: + expectedIPs: + description: "The expected IP addresses (IPv4 and IPv6) of the endpoint. + If \"InterfaceName\" is not present, Calico will look for an interface + matching any of the IPs in the list and apply policy to that. Note: + \tWhen using the selector match criteria in an ingress or egress + security Policy \tor Profile, Calico converts the selector into + a set of IP addresses. For host \tendpoints, the ExpectedIPs field + is used for that purpose. (If only the interface \tname is specified, + Calico does not learn the IPs of the interface for use in match + \tcriteria.)" + items: + type: string + type: array + interfaceName: + description: "Either \"*\", or the name of a specific Linux interface + to apply policy to; or empty. \"*\" indicates that this HostEndpoint + governs all traffic to, from or through the default network namespace + of the host named by the \"Node\" field; entering and leaving that + namespace via any interface, including those from/to non-host-networked + local workloads. \n If InterfaceName is not \"*\", this HostEndpoint + only governs traffic that enters or leaves the host through the + specific interface named by InterfaceName, or - when InterfaceName + is empty - through the specific interface that has one of the IPs + in ExpectedIPs. Therefore, when InterfaceName is empty, at least + one expected IP must be specified. Only external interfaces (such + as eth0) are supported here; it isn't possible for a HostEndpoint + to protect traffic through a specific local workload interface. + \n Note: Only some kinds of policy are implemented for \"*\" HostEndpoints; + initially just pre-DNAT policy. Please check Calico documentation + for the latest position." + type: string + node: + description: The node name identifying the Calico node instance. + type: string + ports: + description: Ports contains the endpoint's named ports, which may + be referenced in security policy rules. 
+ items: + properties: + name: + type: string + port: + type: integer + protocol: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + required: + - name + - port + - protocol + type: object + type: array + profiles: + description: A list of identifiers of security Profile objects that + apply to this endpoint. Each profile is applied in the order that + they appear in this list. Profile rules are applied after the selector-based + security policy. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: ipamblocks.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMBlock + listKind: IPAMBlockList + plural: ipamblocks + singular: ipamblock + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMBlockSpec contains the specification for an IPAMBlock + resource. + properties: + affinity: + type: string + allocations: + items: + type: integer + # TODO: This nullable is manually added in. We should update controller-gen + # to handle []*int properly itself. + nullable: true + type: array + attributes: + items: + properties: + handle_id: + type: string + secondary: + additionalProperties: + type: string + type: object + type: object + type: array + cidr: + type: string + deleted: + type: boolean + strictAffinity: + type: boolean + unallocated: + items: + type: integer + type: array + required: + - allocations + - attributes + - cidr + - deleted + - strictAffinity + - unallocated + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: ipamconfigs.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMConfig + listKind: IPAMConfigList + plural: ipamconfigs + singular: ipamconfig + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMConfigSpec contains the specification for an IPAMConfig + resource. + properties: + autoAllocateBlocks: + type: boolean + strictAffinity: + type: boolean + required: + - autoAllocateBlocks + - strictAffinity + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: ipamhandles.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMHandle + listKind: IPAMHandleList + plural: ipamhandles + singular: ipamhandle + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMHandleSpec contains the specification for an IPAMHandle + resource. + properties: + block: + additionalProperties: + type: integer + type: object + handleID: + type: string + required: + - block + - handleID + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: ippools.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPPool + listKind: IPPoolList + plural: ippools + singular: ippool + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPPoolSpec contains the specification for an IPPool resource. + properties: + blockSize: + description: The block size to use for IP address assignments from + this pool. Defaults to 26 for IPv4 and 112 for IPv6. + type: integer + cidr: + description: The pool CIDR. + type: string + disabled: + description: When disabled is true, Calico IPAM will not assign addresses + from this pool. + type: boolean + ipip: + description: 'Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only.' + properties: + enabled: + description: When enabled is true, ipip tunneling will be used + to deliver packets to destinations within this pool. + type: boolean + mode: + description: The IPIP mode. This can be one of "always" or "cross-subnet". A + mode of "always" will also use IPIP tunneling for routing to + destination IP addresses within this pool. A mode of "cross-subnet" + will only use IPIP tunneling when the destination node is on + a different subnet to the originating node. The default value + (if not specified) is "always". + type: string + type: object + ipipMode: + description: Contains configuration for IPIP tunneling for this pool. + If not specified, then this is defaulted to "Never" (i.e. IPIP tunneling + is disabled). + type: string + nat-outgoing: + description: 'Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only.' + type: boolean + natOutgoing: + description: When nat-outgoing is true, packets sent from Calico networked + containers in this pool to destinations outside of this pool will + be masqueraded. + type: boolean + nodeSelector: + description: Allows IPPool to allocate for a specific node by label + selector. + type: string + vxlanMode: + description: Contains configuration for VXLAN tunneling for this pool. + If not specified, then this is defaulted to "Never" (i.e. VXLAN + tunneling is disabled). + type: string + required: + - cidr + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: kubecontrollersconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: KubeControllersConfiguration + listKind: KubeControllersConfigurationList + plural: kubecontrollersconfigurations + singular: kubecontrollersconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: KubeControllersConfigurationSpec contains the values of the + Kubernetes controllers configuration. + properties: + controllers: + description: Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: Namespace enables and configures the namespace controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + node: + description: Node enables and configures the node controller. + Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: HostEndpoint controls syncing nodes to host endpoints. + Disabled by default, set to nil to disable. + properties: + autoCreate: + description: 'AutoCreate enables automatic creation of + host endpoints for every node. [Default: Disabled]' + type: string + type: object + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + syncLabels: + description: 'SyncLabels controls whether to copy Kubernetes + node labels to Calico nodes. [Default: Enabled]' + type: string + type: object + policy: + description: Policy enables and configures the policy controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + serviceAccount: + description: ServiceAccount enables and configures the service + account controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + workloadEndpoint: + description: WorkloadEndpoint enables and configures the workload + endpoint controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + type: object + etcdV3CompactionPeriod: + description: 'EtcdV3CompactionPeriod is the period between etcdv3 + compaction requests. Set to 0 to disable. [Default: 10m]' + type: string + healthChecks: + description: 'HealthChecks enables or disables support for health + checks [Default: Enabled]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]' + type: string + required: + - controllers + type: object + status: + description: KubeControllersConfigurationStatus represents the status + of the configuration. It's useful for admins to be able to see the actual + config that was applied, which can be modified by environment variables + on the kube-controllers process. + properties: + environmentVars: + additionalProperties: + type: string + description: EnvironmentVars contains the environment variables on + the kube-controllers that influenced the RunningConfig. 
+ type: object + runningConfig: + description: RunningConfig contains the effective config that is running + in the kube-controllers pod, after merging the API resource with + any environment variables. + properties: + controllers: + description: Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: Namespace enables and configures the namespace + controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + node: + description: Node enables and configures the node controller. + Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: HostEndpoint controls syncing nodes to host + endpoints. Disabled by default, set to nil to disable. + properties: + autoCreate: + description: 'AutoCreate enables automatic creation + of host endpoints for every node. [Default: Disabled]' + type: string + type: object + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + syncLabels: + description: 'SyncLabels controls whether to copy Kubernetes + node labels to Calico nodes. [Default: Enabled]' + type: string + type: object + policy: + description: Policy enables and configures the policy controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + serviceAccount: + description: ServiceAccount enables and configures the service + account controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + workloadEndpoint: + description: WorkloadEndpoint enables and configures the workload + endpoint controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + type: object + etcdV3CompactionPeriod: + description: 'EtcdV3CompactionPeriod is the period between etcdv3 + compaction requests. Set to 0 to disable. [Default: 10m]' + type: string + healthChecks: + description: 'HealthChecks enables or disables support for health + checks [Default: Enabled]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which + logs are sent to the stdout. 
[Default: Info]' + type: string + required: + - controllers + type: object + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: networkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: NetworkPolicy + listKind: NetworkPolicyList + plural: networkpolicies + singular: networkpolicy + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + egress: + description: The ordered set of egress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with Not. All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. 
Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label my_label. \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label my_label. + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. 
+ It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernels iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernels iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." 
+ type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label my_label. \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label my_label. + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: The ordered set of ingress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. 
Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with Not. All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label my_label. 
\n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label my_label. + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernels iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernels iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. 
For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. 
+ One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label my_label. \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label my_label. + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + required: + - action + type: object + type: array + order: + description: Order is an optional field that specifies the order in + which the policy is applied. Policies with higher "order" are applied + after those with lower order. If the order is omitted, it may be + considered to be "infinite" - i.e. the policy will be applied last. Policies + with identical order will be applied in alphanumerical order based + on the Policy "Name". + type: number + selector: + description: "The selector is an expression used to pick pick out + the endpoints that the policy should be applied to. \n Selector + expressions follow this syntax: \n \tlabel == \"string_literal\" + \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" + \ -> not equal; also matches if label is not present \tlabel in + { \"a\", \"b\", \"c\", ... } -> true if the value of label X is + one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", + ... } -> true if the value of label X is not one of \"a\", \"b\", + \"c\" \thas(label_name) -> True if that label is present \t! expr + -> negation of expr \texpr && expr -> Short-circuit and \texpr + || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() + or the empty selector -> matches all endpoints. \n Label names are + allowed to contain alphanumerics, -, _ and /. String literals are + more permissive but they do not support escape characters. \n Examples + (with made-up labels): \n \ttype == \"webserver\" && deployment + == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != + \"dev\" \t! has(label_name)" + type: string + serviceAccountSelector: + description: ServiceAccountSelector is an optional field for an expression + used to select a pod based on service accounts. + type: string + types: + description: "Types indicates whether this policy applies to ingress, + or to egress, or to both. When not explicitly specified (and so + the value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress are present in the policy. 
The default + is: \n - [ PolicyTypeIngress ], if there are no Egress rules (including + the case where there are also no Ingress rules) \n - [ PolicyTypeEgress + ], if there are Egress rules but no Ingress rules \n - [ PolicyTypeIngress, + PolicyTypeEgress ], if there are both Ingress and Egress rules. + \n When the policy is read back again, Types will always be one + of these values, never empty or nil." + items: + description: PolicyType enumerates the possible values of the PolicySpec + Types field. + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: networksets.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: NetworkSet + listKind: NetworkSetList + plural: networksets + singular: networkset + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: NetworkSet is the Namespaced-equivalent of the GlobalNetworkSet. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +--- +# Source: calico/templates/calico-kube-controllers-rbac.yaml + +# Include a clusterrole for the kube-controllers component, +# and bind it to the calico-kube-controllers serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +rules: + # Nodes are watched to monitor for deletions. + - apiGroups: [""] + resources: + - nodes + verbs: + - watch + - list + - get + # Pods are queried to check for existence. + - apiGroups: [""] + resources: + - pods + verbs: + - get + # IPAM resources are manipulated when nodes are deleted. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + verbs: + - list + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + # kube-controllers manages hostendpoints. + - apiGroups: ["crd.projectcalico.org"] + resources: + - hostendpoints + verbs: + - get + - list + - create + - update + - delete + # Needs access to update clusterinformations. 
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - clusterinformations + verbs: + - get + - create + - update + # KubeControllersConfiguration is where it gets its config + - apiGroups: ["crd.projectcalico.org"] + resources: + - kubecontrollersconfigurations + verbs: + # read its own config + - get + # create a default if none exists + - create + # update status + - update + # watch for changes + - watch + # calico-kube-controllers requires root privileges + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - oob-anyuid-psp +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-kube-controllers +subjects: +- kind: ServiceAccount + name: calico-kube-controllers + namespace: kube-system +--- + +--- +# Source: calico/templates/calico-node-rbac.yaml +# Include a clusterrole for the calico-node DaemonSet, +# and bind it to the calico-node serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-node +rules: + # The CNI plugin needs to get pods, nodes, and namespaces. + - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + verbs: + - get + - apiGroups: [""] + resources: + - endpoints + - services + verbs: + # Used to discover service IPs for advertisement. + - watch + - list + # Used to discover Typhas. + - get + # Pod CIDR auto-detection on kubeadm needs access to config maps. + - apiGroups: [""] + resources: + - configmaps + verbs: + - get + - apiGroups: [""] + resources: + - nodes/status + verbs: + # Needed for clearing NodeNetworkUnavailable flag. + - patch + # Calico stores some configuration information in node annotations. + - update + # Watch for changes to Kubernetes NetworkPolicies. + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Used by Calico for policy information. + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch + # The CNI plugin patches pods/status. + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + # Calico monitors various CRDs for config. + - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + verbs: + - get + - list + - watch + # Calico must create and update some CRDs on startup. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico stores some configuration information on the node. + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch + # These permissions are only required for upgrade from v2.6, and can + # be removed after upgrade or on fresh installations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update + # These permissions are required for Calico CNI to perform IPAM allocations. 
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - apiGroups: ["crd.projectcalico.org"] + resources: + - ipamconfigs + verbs: + - get + # Block affinities must also be watchable by confd for route aggregation. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + verbs: + - watch + # The Calico IPAM migration needs to get daemonsets. These permissions can be + # removed if not upgrading from an installation using host-local IPAM. + - apiGroups: ["apps"] + resources: + - daemonsets + verbs: + - get + # calico-node requires full privileges + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - oob-privileged-psp + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: calico-node + namespace: kube-system + +{% if plugins.calico.typha.enabled | default(false) == true -%} +--- +# Source: calico/templates/calico-typha.yaml +# This manifest creates a Service, which will be backed by Calico's Typha daemon. +# Typha sits in between Felix and the API server, reducing Calico's load on the API server. + +apiVersion: v1 +kind: Service +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha +spec: + ports: + - port: 5473 + protocol: TCP + targetPort: calico-typha + name: calico-typha + selector: + k8s-app: calico-typha + +--- + +# This manifest creates a Deployment of Typha to back the above service. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha +spec: + # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the + # typha_service_name variable in the calico-config ConfigMap above. + # + # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential + # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In + # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade. + replicas: {{ plugins['calico']['typha']['replicas'] }} + revisionHistoryLimit: 2 + selector: + matchLabels: + k8s-app: calico-typha + template: + metadata: + labels: + k8s-app: calico-typha + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: 'true' + spec: + nodeSelector: + {{ plugins['calico']['typha']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) }} + hostNetwork: true + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + # Since Calico can't network a pod until Typha is up, we need to run Typha itself + # as a host-networked pod. 
+ serviceAccountName: calico-node + priorityClassName: system-cluster-critical + # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573 + securityContext: + fsGroup: 65534 + containers: + - image: {% if plugins['calico']['installation']['registry'] is defined and plugins['calico']['installation']['registry']|length %}{{ plugins['calico']['installation']['registry'] }}/{% endif %}{{ plugins['calico']['typha']['image'] }} + name: calico-typha + ports: + - containerPort: 5473 + name: calico-typha + protocol: TCP + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # Enable "info" logging by default. Can be set to "debug" to increase verbosity. + - name: TYPHA_LOGSEVERITYSCREEN + value: "info" + # Disable logging to file and syslog since those don't make sense in Kubernetes. + - name: TYPHA_LOGFILEPATH + value: "none" + - name: TYPHA_LOGSEVERITYSYS + value: "none" + # Monitor the Kubernetes API to find the number of running instances and rebalance + # connections. + - name: TYPHA_CONNECTIONREBALANCINGMODE + value: "kubernetes" + - name: TYPHA_DATASTORETYPE + value: "kubernetes" + - name: TYPHA_HEALTHENABLED + value: "true" + # Uncomment these lines to enable prometheus metrics. Since Typha is host-networked, + # this opens a port on the host, which may need to be secured. + #- name: TYPHA_PROMETHEUSMETRICSENABLED + # value: "true" + #- name: TYPHA_PROMETHEUSMETRICSPORT + # value: "9093" + livenessProbe: + httpGet: + path: /liveness + port: 9098 + host: localhost + periodSeconds: 30 + initialDelaySeconds: 30 + securityContext: + runAsNonRoot: true + allowPrivilegeEscalation: false + readinessProbe: + httpGet: + path: /readiness + port: 9098 + host: localhost + periodSeconds: 10 + +--- + +# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict + +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha +spec: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: calico-typha +{%- endif %} +--- +# Source: calico/templates/calico-node.yaml +# This manifest installs the calico-node container, as well +# as the CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: calico-node + namespace: kube-system + labels: + k8s-app: calico-node +spec: + selector: + matchLabels: + k8s-app: calico-node + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + k8s-app: calico-node + annotations: + # This, along with the CriticalAddonsOnly toleration below, + # marks the pod as a critical add-on, ensuring it gets + # priority scheduling and that its resources are reserved + # if it ever gets evicted. + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + nodeSelector: + kubernetes.io/os: linux + hostNetwork: true + tolerations: + # Make sure calico-node gets scheduled on all nodes. + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. 
+ - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + serviceAccountName: calico-node + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. + terminationGracePeriodSeconds: 0 + priorityClassName: system-node-critical + initContainers: + # This container performs upgrade from host-local IPAM to calico-ipam. + # It can be deleted if this is a fresh installation, or if you have already + # upgraded to use calico-ipam. + - name: upgrade-ipam + image: {% if plugins['calico']['installation']['registry'] is defined and plugins['calico']['installation']['registry']|length %}{{ plugins['calico']['installation']['registry'] }}/{% endif %}{{ plugins['calico']['cni']['image'] }} + command: ["/opt/cni/bin/calico-ipam", "-upgrade"] + env: + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + volumeMounts: + - mountPath: /var/lib/cni/networks + name: host-local-net-dir + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + securityContext: + privileged: true + # This container installs the CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: {% if plugins['calico']['installation']['registry'] is defined and plugins['calico']['installation']['registry']|length %}{{ plugins['calico']['installation']['registry'] }}/{% endif %}{{ plugins['calico']['cni']['image'] }} + command: ["/opt/cni/bin/install"] + env: + # Name of the CNI config file to create. + - name: CNI_CONF_NAME + value: "10-calico.conflist" + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: calico-config + key: cni_network_config + # Set the hostname based on the k8s node name. + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # CNI MTU Config variable + - name: CNI_MTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Prevents the container from sleeping forever. + - name: SLEEP + value: "false" + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + securityContext: + privileged: true + # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes + # to communicate with Felix over the Policy Sync API. + - name: flexvol-driver + image: {% if plugins['calico']['installation']['registry'] is defined and plugins['calico']['installation']['registry']|length %}{{ plugins['calico']['installation']['registry'] }}/{% endif %}{{ plugins['calico']['flexvol']['image'] }} + volumeMounts: + - name: flexvol-driver-host + mountPath: /host/driver + securityContext: + privileged: true + containers: + # Runs calico-node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: {% if plugins['calico']['installation']['registry'] is defined and plugins['calico']['installation']['registry']|length %}{{ plugins['calico']['installation']['registry'] }}/{% endif %}{{ plugins['calico']['node']['image'] }} + env: + # Use Kubernetes API as the backing datastore. 
+ - name: DATASTORE_TYPE + value: "{{ plugins['calico']['env']['DATASTORE_TYPE'] }}" +{%- if plugins.calico.typha.enabled | default(false) == true %} + # Typha support: controlled by the ConfigMap. + - name: FELIX_TYPHAK8SSERVICENAME + valueFrom: + configMapKeyRef: + name: calico-config + key: typha_service_name +{%- endif %} + # Wait for the datastore. + - name: WAIT_FOR_DATASTORE + value: "{{ plugins['calico']['env']['WAIT_FOR_DATASTORE'] }}" + # Set based on the k8s node name. + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Choose the backend to use. + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "{{ plugins['calico']['env']['CLUSTER_TYPE'] }}" + # Set MTU for tunnel device used if ipip is enabled + - name: FELIX_IPINIPMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Set MTU for the VXLAN tunnel device. + - name: FELIX_VXLANMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + + - name: CALICO_ROUTER_ID + value: "{{ plugins['calico']['env']['CALICO_ROUTER_ID'] }}" + + # IPv4 OPTIONS + - name: IP + value: "{{ plugins['calico']['env']['IP'] }}" + - name: IP_AUTODETECTION_METHOD + value: "{{ plugins['calico']['env']['IP_AUTODETECTION_METHOD'] }}" + - name: CALICO_IPV4POOL_IPIP + value: "{{ plugins['calico']['env']['CALICO_IPV4POOL_IPIP'] }}" + - name: CALICO_IPV4POOL_VXLAN + value: "{{ plugins['calico']['env']['CALICO_IPV4POOL_VXLAN'] }}" + - name: CALICO_IPV4POOL_CIDR + value: "{{ plugins['calico']['env']['CALICO_IPV4POOL_CIDR'] }}" + +{%- if not services.kubeadm.networking.podSubnet|isipv4 %} + # IPv6 OPTIONS + - name: CALICO_IPV6POOL_CIDR + value: "{{ plugins['calico']['env']['CALICO_IPV6POOL_CIDR'] }}" + - name: IP6 + value: "{{ plugins['calico']['env']['IP6'] }}" + - name: IP6_AUTODETECTION_METHOD + value: "{{ plugins['calico']['env']['IP6_AUTODETECTION_METHOD'] }}" + - name: FELIX_IPV6SUPPORT + value: "{{ plugins['calico']['env']['FELIX_IPV6SUPPORT'] }}" + - name: CALICO_IPV6POOL_IPIP + value: "{{ plugins['calico']['env']['CALICO_IPV6POOL_IPIP'] }}" + - name: CALICO_IPV6POOL_VXLAN + value: "{{ plugins['calico']['env']['CALICO_IPV6POOL_VXLAN'] }}" +{%- endif %} + + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "{{ plugins['calico']['env']['CALICO_DISABLE_FILE_LOGGING'] }}" + # Set Felix endpoint to host default action to ACCEPT. 
+ - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "{{ plugins['calico']['env']['FELIX_DEFAULTENDPOINTTOHOSTACTION'] }}" + # Set Felix logging to "info" + - name: FELIX_LOGSEVERITYSCREEN + value: "{{ plugins['calico']['env']['FELIX_LOGSEVERITYSCREEN'] }}" + - name: FELIX_HEALTHENABLED + value: "{{ plugins['calico']['env']['FELIX_HEALTHENABLED'] }}" + securityContext: + privileged: true + resources: + requests: + cpu: 250m + livenessProbe: + exec: + command: + - /bin/calico-node + - -felix-live + - -bird-live + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + readinessProbe: + exec: + command: + - /bin/calico-node + - -felix-ready + - -bird-ready + periodSeconds: 10 + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false + - name: policysync + mountPath: /var/run/nodeagent + volumes: + # Used by calico-node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + - name: var-lib-calico + hostPath: + path: /var/lib/calico + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + # Mount in the directory for host-local IPAM allocations. This is + # used when upgrading from host-local to calico-ipam, and can be removed + # if not using the upgrade-ipam init container. + - name: host-local-net-dir + hostPath: + path: /var/lib/cni/networks + # Used to create per-pod Unix Domain Sockets + - name: policysync + hostPath: + type: DirectoryOrCreate + path: /var/run/nodeagent + # Used to install Flex Volume Driver + - name: flexvol-driver-host + hostPath: + type: DirectoryOrCreate + path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-node + namespace: kube-system + +--- +# Source: calico/templates/calico-kube-controllers.yaml +# See https://github.com/projectcalico/kube-controllers +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers +spec: + # The controllers can only have a single active instance. + replicas: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers + strategy: + type: Recreate + template: + metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + nodeSelector: + {{ plugins['calico']['kube-controllers']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) }} + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + serviceAccountName: calico-kube-controllers + priorityClassName: system-cluster-critical + containers: + - name: calico-kube-controllers + image: {% if plugins['calico']['installation']['registry'] is defined and plugins['calico']['installation']['registry']|length %}{{ plugins['calico']['installation']['registry'] }}/{% endif %}{{ plugins['calico']['kube-controllers']['image'] }} + env: + # Choose which controllers to run. 
+ - name: ENABLED_CONTROLLERS + value: node + - name: DATASTORE_TYPE + value: kubernetes + readinessProbe: + exec: + command: + - /usr/bin/check-status + - -r + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-kube-controllers + namespace: kube-system + +--- +# Source: calico/templates/calico-etcd-secrets.yaml + +--- +# Source: calico/templates/calico-typha.yaml + +--- +# Source: calico/templates/configure-canal.yaml + + diff --git a/kubetool/templates/plugins/calico-v3.17.yaml.j2 b/kubetool/templates/plugins/calico-v3.17.yaml.j2 new file mode 100644 index 000000000..c57fa1a50 --- /dev/null +++ b/kubetool/templates/plugins/calico-v3.17.yaml.j2 @@ -0,0 +1,3962 @@ +--- +# Source: calico/templates/calico-config.yaml +# This ConfigMap is used to configure a self-hosted Calico installation. +kind: ConfigMap +apiVersion: v1 +metadata: + name: calico-config + namespace: kube-system +data: + typha_service_name: "{% if plugins.calico.typha.enabled | default(false) == true %}calico-typha{% else %}none{% endif %}" + # Configure the backend to use. + calico_backend: "bird" + + # Configure the MTU to use for workload interfaces and the + # tunnels. For IPIP, set to your network MTU - 20; for VXLAN + # set to your network MTU - 50. + veth_mtu: "{{ plugins['calico']['mtu'] }}" + + # The CNI network configuration to install on each node. The special + # values in this config will be automatically populated. + cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "calico", + "log_level": "info", + "log_file_path": "/var/log/calico/cni/cni.log", + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + "mtu": __CNI_MTU__, + "ipam": {{ plugins['calico']['cni']['ipam']|tojson|safe }}, + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + }, + { + "type": "portmap", + "snat": true, + "capabilities": {"portMappings": true} + }, + { + "type": "bandwidth", + "capabilities": {"bandwidth": true} + } + ] + } + +--- +# Source: calico/templates/kdd-crds.yaml + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: bgpconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPConfiguration + listKind: BGPConfigurationList + plural: bgpconfigurations + singular: bgpconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: BGPConfiguration contains the configuration for any BGP routing. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPConfigurationSpec contains the values of the BGP configuration. + properties: + asNumber: + description: 'ASNumber is the default AS number used by a node. 
[Default: + 64512]' + format: int32 + type: integer + communities: + description: Communities is a list of BGP community values and their + arbitrary names for tagging routes. + items: + description: Community contains standard or large community value + and its name. + properties: + name: + description: Name given to community value. + type: string + value: + description: Value must be of format `aa:nn` or `aa:nn:mm`. + For standard community use `aa:nn` format, where `aa` and + `nn` are 16 bit number. For large community use `aa:nn:mm` + format, where `aa`, `nn` and `mm` are 32 bit number. Where, + `aa` is an AS Number, `nn` and `mm` are per-AS identifier. + pattern: ^(\d+):(\d+)$|^(\d+):(\d+):(\d+)$ + type: string + type: object + type: array + listenPort: + description: ListenPort is the port where BGP protocol should listen. + Defaults to 179 + maximum: 65535 + minimum: 1 + type: integer + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: INFO]' + type: string + nodeToNodeMeshEnabled: + description: 'NodeToNodeMeshEnabled sets whether full node to node + BGP mesh is enabled. [Default: true]' + type: boolean + prefixAdvertisements: + description: PrefixAdvertisements contains per-prefix advertisement + configuration. + items: + description: PrefixAdvertisement configures advertisement properties + for the specified CIDR. + properties: + cidr: + description: CIDR for which properties should be advertised. + type: string + communities: + description: Communities can be list of either community names + already defined in `Specs.Communities` or community value + of format `aa:nn` or `aa:nn:mm`. For standard community use + `aa:nn` format, where `aa` and `nn` are 16 bit number. For + large community use `aa:nn:mm` format, where `aa`, `nn` and + `mm` are 32 bit number. Where,`aa` is an AS Number, `nn` and + `mm` are per-AS identifier. + items: + type: string + type: array + type: object + type: array + serviceClusterIPs: + description: ServiceClusterIPs are the CIDR blocks from which service + cluster IPs are allocated. If specified, Calico will advertise these + blocks, as well as any cluster IPs within them. + items: + description: ServiceClusterIPBlock represents a single allowed ClusterIP + CIDR block. + properties: + cidr: + type: string + type: object + type: array + serviceExternalIPs: + description: ServiceExternalIPs are the CIDR blocks for Kubernetes + Service External IPs. Kubernetes Service ExternalIPs will only be + advertised if they are within one of these blocks. + items: + description: ServiceExternalIPBlock represents a single allowed + External IP CIDR block. + properties: + cidr: + type: string + type: object + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: bgppeers.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPPeer + listKind: BGPPeerList + plural: bgppeers + singular: bgppeer + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPPeerSpec contains the specification for a BGPPeer resource. + properties: + asNumber: + description: The AS Number of the peer. + format: int32 + type: integer + keepOriginalNextHop: + description: Option to keep the original nexthop field when routes + are sent to a BGP Peer. Setting "true" configures the selected BGP + Peers node to use the "next hop keep;" instead of "next hop self;"(default) + in the specific branch of the Node on "bird.cfg". + type: boolean + node: + description: The node name identifying the Calico node instance that + is targeted by this peer. If this is not set, and no nodeSelector + is specified, then this BGP peer selects all nodes in the cluster. + type: string + nodeSelector: + description: Selector for the nodes that should have this peering. When + this is set, the Node field must be empty. + type: string + password: + description: Optional BGP password for the peerings generated by this + BGPPeer resource. + properties: + secretKeyRef: + description: Selects a key of a secret in the node pods namespace. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: Name of the referent. + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + type: object + peerIP: + description: The IP address of the peer followed by an optional port + number to peer with. If port number is given, format should be `[]:port` + or `:` for IPv4. If optional port number is not set, + and this peer IP and ASNumber belongs to a calico/node with ListenPort + set in BGPConfiguration, then we use that port to peer. + type: string + peerSelector: + description: Selector for the remote nodes to peer with. When this + is set, the PeerIP and ASNumber fields must be empty. For each + peering between the local node and selected remote nodes, we configure + an IPv4 peering if both ends have NodeBGPSpec.IPv4Address specified, + and an IPv6 peering if both ends have NodeBGPSpec.IPv6Address specified. The + remote AS number comes from the remote nodes NodeBGPSpec.ASNumber, + or the global default if that is not set. + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: blockaffinities.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BlockAffinity + listKind: BlockAffinityList + plural: blockaffinities + singular: blockaffinity + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BlockAffinitySpec contains the specification for a BlockAffinity + resource. + properties: + cidr: + type: string + deleted: + description: Deleted indicates that this block affinity is being deleted. + This field is a string for compatibility with older releases that + mistakenly treat this field as a string. + type: string + node: + type: string + state: + type: string + required: + - cidr + - deleted + - node + - state + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusterinformations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: ClusterInformation + listKind: ClusterInformationList + plural: clusterinformations + singular: clusterinformation + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ClusterInformation contains the cluster specific information. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterInformationSpec contains the values of describing + the cluster. + properties: + calicoVersion: + description: CalicoVersion is the version of Calico that the cluster + is running + type: string + clusterGUID: + description: ClusterGUID is the GUID of the cluster + type: string + clusterType: + description: ClusterType describes the type of the cluster + type: string + datastoreReady: + description: DatastoreReady is used during significant datastore migrations + to signal to components such as Felix that it should wait before + accessing the datastore. + type: boolean + variant: + description: Variant declares which variant of Calico should be active. 
+ type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: felixconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: FelixConfiguration + listKind: FelixConfigurationList + plural: felixconfigurations + singular: felixconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Felix Configuration contains the configuration for Felix. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: FelixConfigurationSpec contains the values of the Felix configuration. + properties: + allowIPIPPacketsFromWorkloads: + description: 'AllowIPIPPacketsFromWorkloads controls whether Felix + will add a rule to drop IPIP encapsulated traffic from workloads + [Default: false]' + type: boolean + allowVXLANPacketsFromWorkloads: + description: 'AllowVXLANPacketsFromWorkloads controls whether Felix + will add a rule to drop VXLAN encapsulated traffic from workloads + [Default: false]' + type: boolean + awsSrcDstCheck: + description: 'Set source-destination-check on AWS EC2 instances. Accepted + value must be one of "DoNothing", "Enabled" or "Disabled". [Default: + DoNothing]' + enum: + - DoNothing + - Enable + - Disable + type: string + bpfConnectTimeLoadBalancingEnabled: + description: 'BPFConnectTimeLoadBalancingEnabled when in BPF mode, + controls whether Felix installs the connection-time load balancer. The + connect-time load balancer is required for the host to be able to + reach Kubernetes services and it improves the performance of pod-to-service + connections. The only reason to disable it is for debugging purposes. [Default: + true]' + type: boolean + bpfDataIfacePattern: + description: 'BPFDataIfacePattern is a regular expression that controls + which interfaces Felix should attach BPF programs to in order to + catch traffic to/from the network. This needs to match the interfaces + that Calico workload traffic flows over as well as any interfaces + that handle incoming traffic to nodeports and services from outside + the cluster. It should not match the workload interfaces (usually + named cali...). [Default: ^(en.*|eth.*|tunl0$)]' + type: string + bpfDisableUnprivileged: + description: 'BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled + sysctl to disable unprivileged use of BPF. This ensures that unprivileged + users cannot access Calico''s BPF maps and cannot insert their own + BPF programs to interfere with Calico''s. [Default: true]' + type: boolean + bpfEnabled: + description: 'BPFEnabled, if enabled Felix will use the BPF dataplane. 
+ [Default: false]' + type: boolean + bpfExternalServiceMode: + description: 'BPFExternalServiceMode in BPF mode, controls how connections + from outside the cluster to services (node ports and cluster IPs) + are forwarded to remote workloads. If set to "Tunnel" then both + request and response traffic is tunneled to the remote node. If + set to "DSR", the request traffic is tunneled but the response traffic + is sent directly from the remote node. In "DSR" mode, the remote + node appears to use the IP of the ingress node; this requires a + permissive L2 network. [Default: Tunnel]' + type: string + bpfKubeProxyEndpointSlicesEnabled: + description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls + whether Felix's embedded kube-proxy accepts EndpointSlices or not. + type: boolean + bpfKubeProxyIptablesCleanupEnabled: + description: 'BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF + mode, Felix will proactively clean up the upstream Kubernetes kube-proxy''s + iptables chains. Should only be enabled if kube-proxy is not running. [Default: + true]' + type: boolean + bpfKubeProxyMinSyncPeriod: + description: 'BPFKubeProxyMinSyncPeriod, in BPF mode, controls the + minimum time between updates to the dataplane for Felix''s embedded + kube-proxy. Lower values give reduced set-up latency. Higher values + reduce Felix CPU usage by batching up more work. [Default: 1s]' + type: string + bpfLogLevel: + description: 'BPFLogLevel controls the log level of the BPF programs + when in BPF dataplane mode. One of "Off", "Info", or "Debug". The + logs are emitted to the BPF trace pipe, accessible with the command + `tc exec bpf debug`. [Default: Off].' + type: string + chainInsertMode: + description: 'ChainInsertMode controls whether Felix hooks the kernels + top-level iptables chains by inserting a rule at the top of the + chain or by appending a rule at the bottom. insert is the safe default + since it prevents Calicos rules from being bypassed. If you switch + to append mode, be sure that the other rules in the chains signal + acceptance by falling through to the Calico rules, otherwise the + Calico policy will be bypassed. [Default: insert]' + type: string + dataplaneDriver: + type: string + debugDisableLogDropping: + type: boolean + debugMemoryProfilePath: + type: string + debugSimulateCalcGraphHangAfter: + type: string + debugSimulateDataplaneHangAfter: + type: string + defaultEndpointToHostAction: + description: 'DefaultEndpointToHostAction controls what happens to + traffic that goes from a workload endpoint to the host itself (after + the traffic hits the endpoint egress policy). By default Calico + blocks traffic from workload endpoints to the host itself with an + iptables DROP action. If you want to allow some or all traffic + from endpoint to host, set this parameter to RETURN or ACCEPT. Use + RETURN if you have your own rules in the iptables INPUT chain; + Calico will insert its rules at the top of that chain, then RETURN + packets to the INPUT chain once it has completed processing workload + endpoint egress policy. Use ACCEPT to unconditionally accept packets + from workloads after processing workload endpoint egress policy. + [Default: Drop]' + type: string + deviceRouteProtocol: + description: This defines the route protocol added to programmed device + routes, by default this will be RTPROT_BOOT when left blank. + type: integer + deviceRouteSourceAddress: + description: This is the source address to use on programmed device + routes. 
By default the source address is left blank, leaving the + kernel to choose the source address used. + type: string + disableConntrackInvalidCheck: + type: boolean + endpointReportingDelay: + type: string + endpointReportingEnabled: + type: boolean + externalNodesList: + description: ExternalNodesCIDRList is a list of CIDR's of external-non-calico-nodes + which may source tunnel traffic and have the tunneled traffic be + accepted at calico nodes. + items: + type: string + type: array + failsafeInboundHostPorts: + description: 'FailsafeInboundHostPorts is a comma-delimited list of + UDP/TCP ports that Felix will allow incoming traffic to host endpoints + on irrespective of the security policy. This is useful to avoid + accidentally cutting off a host with incorrect configuration. Each + port should be specified as tcp: or udp:. + For back-compatibility, if the protocol is not specified, it defaults + to tcp. To disable all inbound host ports, use the value none. + The default value allows ssh access and DHCP. [Default: tcp:22, + udp:68, tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667]' + items: + description: ProtoPort is combination of protocol and port, both + must be specified. + properties: + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + failsafeOutboundHostPorts: + description: 'FailsafeOutboundHostPorts is a comma-delimited list + of UDP/TCP ports that Felix will allow outgoing traffic from host + endpoints to irrespective of the security policy. This is useful + to avoid accidentally cutting off a host with incorrect configuration. + Each port should be specified as tcp: or udp:. + For back-compatibility, if the protocol is not specified, it defaults + to tcp. To disable all outbound host ports, use the value none. + The default value opens etcds standard ports to ensure that Felix + does not get cut off from etcd as well as allowing DHCP and DNS. + [Default: tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667, + udp:53, udp:67]' + items: + description: ProtoPort is combination of protocol and port, both + must be specified. + properties: + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + featureDetectOverride: + description: FeatureDetectOverride is used to override the feature + detection. Values are specified in a comma separated list with no + spaces, example; "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". + "true" or "false" will force the feature, empty or omitted values + are auto-detected. + type: string + genericXDPEnabled: + description: 'GenericXDPEnabled enables Generic XDP so network cards + that don''t support XDP offload or driver modes can use XDP. This + is not recommended since it doesn''t provide better performance + than iptables. [Default: false]' + type: boolean + healthEnabled: + type: boolean + healthHost: + type: string + healthPort: + type: integer + interfaceExclude: + description: 'InterfaceExclude is a comma-separated list of interfaces + that Felix should exclude when monitoring for host endpoints. The + default value ensures that Felix ignores Kubernetes'' IPVS dummy + interface, which is used internally by kube-proxy. If you want to + exclude multiple interface names using a single value, the list + supports regular expressions. For regular expressions you must wrap + the value with ''/''. 
For example having values ''/^kube/,veth1'' + will exclude all interfaces that begin with ''kube'' and also the + interface ''veth1''. [Default: kube-ipvs0]' + type: string + interfacePrefix: + description: 'InterfacePrefix is the interface name prefix that identifies + workload endpoints and so distinguishes them from host endpoint + interfaces. Note: in environments other than bare metal, the orchestrators + configure this appropriately. For example our Kubernetes and Docker + integrations set the cali value, and our OpenStack integration + sets the tap value. [Default: cali]' + type: string + interfaceRefreshInterval: + description: InterfaceRefreshInterval is the period at which Felix + rescans local interfaces to verify their state. The rescan can be + disabled by setting the interval to 0. + type: string + ipipEnabled: + type: boolean + ipipMTU: + description: 'IPIPMTU is the MTU to set on the tunnel device. See + Configuring MTU [Default: 1440]' + type: integer + ipsetsRefreshInterval: + description: 'IpsetsRefreshInterval is the period at which Felix re-checks + all iptables state to ensure that no other process has accidentally + broken Calicos rules. Set to 0 to disable iptables refresh. [Default: + 90s]' + type: string + iptablesBackend: + description: IptablesBackend specifies which backend of iptables will + be used. The default is legacy. + type: string + iptablesFilterAllowAction: + type: string + iptablesLockFilePath: + description: 'IptablesLockFilePath is the location of the iptables + lock file. You may need to change this if the lock file is not in + its standard location (for example if you have mapped it into Felixs + container at a different path). [Default: /run/xtables.lock]' + type: string + iptablesLockProbeInterval: + description: 'IptablesLockProbeInterval is the time that Felix will + wait between attempts to acquire the iptables lock if it is not + available. Lower values make Felix more responsive when the lock + is contended, but use more CPU. [Default: 50ms]' + type: string + iptablesLockTimeout: + description: 'IptablesLockTimeout is the time that Felix will wait + for the iptables lock, or 0, to disable. To use this feature, Felix + must share the iptables lock file with all other processes that + also take the lock. When running Felix inside a container, this + requires the /run directory of the host to be mounted into the calico/node + or calico/felix container. [Default: 0s disabled]' + type: string + iptablesMangleAllowAction: + type: string + iptablesMarkMask: + description: 'IptablesMarkMask is the mask that Felix selects its + IPTables Mark bits from. Should be a 32 bit hexadecimal number with + at least 8 bits set, none of which clash with any other mark bits + in use on the system. [Default: 0xff000000]' + format: int32 + type: integer + iptablesNATOutgoingInterfaceFilter: + type: string + iptablesPostWriteCheckInterval: + description: 'IptablesPostWriteCheckInterval is the period after Felix + has done a write to the dataplane that it schedules an extra read + back in order to check the write was not clobbered by another process. + This should only occur if another application on the system doesnt + respect the iptables lock. [Default: 1s]' + type: string + iptablesRefreshInterval: + description: 'IptablesRefreshInterval is the period at which Felix + re-checks the IP sets in the dataplane to ensure that no other process + has accidentally broken Calicos rules. Set to 0 to disable IP sets + refresh. 
Note: the default for this value is lower than the other + refresh intervals as a workaround for a Linux kernel bug that was + fixed in kernel version 4.11. If you are using v4.11 or greater + you may want to set this to, a higher value to reduce Felix CPU + usage. [Default: 10s]' + type: string + ipv6Support: + type: boolean + kubeNodePortRanges: + description: 'KubeNodePortRanges holds list of port ranges used for + service node ports. Only used if felix detects kube-proxy running + in ipvs mode. Felix uses these ranges to separate host and workload + traffic. [Default: 30000:32767].' + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + logFilePath: + description: 'LogFilePath is the full path to the Felix log. Set to + none to disable file logging. [Default: /var/log/calico/felix.log]' + type: string + logPrefix: + description: 'LogPrefix is the log prefix that Felix uses when rendering + LOG rules. [Default: calico-packet]' + type: string + logSeverityFile: + description: 'LogSeverityFile is the log severity above which logs + are sent to the log file. [Default: Info]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]' + type: string + logSeveritySys: + description: 'LogSeveritySys is the log severity above which logs + are sent to the syslog. Set to None for no logging to syslog. [Default: + Info]' + type: string + maxIpsetSize: + type: integer + metadataAddr: + description: 'MetadataAddr is the IP address or domain name of the + server that can answer VM queries for cloud-init metadata. In OpenStack, + this corresponds to the machine running nova-api (or in Ubuntu, + nova-api-metadata). A value of none (case insensitive) means that + Felix should not set up any NAT rule for the metadata path. [Default: + 127.0.0.1]' + type: string + metadataPort: + description: 'MetadataPort is the port of the metadata server. This, + combined with global.MetadataAddr (if not None), is used to set + up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort. + In most cases this should not need to be changed [Default: 8775].' + type: integer + mtuIfacePattern: + description: MTUIfacePattern is a regular expression that controls + which interfaces Felix should scan in order to calculate the host's + MTU. This should not match workload interfaces (usually named cali...). + type: string + natOutgoingAddress: + description: NATOutgoingAddress specifies an address to use when performing + source NAT for traffic in a natOutgoing pool that is leaving the + network. By default the address used is an address on the interface + the traffic is leaving on (ie it uses the iptables MASQUERADE target) + type: string + natPortRange: + anyOf: + - type: integer + - type: string + description: NATPortRange specifies the range of ports that is used + for port mapping when doing outgoing NAT. When unset the default + behavior of the network stack is used. + pattern: ^.* + x-kubernetes-int-or-string: true + netlinkTimeout: + type: string + openstackRegion: + description: 'OpenstackRegion is the name of the region that a particular + Felix belongs to. In a multi-region Calico/OpenStack deployment, + this must be configured somehow for each Felix (here in the datamodel, + or in felix.cfg or the environment on each compute node), and must + match the [calico] openstack_region value configured in neutron.conf + on each node. 
[Default: Empty]' + type: string + policySyncPathPrefix: + description: 'PolicySyncPathPrefix is used to by Felix to communicate + policy changes to external services, like Application layer policy. + [Default: Empty]' + type: string + prometheusGoMetricsEnabled: + description: 'PrometheusGoMetricsEnabled disables Go runtime metrics + collection, which the Prometheus client does by default, when set + to false. This reduces the number of metrics reported, reducing + Prometheus load. [Default: true]' + type: boolean + prometheusMetricsEnabled: + description: 'PrometheusMetricsEnabled enables the Prometheus metrics + server in Felix if set to true. [Default: false]' + type: boolean + prometheusMetricsHost: + description: 'PrometheusMetricsHost is the host that the Prometheus + metrics server should bind to. [Default: empty]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. [Default: 9091]' + type: integer + prometheusProcessMetricsEnabled: + description: 'PrometheusProcessMetricsEnabled disables process metrics + collection, which the Prometheus client does by default, when set + to false. This reduces the number of metrics reported, reducing + Prometheus load. [Default: true]' + type: boolean + removeExternalRoutes: + description: Whether or not to remove device routes that have not + been programmed by Felix. Disabling this will allow external applications + to also add device routes. This is enabled by default which means + we will remove externally added routes. + type: boolean + reportingInterval: + description: 'ReportingInterval is the interval at which Felix reports + its status into the datastore or 0 to disable. Must be non-zero + in OpenStack deployments. [Default: 30s]' + type: string + reportingTTL: + description: 'ReportingTTL is the time-to-live setting for process-wide + status reports. [Default: 90s]' + type: string + routeRefreshInterval: + description: 'RouteRefreshInterval is the period at which Felix re-checks + the routes in the dataplane to ensure that no other process has + accidentally broken Calicos rules. Set to 0 to disable route refresh. + [Default: 90s]' + type: string + routeSource: + description: 'RouteSource configures where Felix gets its routing + information. - WorkloadIPs: use workload endpoints to construct + routes. - CalicoIPAM: the default - use IPAM data to construct routes.' + type: string + routeTableRange: + description: Calico programs additional Linux route tables for various + purposes. RouteTableRange specifies the indices of the route tables + that Calico should use. + properties: + max: + type: integer + min: + type: integer + required: + - max + - min + type: object + serviceLoopPrevention: + description: 'When service IP advertisement is enabled, prevent routing + loops to service IPs that are not in use, by dropping or rejecting + packets that do not get DNATd by kube-proxy. Unless set to Disabled, + in which case such routing loops continue to be allowed. [Default: + Drop]' + type: string + sidecarAccelerationEnabled: + description: 'SidecarAccelerationEnabled enables experimental sidecar + acceleration [Default: false]' + type: boolean + usageReportingEnabled: + description: 'UsageReportingEnabled reports anonymous Calico version + number and cluster size to projectcalico.org. Logs warnings returned + by the usage server. For example, if a significant security vulnerability + has been discovered in the version of Calico being used. 
[Default: + true]' + type: boolean + usageReportingInitialDelay: + description: 'UsageReportingInitialDelay controls the minimum delay + before Felix makes a report. [Default: 300s]' + type: string + usageReportingInterval: + description: 'UsageReportingInterval controls the interval at which + Felix makes reports. [Default: 86400s]' + type: string + useInternalDataplaneDriver: + type: boolean + vxlanEnabled: + type: boolean + vxlanMTU: + description: 'VXLANMTU is the MTU to set on the tunnel device. See + Configuring MTU [Default: 1440]' + type: integer + vxlanPort: + type: integer + vxlanVNI: + type: integer + wireguardEnabled: + description: 'WireguardEnabled controls whether Wireguard is enabled. + [Default: false]' + type: boolean + wireguardInterfaceName: + description: 'WireguardInterfaceName specifies the name to use for + the Wireguard interface. [Default: wg.calico]' + type: string + wireguardListeningPort: + description: 'WireguardListeningPort controls the listening port used + by Wireguard. [Default: 51820]' + type: integer + wireguardMTU: + description: 'WireguardMTU controls the MTU on the Wireguard interface. + See Configuring MTU [Default: 1420]' + type: integer + wireguardRoutingRulePriority: + description: 'WireguardRoutingRulePriority controls the priority value + to use for the Wireguard routing rule. [Default: 99]' + type: integer + xdpEnabled: + description: 'XDPEnabled enables XDP acceleration for suitable untracked + incoming deny rules. [Default: true]' + type: boolean + xdpRefreshInterval: + description: 'XDPRefreshInterval is the period at which Felix re-checks + all XDP state to ensure that no other process has accidentally broken + Calico''s BPF maps or attached programs. Set to 0 to disable XDP + refresh. [Default: 90s]' + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: globalnetworkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkPolicy + listKind: GlobalNetworkPolicyList + plural: globalnetworkpolicies + singular: globalnetworkpolicy + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + applyOnForward: + description: ApplyOnForward indicates to apply the rules in this policy + on forward traffic. + type: boolean + doNotTrack: + description: DoNotTrack indicates whether packets matched by the rules + in this policy should go through the data plane's connection tracking, + such as Linux conntrack. If True, the rules in this policy are + applied before any data plane connection tracking, and packets allowed + by this policy are marked as not to be tracked. 
+ type: boolean + egress: + description: The ordered set of egress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with Not. All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. 
+ One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label my_label. \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label my_label. + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernels iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. 
If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernels iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). 
+ \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label my_label. \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label my_label. + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: The ordered set of ingress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with Not. All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." 
+ type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label my_label. \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label my_label. + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. 
+ items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernels iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernels iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. 
+ \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label my_label. \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label my_label. + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. 
+ type: string + type: object + type: object + required: + - action + type: object + type: array + namespaceSelector: + description: NamespaceSelector is an optional field for an expression + used to select a pod based on namespaces. + type: string + order: + description: Order is an optional field that specifies the order in + which the policy is applied. Policies with higher "order" are applied + after those with lower order. If the order is omitted, it may be + considered to be "infinite" - i.e. the policy will be applied last. Policies + with identical order will be applied in alphanumerical order based + on the Policy "Name". + type: number + preDNAT: + description: PreDNAT indicates to apply the rules in this policy before + any DNAT. + type: boolean + selector: + description: "The selector is an expression used to pick pick out + the endpoints that the policy should be applied to. \n Selector + expressions follow this syntax: \n \tlabel == \"string_literal\" + \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" + \ -> not equal; also matches if label is not present \tlabel in + { \"a\", \"b\", \"c\", ... } -> true if the value of label X is + one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", + ... } -> true if the value of label X is not one of \"a\", \"b\", + \"c\" \thas(label_name) -> True if that label is present \t! expr + -> negation of expr \texpr && expr -> Short-circuit and \texpr + || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() + or the empty selector -> matches all endpoints. \n Label names are + allowed to contain alphanumerics, -, _ and /. String literals are + more permissive but they do not support escape characters. \n Examples + (with made-up labels): \n \ttype == \"webserver\" && deployment + == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != + \"dev\" \t! has(label_name)" + type: string + serviceAccountSelector: + description: ServiceAccountSelector is an optional field for an expression + used to select a pod based on service accounts. + type: string + types: + description: "Types indicates whether this policy applies to ingress, + or to egress, or to both. When not explicitly specified (and so + the value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress rules are present in the policy. The + default is: \n - [ PolicyTypeIngress ], if there are no Egress rules + (including the case where there are also no Ingress rules) \n + - [ PolicyTypeEgress ], if there are Egress rules but no Ingress + rules \n - [ PolicyTypeIngress, PolicyTypeEgress ], if there are + both Ingress and Egress rules. \n When the policy is read back again, + Types will always be one of these values, never empty or nil." + items: + description: PolicyType enumerates the possible values of the PolicySpec + Types field. 
+ type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: globalnetworksets.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkSet + listKind: GlobalNetworkSetList + plural: globalnetworksets + singular: globalnetworkset + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs + that share labels to allow rules to refer to them via selectors. The labels + of GlobalNetworkSet are not namespaced. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: GlobalNetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: hostendpoints.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: HostEndpoint + listKind: HostEndpointList + plural: hostendpoints + singular: hostendpoint + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: HostEndpointSpec contains the specification for a HostEndpoint + resource. + properties: + expectedIPs: + description: "The expected IP addresses (IPv4 and IPv6) of the endpoint. + If \"InterfaceName\" is not present, Calico will look for an interface + matching any of the IPs in the list and apply policy to that. Note: + \tWhen using the selector match criteria in an ingress or egress + security Policy \tor Profile, Calico converts the selector into + a set of IP addresses. For host \tendpoints, the ExpectedIPs field + is used for that purpose. 
(If only the interface \tname is specified, + Calico does not learn the IPs of the interface for use in match + \tcriteria.)" + items: + type: string + type: array + interfaceName: + description: "Either \"*\", or the name of a specific Linux interface + to apply policy to; or empty. \"*\" indicates that this HostEndpoint + governs all traffic to, from or through the default network namespace + of the host named by the \"Node\" field; entering and leaving that + namespace via any interface, including those from/to non-host-networked + local workloads. \n If InterfaceName is not \"*\", this HostEndpoint + only governs traffic that enters or leaves the host through the + specific interface named by InterfaceName, or - when InterfaceName + is empty - through the specific interface that has one of the IPs + in ExpectedIPs. Therefore, when InterfaceName is empty, at least + one expected IP must be specified. Only external interfaces (such + as eth0) are supported here; it isn't possible for a HostEndpoint + to protect traffic through a specific local workload interface. + \n Note: Only some kinds of policy are implemented for \"*\" HostEndpoints; + initially just pre-DNAT policy. Please check Calico documentation + for the latest position." + type: string + node: + description: The node name identifying the Calico node instance. + type: string + ports: + description: Ports contains the endpoint's named ports, which may + be referenced in security policy rules. + items: + properties: + name: + type: string + port: + type: integer + protocol: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + required: + - name + - port + - protocol + type: object + type: array + profiles: + description: A list of identifiers of security Profile objects that + apply to this endpoint. Each profile is applied in the order that + they appear in this list. Profile rules are applied after the selector-based + security policy. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamblocks.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMBlock + listKind: IPAMBlockList + plural: ipamblocks + singular: ipamblock + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMBlockSpec contains the specification for an IPAMBlock + resource. + properties: + affinity: + type: string + allocations: + items: + type: integer + # TODO: This nullable is manually added in. We should update controller-gen + # to handle []*int properly itself. 
+ nullable: true + type: array + attributes: + items: + properties: + handle_id: + type: string + secondary: + additionalProperties: + type: string + type: object + type: object + type: array + cidr: + type: string + deleted: + type: boolean + strictAffinity: + type: boolean + unallocated: + items: + type: integer + type: array + required: + - allocations + - attributes + - cidr + - strictAffinity + - unallocated + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamconfigs.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMConfig + listKind: IPAMConfigList + plural: ipamconfigs + singular: ipamconfig + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMConfigSpec contains the specification for an IPAMConfig + resource. + properties: + autoAllocateBlocks: + type: boolean + maxBlocksPerHost: + description: MaxBlocksPerHost, if non-zero, is the max number of blocks + that can be affine to each host. + type: integer + strictAffinity: + type: boolean + required: + - autoAllocateBlocks + - strictAffinity + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamhandles.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMHandle + listKind: IPAMHandleList + plural: ipamhandles + singular: ipamhandle + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMHandleSpec contains the specification for an IPAMHandle + resource. 
+ properties: + block: + additionalProperties: + type: integer + type: object + deleted: + type: boolean + handleID: + type: string + required: + - block + - handleID + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ippools.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPPool + listKind: IPPoolList + plural: ippools + singular: ippool + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPPoolSpec contains the specification for an IPPool resource. + properties: + blockSize: + description: The block size to use for IP address assignments from + this pool. Defaults to 26 for IPv4 and 112 for IPv6. + type: integer + cidr: + description: The pool CIDR. + type: string + disabled: + description: When disabled is true, Calico IPAM will not assign addresses + from this pool. + type: boolean + ipip: + description: 'Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only.' + properties: + enabled: + description: When enabled is true, ipip tunneling will be used + to deliver packets to destinations within this pool. + type: boolean + mode: + description: The IPIP mode. This can be one of "always" or "cross-subnet". A + mode of "always" will also use IPIP tunneling for routing to + destination IP addresses within this pool. A mode of "cross-subnet" + will only use IPIP tunneling when the destination node is on + a different subnet to the originating node. The default value + (if not specified) is "always". + type: string + type: object + ipipMode: + description: Contains configuration for IPIP tunneling for this pool. + If not specified, then this is defaulted to "Never" (i.e. IPIP tunneling + is disabled). + type: string + nat-outgoing: + description: 'Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only.' + type: boolean + natOutgoing: + description: When nat-outgoing is true, packets sent from Calico networked + containers in this pool to destinations outside of this pool will + be masqueraded. + type: boolean + nodeSelector: + description: Allows IPPool to allocate for a specific node by label + selector. + type: string + vxlanMode: + description: Contains configuration for VXLAN tunneling for this pool. + If not specified, then this is defaulted to "Never" (i.e. VXLAN + tunneling is disabled). 
+ type: string + required: + - cidr + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: kubecontrollersconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: KubeControllersConfiguration + listKind: KubeControllersConfigurationList + plural: kubecontrollersconfigurations + singular: kubecontrollersconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: KubeControllersConfigurationSpec contains the values of the + Kubernetes controllers configuration. + properties: + controllers: + description: Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: Namespace enables and configures the namespace controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + node: + description: Node enables and configures the node controller. + Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: HostEndpoint controls syncing nodes to host endpoints. + Disabled by default, set to nil to disable. + properties: + autoCreate: + description: 'AutoCreate enables automatic creation of + host endpoints for every node. [Default: Disabled]' + type: string + type: object + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + syncLabels: + description: 'SyncLabels controls whether to copy Kubernetes + node labels to Calico nodes. [Default: Enabled]' + type: string + type: object + policy: + description: Policy enables and configures the policy controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + serviceAccount: + description: ServiceAccount enables and configures the service + account controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + workloadEndpoint: + description: WorkloadEndpoint enables and configures the workload + endpoint controller. Enabled by default, set to nil to disable. 
+ properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + type: object + etcdV3CompactionPeriod: + description: 'EtcdV3CompactionPeriod is the period between etcdv3 + compaction requests. Set to 0 to disable. [Default: 10m]' + type: string + healthChecks: + description: 'HealthChecks enables or disables support for health + checks [Default: Enabled]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]' + type: string + required: + - controllers + type: object + status: + description: KubeControllersConfigurationStatus represents the status + of the configuration. It's useful for admins to be able to see the actual + config that was applied, which can be modified by environment variables + on the kube-controllers process. + properties: + environmentVars: + additionalProperties: + type: string + description: EnvironmentVars contains the environment variables on + the kube-controllers that influenced the RunningConfig. + type: object + runningConfig: + description: RunningConfig contains the effective config that is running + in the kube-controllers pod, after merging the API resource with + any environment variables. + properties: + controllers: + description: Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: Namespace enables and configures the namespace + controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + node: + description: Node enables and configures the node controller. + Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: HostEndpoint controls syncing nodes to host + endpoints. Disabled by default, set to nil to disable. + properties: + autoCreate: + description: 'AutoCreate enables automatic creation + of host endpoints for every node. [Default: Disabled]' + type: string + type: object + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + syncLabels: + description: 'SyncLabels controls whether to copy Kubernetes + node labels to Calico nodes. [Default: Enabled]' + type: string + type: object + policy: + description: Policy enables and configures the policy controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + serviceAccount: + description: ServiceAccount enables and configures the service + account controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + workloadEndpoint: + description: WorkloadEndpoint enables and configures the workload + endpoint controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. 
[Default: + 5m]' + type: string + type: object + type: object + etcdV3CompactionPeriod: + description: 'EtcdV3CompactionPeriod is the period between etcdv3 + compaction requests. Set to 0 to disable. [Default: 10m]' + type: string + healthChecks: + description: 'HealthChecks enables or disables support for health + checks [Default: Enabled]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which + logs are sent to the stdout. [Default: Info]' + type: string + required: + - controllers + type: object + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: networkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: NetworkPolicy + listKind: NetworkPolicyList + plural: networkpolicies + singular: networkpolicy + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + egress: + description: The ordered set of egress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with Not. All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." 
+ type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label my_label. \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label my_label. + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. 
+ items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernels iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernels iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. 
+ \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label my_label. \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label my_label. + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. 
+ type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: The ordered set of ingress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with Not. All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. 
The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label my_label. \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label my_label. + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernels iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. 
If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernels iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). 
+ \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label my_label. \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label my_label. + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + required: + - action + type: object + type: array + order: + description: Order is an optional field that specifies the order in + which the policy is applied. Policies with higher "order" are applied + after those with lower order. If the order is omitted, it may be + considered to be "infinite" - i.e. the policy will be applied last. Policies + with identical order will be applied in alphanumerical order based + on the Policy "Name". + type: number + selector: + description: "The selector is an expression used to pick pick out + the endpoints that the policy should be applied to. \n Selector + expressions follow this syntax: \n \tlabel == \"string_literal\" + \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" + \ -> not equal; also matches if label is not present \tlabel in + { \"a\", \"b\", \"c\", ... } -> true if the value of label X is + one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", + ... } -> true if the value of label X is not one of \"a\", \"b\", + \"c\" \thas(label_name) -> True if that label is present \t! expr + -> negation of expr \texpr && expr -> Short-circuit and \texpr + || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() + or the empty selector -> matches all endpoints. \n Label names are + allowed to contain alphanumerics, -, _ and /. String literals are + more permissive but they do not support escape characters. \n Examples + (with made-up labels): \n \ttype == \"webserver\" && deployment + == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != + \"dev\" \t! has(label_name)" + type: string + serviceAccountSelector: + description: ServiceAccountSelector is an optional field for an expression + used to select a pod based on service accounts. + type: string + types: + description: "Types indicates whether this policy applies to ingress, + or to egress, or to both. 
When not explicitly specified (and so + the value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress are present in the policy. The default + is: \n - [ PolicyTypeIngress ], if there are no Egress rules (including + the case where there are also no Ingress rules) \n - [ PolicyTypeEgress + ], if there are Egress rules but no Ingress rules \n - [ PolicyTypeIngress, + PolicyTypeEgress ], if there are both Ingress and Egress rules. + \n When the policy is read back again, Types will always be one + of these values, never empty or nil." + items: + description: PolicyType enumerates the possible values of the PolicySpec + Types field. + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: networksets.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: NetworkSet + listKind: NetworkSetList + plural: networksets + singular: networkset + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: NetworkSet is the Namespaced-equivalent of the GlobalNetworkSet. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +--- +# Source: calico/templates/calico-kube-controllers-rbac.yaml + +# Include a clusterrole for the kube-controllers component, +# and bind it to the calico-kube-controllers serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +rules: + # Nodes are watched to monitor for deletions. + - apiGroups: [""] + resources: + - nodes + verbs: + - watch + - list + - get + # Pods are queried to check for existence. + - apiGroups: [""] + resources: + - pods + verbs: + - get + # IPAM resources are manipulated when nodes are deleted. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + verbs: + - list + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + # kube-controllers manages hostendpoints. + - apiGroups: ["crd.projectcalico.org"] + resources: + - hostendpoints + verbs: + - get + - list + - create + - update + - delete + # Needs access to update clusterinformations. 
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - clusterinformations + verbs: + - get + - create + - update + # KubeControllersConfiguration is where it gets its config + - apiGroups: ["crd.projectcalico.org"] + resources: + - kubecontrollersconfigurations + verbs: + # read its own config + - get + # create a default if none exists + - create + # update status + - update + # watch for changes + - watch + # calico-kube-controllers requires root privileges + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - oob-anyuid-psp +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-kube-controllers +subjects: +- kind: ServiceAccount + name: calico-kube-controllers + namespace: kube-system +--- + +--- +# Source: calico/templates/calico-node-rbac.yaml +# Include a clusterrole for the calico-node DaemonSet, +# and bind it to the calico-node serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-node +rules: + # The CNI plugin needs to get pods, nodes, and namespaces. + - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + verbs: + - get + - apiGroups: [""] + resources: + - endpoints + - services + verbs: + # Used to discover service IPs for advertisement. + - watch + - list + # Used to discover Typhas. + - get + # Pod CIDR auto-detection on kubeadm needs access to config maps. + - apiGroups: [""] + resources: + - configmaps + verbs: + - get + - apiGroups: [""] + resources: + - nodes/status + verbs: + # Needed for clearing NodeNetworkUnavailable flag. + - patch + # Calico stores some configuration information in node annotations. + - update + # Watch for changes to Kubernetes NetworkPolicies. + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Used by Calico for policy information. + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch + # The CNI plugin patches pods/status. + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + # Calico monitors various CRDs for config. + - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + verbs: + - get + - list + - watch + # Calico must create and update some CRDs on startup. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico stores some configuration information on the node. + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch + # These permissions are only required for upgrade from v2.6, and can + # be removed after upgrade or on fresh installations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update + # These permissions are required for Calico CNI to perform IPAM allocations. 
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - apiGroups: ["crd.projectcalico.org"] + resources: + - ipamconfigs + verbs: + - get + # Block affinities must also be watchable by confd for route aggregation. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + verbs: + - watch + # The Calico IPAM migration needs to get daemonsets. These permissions can be + # removed if not upgrading from an installation using host-local IPAM. + - apiGroups: ["apps"] + resources: + - daemonsets + verbs: + - get + # calico-node requires full privileges + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - oob-privileged-psp + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: calico-node + namespace: kube-system + +{% if plugins.calico.typha.enabled | default(false) == true -%} +--- +# Source: calico/templates/calico-typha.yaml +# This manifest creates a Service, which will be backed by Calico's Typha daemon. +# Typha sits in between Felix and the API server, reducing Calico's load on the API server. + +apiVersion: v1 +kind: Service +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha +spec: + ports: + - port: 5473 + protocol: TCP + targetPort: calico-typha + name: calico-typha + selector: + k8s-app: calico-typha + +--- + +# This manifest creates a Deployment of Typha to back the above service. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha +spec: + # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the + # typha_service_name variable in the calico-config ConfigMap above. + # + # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential + # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In + # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade. + replicas: {{ plugins['calico']['typha']['replicas'] }} + revisionHistoryLimit: 2 + selector: + matchLabels: + k8s-app: calico-typha + template: + metadata: + labels: + k8s-app: calico-typha + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: 'true' + spec: + nodeSelector: + {{ plugins['calico']['typha']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) }} + hostNetwork: true + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + # Since Calico can't network a pod until Typha is up, we need to run Typha itself + # as a host-networked pod. 
+ serviceAccountName: calico-node + priorityClassName: system-cluster-critical + # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573 + securityContext: + fsGroup: 65534 + containers: + - image: {% if plugins['calico']['installation']['registry'] is defined and plugins['calico']['installation']['registry']|length %}{{ plugins['calico']['installation']['registry'] }}/{% endif %}{{ plugins['calico']['typha']['image'] }} + name: calico-typha + ports: + - containerPort: 5473 + name: calico-typha + protocol: TCP + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # Enable "info" logging by default. Can be set to "debug" to increase verbosity. + - name: TYPHA_LOGSEVERITYSCREEN + value: "info" + # Disable logging to file and syslog since those don't make sense in Kubernetes. + - name: TYPHA_LOGFILEPATH + value: "none" + - name: TYPHA_LOGSEVERITYSYS + value: "none" + # Monitor the Kubernetes API to find the number of running instances and rebalance + # connections. + - name: TYPHA_CONNECTIONREBALANCINGMODE + value: "kubernetes" + - name: TYPHA_DATASTORETYPE + value: "kubernetes" + - name: TYPHA_HEALTHENABLED + value: "true" + # Uncomment these lines to enable prometheus metrics. Since Typha is host-networked, + # this opens a port on the host, which may need to be secured. + #- name: TYPHA_PROMETHEUSMETRICSENABLED + # value: "true" + #- name: TYPHA_PROMETHEUSMETRICSPORT + # value: "9093" + livenessProbe: + httpGet: + path: /liveness + port: 9098 + host: localhost + periodSeconds: 30 + initialDelaySeconds: 30 + securityContext: + runAsNonRoot: true + allowPrivilegeEscalation: false + readinessProbe: + httpGet: + path: /readiness + port: 9098 + host: localhost + periodSeconds: 10 + +--- + +# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict + +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha +spec: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: calico-typha +{%- endif %} +--- +# Source: calico/templates/calico-node.yaml +# This manifest installs the calico-node container, as well +# as the CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: calico-node + namespace: kube-system + labels: + k8s-app: calico-node +spec: + selector: + matchLabels: + k8s-app: calico-node + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + k8s-app: calico-node + annotations: + # This, along with the CriticalAddonsOnly toleration below, + # marks the pod as a critical add-on, ensuring it gets + # priority scheduling and that its resources are reserved + # if it ever gets evicted. + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + nodeSelector: + kubernetes.io/os: linux + hostNetwork: true + tolerations: + # Make sure calico-node gets scheduled on all nodes. + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. 
+ - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + serviceAccountName: calico-node + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. + terminationGracePeriodSeconds: 0 + priorityClassName: system-node-critical + initContainers: + # This container performs upgrade from host-local IPAM to calico-ipam. + # It can be deleted if this is a fresh installation, or if you have already + # upgraded to use calico-ipam. + - name: upgrade-ipam + image: {% if plugins['calico']['installation']['registry'] is defined and plugins['calico']['installation']['registry']|length %}{{ plugins['calico']['installation']['registry'] }}/{% endif %}{{ plugins['calico']['cni']['image'] }} + command: ["/opt/cni/bin/calico-ipam", "-upgrade"] + env: + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + volumeMounts: + - mountPath: /var/lib/cni/networks + name: host-local-net-dir + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + securityContext: + privileged: true + # This container installs the CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: {% if plugins['calico']['installation']['registry'] is defined and plugins['calico']['installation']['registry']|length %}{{ plugins['calico']['installation']['registry'] }}/{% endif %}{{ plugins['calico']['cni']['image'] }} + command: ["/opt/cni/bin/install"] + env: + # Name of the CNI config file to create. + - name: CNI_CONF_NAME + value: "10-calico.conflist" + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: calico-config + key: cni_network_config + # Set the hostname based on the k8s node name. + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # CNI MTU Config variable + - name: CNI_MTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Prevents the container from sleeping forever. + - name: SLEEP + value: "false" + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + securityContext: + privileged: true + # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes + # to communicate with Felix over the Policy Sync API. + - name: flexvol-driver + image: {% if plugins['calico']['installation']['registry'] is defined and plugins['calico']['installation']['registry']|length %}{{ plugins['calico']['installation']['registry'] }}/{% endif %}{{ plugins['calico']['flexvol']['image'] }} + volumeMounts: + - name: flexvol-driver-host + mountPath: /host/driver + securityContext: + privileged: true + containers: + # Runs calico-node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: {% if plugins['calico']['installation']['registry'] is defined and plugins['calico']['installation']['registry']|length %}{{ plugins['calico']['installation']['registry'] }}/{% endif %}{{ plugins['calico']['node']['image'] }} + env: + # Use Kubernetes API as the backing datastore. 
+ - name: DATASTORE_TYPE + value: "{{ plugins['calico']['env']['DATASTORE_TYPE'] }}" +{%- if plugins.calico.typha.enabled | default(false) == true %} + # Typha support: controlled by the ConfigMap. + - name: FELIX_TYPHAK8SSERVICENAME + valueFrom: + configMapKeyRef: + name: calico-config + key: typha_service_name +{%- endif %} + # Wait for the datastore. + - name: WAIT_FOR_DATASTORE + value: "{{ plugins['calico']['env']['WAIT_FOR_DATASTORE'] }}" + # Set based on the k8s node name. + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Choose the backend to use. + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "{{ plugins['calico']['env']['CLUSTER_TYPE'] }}" + # Set MTU for tunnel device used if ipip is enabled + - name: FELIX_IPINIPMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Set MTU for the VXLAN tunnel device. + - name: FELIX_VXLANMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + + - name: CALICO_ROUTER_ID + value: "{{ plugins['calico']['env']['CALICO_ROUTER_ID'] }}" + + # IPv4 OPTIONS + - name: IP + value: "{{ plugins['calico']['env']['IP'] }}" + - name: IP_AUTODETECTION_METHOD + value: "{{ plugins['calico']['env']['IP_AUTODETECTION_METHOD'] }}" + - name: CALICO_IPV4POOL_IPIP + value: "{{ plugins['calico']['env']['CALICO_IPV4POOL_IPIP'] }}" + - name: CALICO_IPV4POOL_VXLAN + value: "{{ plugins['calico']['env']['CALICO_IPV4POOL_VXLAN'] }}" + - name: CALICO_IPV4POOL_CIDR + value: "{{ plugins['calico']['env']['CALICO_IPV4POOL_CIDR'] }}" + +{%- if not services.kubeadm.networking.podSubnet|isipv4 %} + # IPv6 OPTIONS + - name: CALICO_IPV6POOL_CIDR + value: "{{ plugins['calico']['env']['CALICO_IPV6POOL_CIDR'] }}" + - name: IP6 + value: "{{ plugins['calico']['env']['IP6'] }}" + - name: IP6_AUTODETECTION_METHOD + value: "{{ plugins['calico']['env']['IP6_AUTODETECTION_METHOD'] }}" + - name: FELIX_IPV6SUPPORT + value: "{{ plugins['calico']['env']['FELIX_IPV6SUPPORT'] }}" + - name: CALICO_IPV6POOL_IPIP + value: "{{ plugins['calico']['env']['CALICO_IPV6POOL_IPIP'] }}" + - name: CALICO_IPV6POOL_VXLAN + value: "{{ plugins['calico']['env']['CALICO_IPV6POOL_VXLAN'] }}" +{%- endif %} + + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "{{ plugins['calico']['env']['CALICO_DISABLE_FILE_LOGGING'] }}" + # Set Felix endpoint to host default action to ACCEPT. 
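+            # The IPv6 block above is wrapped in `{%- if not services.kubeadm.networking.podSubnet|isipv4 %}`;
+            # `isipv4` is assumed to be a custom Jinja2 filter shipped with kubetool, so the
+            # IP6/CALICO_IPV6POOL_* variables are rendered only when the pod subnet is not an
+            # IPv4 CIDR (e.g. a hypothetical IPv6 podSubnet such as "fd02::/48"); with an IPv4
+            # podSubnet the whole block is dropped from the rendered manifest.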
+ - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "{{ plugins['calico']['env']['FELIX_DEFAULTENDPOINTTOHOSTACTION'] }}" + # Set Felix logging to "info" + - name: FELIX_LOGSEVERITYSCREEN + value: "{{ plugins['calico']['env']['FELIX_LOGSEVERITYSCREEN'] }}" + - name: FELIX_HEALTHENABLED + value: "{{ plugins['calico']['env']['FELIX_HEALTHENABLED'] }}" + securityContext: + privileged: true + resources: + requests: + cpu: 250m + livenessProbe: + exec: + command: + - /bin/calico-node + - -felix-live + - -bird-live + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + readinessProbe: + exec: + command: + - /bin/calico-node + - -felix-ready + - -bird-ready + periodSeconds: 10 + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false + - name: policysync + mountPath: /var/run/nodeagent + volumes: + # Used by calico-node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + - name: var-lib-calico + hostPath: + path: /var/lib/calico + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + # Mount in the directory for host-local IPAM allocations. This is + # used when upgrading from host-local to calico-ipam, and can be removed + # if not using the upgrade-ipam init container. + - name: host-local-net-dir + hostPath: + path: /var/lib/cni/networks + # Used to create per-pod Unix Domain Sockets + - name: policysync + hostPath: + type: DirectoryOrCreate + path: /var/run/nodeagent + # Used to install Flex Volume Driver + - name: flexvol-driver-host + hostPath: + type: DirectoryOrCreate + path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-node + namespace: kube-system + +--- +# Source: calico/templates/calico-kube-controllers.yaml +# See https://github.com/projectcalico/kube-controllers +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers +spec: + # The controllers can only have a single active instance. + replicas: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers + strategy: + type: Recreate + template: + metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + nodeSelector: + {{ plugins['calico']['kube-controllers']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) }} + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + serviceAccountName: calico-kube-controllers + priorityClassName: system-cluster-critical + containers: + - name: calico-kube-controllers + image: {% if plugins['calico']['installation']['registry'] is defined and plugins['calico']['installation']['registry']|length %}{{ plugins['calico']['installation']['registry'] }}/{% endif %}{{ plugins['calico']['kube-controllers']['image'] }} + env: + # Choose which controllers to run. 
+ - name: ENABLED_CONTROLLERS + value: node + - name: DATASTORE_TYPE + value: kubernetes + readinessProbe: + exec: + command: + - /usr/bin/check-status + - -r + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-kube-controllers + namespace: kube-system + +--- +# Source: calico/templates/calico-etcd-secrets.yaml + +--- +# Source: calico/templates/calico-typha.yaml + +--- +# Source: calico/templates/configure-canal.yaml + + diff --git a/kubetool/templates/plugins/calico-v3.19.yaml.j2 b/kubetool/templates/plugins/calico-v3.19.yaml.j2 new file mode 100644 index 000000000..956b1dc2c --- /dev/null +++ b/kubetool/templates/plugins/calico-v3.19.yaml.j2 @@ -0,0 +1,4005 @@ +--- +# Source: calico/templates/calico-config.yaml +# This ConfigMap is used to configure a self-hosted Calico installation. +kind: ConfigMap +apiVersion: v1 +metadata: + name: calico-config + namespace: kube-system +data: + typha_service_name: "{% if plugins.calico.typha.enabled | default(false) == true %}calico-typha{% else %}none{% endif %}" + # Configure the backend to use. + calico_backend: "bird" + + # Configure the MTU to use for workload interfaces and tunnels. + # By default, MTU is auto-detected, and explicitly setting this field should not be required. + # You can override auto-detection by providing a non-zero value. + veth_mtu: "{{ plugins['calico']['mtu'] }}" + + # The CNI network configuration to install on each node. The special + # values in this config will be automatically populated. + cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "calico", + "log_level": "info", + "log_file_path": "/var/log/calico/cni/cni.log", + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + "mtu": __CNI_MTU__, + "ipam": {{ plugins['calico']['cni']['ipam']|tojson|safe }}, + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + }, + { + "type": "portmap", + "snat": true, + "capabilities": {"portMappings": true} + }, + { + "type": "bandwidth", + "capabilities": {"bandwidth": true} + } + ] + } + +--- +# Source: calico/templates/kdd-crds.yaml + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: bgpconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPConfiguration + listKind: BGPConfigurationList + plural: bgpconfigurations + singular: bgpconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: BGPConfiguration contains the configuration for any BGP routing. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPConfigurationSpec contains the values of the BGP configuration. + properties: + asNumber: + description: 'ASNumber is the default AS number used by a node. 
[Default: + 64512]' + format: int32 + type: integer + communities: + description: Communities is a list of BGP community values and their + arbitrary names for tagging routes. + items: + description: Community contains standard or large community value + and its name. + properties: + name: + description: Name given to community value. + type: string + value: + description: Value must be of format `aa:nn` or `aa:nn:mm`. + For standard community use `aa:nn` format, where `aa` and + `nn` are 16 bit number. For large community use `aa:nn:mm` + format, where `aa`, `nn` and `mm` are 32 bit number. Where, + `aa` is an AS Number, `nn` and `mm` are per-AS identifier. + pattern: ^(\d+):(\d+)$|^(\d+):(\d+):(\d+)$ + type: string + type: object + type: array + listenPort: + description: ListenPort is the port where BGP protocol should listen. + Defaults to 179 + maximum: 65535 + minimum: 1 + type: integer + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: INFO]' + type: string + nodeToNodeMeshEnabled: + description: 'NodeToNodeMeshEnabled sets whether full node to node + BGP mesh is enabled. [Default: true]' + type: boolean + prefixAdvertisements: + description: PrefixAdvertisements contains per-prefix advertisement + configuration. + items: + description: PrefixAdvertisement configures advertisement properties + for the specified CIDR. + properties: + cidr: + description: CIDR for which properties should be advertised. + type: string + communities: + description: Communities can be list of either community names + already defined in `Specs.Communities` or community value + of format `aa:nn` or `aa:nn:mm`. For standard community use + `aa:nn` format, where `aa` and `nn` are 16 bit number. For + large community use `aa:nn:mm` format, where `aa`, `nn` and + `mm` are 32 bit number. Where,`aa` is an AS Number, `nn` and + `mm` are per-AS identifier. + items: + type: string + type: array + type: object + type: array + serviceClusterIPs: + description: ServiceClusterIPs are the CIDR blocks from which service + cluster IPs are allocated. If specified, Calico will advertise these + blocks, as well as any cluster IPs within them. + items: + description: ServiceClusterIPBlock represents a single allowed ClusterIP + CIDR block. + properties: + cidr: + type: string + type: object + type: array + serviceExternalIPs: + description: ServiceExternalIPs are the CIDR blocks for Kubernetes + Service External IPs. Kubernetes Service ExternalIPs will only be + advertised if they are within one of these blocks. + items: + description: ServiceExternalIPBlock represents a single allowed + External IP CIDR block. + properties: + cidr: + type: string + type: object + type: array + serviceLoadBalancerIPs: + description: ServiceLoadBalancerIPs are the CIDR blocks for Kubernetes + Service LoadBalancer IPs. Kubernetes Service status.LoadBalancer.Ingress + IPs will only be advertised if they are within one of these blocks. + items: + description: ServiceLoadBalancerIPBlock represents a single allowed + LoadBalancer IP CIDR block. 
+ properties: + cidr: + type: string + type: object + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: bgppeers.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPPeer + listKind: BGPPeerList + plural: bgppeers + singular: bgppeer + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPPeerSpec contains the specification for a BGPPeer resource. + properties: + asNumber: + description: The AS Number of the peer. + format: int32 + type: integer + keepOriginalNextHop: + description: Option to keep the original nexthop field when routes + are sent to a BGP Peer. Setting "true" configures the selected BGP + Peers node to use the "next hop keep;" instead of "next hop self;"(default) + in the specific branch of the Node on "bird.cfg". + type: boolean + node: + description: The node name identifying the Calico node instance that + is targeted by this peer. If this is not set, and no nodeSelector + is specified, then this BGP peer selects all nodes in the cluster. + type: string + nodeSelector: + description: Selector for the nodes that should have this peering. When + this is set, the Node field must be empty. + type: string + password: + description: Optional BGP password for the peerings generated by this + BGPPeer resource. + properties: + secretKeyRef: + description: Selects a key of a secret in the node pod's namespace. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + type: object + peerIP: + description: The IP address of the peer followed by an optional port + number to peer with. If port number is given, format should be `[]:port` + or `:` for IPv4. If optional port number is not set, + and this peer IP and ASNumber belongs to a calico/node with ListenPort + set in BGPConfiguration, then we use that port to peer. + type: string + peerSelector: + description: Selector for the remote nodes to peer with. When this + is set, the PeerIP and ASNumber fields must be empty. For each + peering between the local node and selected remote nodes, we configure + an IPv4 peering if both ends have NodeBGPSpec.IPv4Address specified, + and an IPv6 peering if both ends have NodeBGPSpec.IPv6Address specified. 
The + remote AS number comes from the remote node's NodeBGPSpec.ASNumber, + or the global default if that is not set. + type: string + sourceAddress: + description: Specifies whether and how to configure a source address + for the peerings generated by this BGPPeer resource. Default value + "UseNodeIP" means to configure the node IP as the source address. "None" + means not to configure a source address. + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: blockaffinities.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BlockAffinity + listKind: BlockAffinityList + plural: blockaffinities + singular: blockaffinity + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BlockAffinitySpec contains the specification for a BlockAffinity + resource. + properties: + cidr: + type: string + deleted: + description: Deleted indicates that this block affinity is being deleted. + This field is a string for compatibility with older releases that + mistakenly treat this field as a string. + type: string + node: + type: string + state: + type: string + required: + - cidr + - deleted + - node + - state + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusterinformations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: ClusterInformation + listKind: ClusterInformationList + plural: clusterinformations + singular: clusterinformation + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ClusterInformation contains the cluster specific information. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterInformationSpec contains the values of describing + the cluster. 
+ properties: + calicoVersion: + description: CalicoVersion is the version of Calico that the cluster + is running + type: string + clusterGUID: + description: ClusterGUID is the GUID of the cluster + type: string + clusterType: + description: ClusterType describes the type of the cluster + type: string + datastoreReady: + description: DatastoreReady is used during significant datastore migrations + to signal to components such as Felix that it should wait before + accessing the datastore. + type: boolean + variant: + description: Variant declares which variant of Calico should be active. + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: felixconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: FelixConfiguration + listKind: FelixConfigurationList + plural: felixconfigurations + singular: felixconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Felix Configuration contains the configuration for Felix. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: FelixConfigurationSpec contains the values of the Felix configuration. + properties: + allowIPIPPacketsFromWorkloads: + description: 'AllowIPIPPacketsFromWorkloads controls whether Felix + will add a rule to drop IPIP encapsulated traffic from workloads + [Default: false]' + type: boolean + allowVXLANPacketsFromWorkloads: + description: 'AllowVXLANPacketsFromWorkloads controls whether Felix + will add a rule to drop VXLAN encapsulated traffic from workloads + [Default: false]' + type: boolean + awsSrcDstCheck: + description: 'Set source-destination-check on AWS EC2 instances. Accepted + value must be one of "DoNothing", "Enabled" or "Disabled". [Default: + DoNothing]' + enum: + - DoNothing + - Enable + - Disable + type: string + bpfConnectTimeLoadBalancingEnabled: + description: 'BPFConnectTimeLoadBalancingEnabled when in BPF mode, + controls whether Felix installs the connection-time load balancer. The + connect-time load balancer is required for the host to be able to + reach Kubernetes services and it improves the performance of pod-to-service + connections. The only reason to disable it is for debugging purposes. [Default: + true]' + type: boolean + bpfDataIfacePattern: + description: BPFDataIfacePattern is a regular expression that controls + which interfaces Felix should attach BPF programs to in order to + catch traffic to/from the network. This needs to match the interfaces + that Calico workload traffic flows over as well as any interfaces + that handle incoming traffic to nodeports and services from outside + the cluster. 
It should not match the workload interfaces (usually + named cali...). + type: string + bpfDisableUnprivileged: + description: 'BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled + sysctl to disable unprivileged use of BPF. This ensures that unprivileged + users cannot access Calico''s BPF maps and cannot insert their own + BPF programs to interfere with Calico''s. [Default: true]' + type: boolean + bpfEnabled: + description: 'BPFEnabled, if enabled Felix will use the BPF dataplane. + [Default: false]' + type: boolean + bpfExtToServiceConnmark: + description: 'BPFExtToServiceConnmark in BPF mode, control a 32bit + mark that is set on connections from an external client to a local + service. This mark allows us to control how packets of that connection + are routed within the host and how is routing intepreted by RPF + check. [Default: 0]' + type: integer + bpfExternalServiceMode: + description: 'BPFExternalServiceMode in BPF mode, controls how connections + from outside the cluster to services (node ports and cluster IPs) + are forwarded to remote workloads. If set to "Tunnel" then both + request and response traffic is tunneled to the remote node. If + set to "DSR", the request traffic is tunneled but the response traffic + is sent directly from the remote node. In "DSR" mode, the remote + node appears to use the IP of the ingress node; this requires a + permissive L2 network. [Default: Tunnel]' + type: string + bpfKubeProxyEndpointSlicesEnabled: + description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls + whether Felix's embedded kube-proxy accepts EndpointSlices or not. + type: boolean + bpfKubeProxyIptablesCleanupEnabled: + description: 'BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF + mode, Felix will proactively clean up the upstream Kubernetes kube-proxy''s + iptables chains. Should only be enabled if kube-proxy is not running. [Default: + true]' + type: boolean + bpfKubeProxyMinSyncPeriod: + description: 'BPFKubeProxyMinSyncPeriod, in BPF mode, controls the + minimum time between updates to the dataplane for Felix''s embedded + kube-proxy. Lower values give reduced set-up latency. Higher values + reduce Felix CPU usage by batching up more work. [Default: 1s]' + type: string + bpfLogLevel: + description: 'BPFLogLevel controls the log level of the BPF programs + when in BPF dataplane mode. One of "Off", "Info", or "Debug". The + logs are emitted to the BPF trace pipe, accessible with the command + `tc exec bpf debug`. [Default: Off].' + type: string + chainInsertMode: + description: 'ChainInsertMode controls whether Felix hooks the kernel''s + top-level iptables chains by inserting a rule at the top of the + chain or by appending a rule at the bottom. insert is the safe default + since it prevents Calico''s rules from being bypassed. If you switch + to append mode, be sure that the other rules in the chains signal + acceptance by falling through to the Calico rules, otherwise the + Calico policy will be bypassed. [Default: insert]' + type: string + dataplaneDriver: + type: string + debugDisableLogDropping: + type: boolean + debugMemoryProfilePath: + type: string + debugSimulateCalcGraphHangAfter: + type: string + debugSimulateDataplaneHangAfter: + type: string + defaultEndpointToHostAction: + description: 'DefaultEndpointToHostAction controls what happens to + traffic that goes from a workload endpoint to the host itself (after + the traffic hits the endpoint egress policy). 
By default Calico + blocks traffic from workload endpoints to the host itself with an + iptables "DROP" action. If you want to allow some or all traffic + from endpoint to host, set this parameter to RETURN or ACCEPT. Use + RETURN if you have your own rules in the iptables "INPUT" chain; + Calico will insert its rules at the top of that chain, then "RETURN" + packets to the "INPUT" chain once it has completed processing workload + endpoint egress policy. Use ACCEPT to unconditionally accept packets + from workloads after processing workload endpoint egress policy. + [Default: Drop]' + type: string + deviceRouteProtocol: + description: This defines the route protocol added to programmed device + routes, by default this will be RTPROT_BOOT when left blank. + type: integer + deviceRouteSourceAddress: + description: This is the source address to use on programmed device + routes. By default the source address is left blank, leaving the + kernel to choose the source address used. + type: string + disableConntrackInvalidCheck: + type: boolean + endpointReportingDelay: + type: string + endpointReportingEnabled: + type: boolean + externalNodesList: + description: ExternalNodesCIDRList is a list of CIDR's of external-non-calico-nodes + which may source tunnel traffic and have the tunneled traffic be + accepted at calico nodes. + items: + type: string + type: array + failsafeInboundHostPorts: + description: 'FailsafeInboundHostPorts is a list of UDP/TCP ports + and CIDRs that Felix will allow incoming traffic to host endpoints + on irrespective of the security policy. This is useful to avoid + accidentally cutting off a host with incorrect configuration. For + + back-compatibility, if the protocol is not specified, it defaults + to "tcp". If a CIDR is not specified, it will allow traffic from + all addresses. To disable all inbound host ports, use the value + none. The default value allows ssh access and DHCP. [Default: tcp:22, + udp:68, tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667]' + items: + description: ProtoPort is combination of protocol, port, and CIDR. + Protocol and port must be specified. + properties: + net: + type: string + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + failsafeOutboundHostPorts: + description: 'FailsafeOutboundHostPorts is a list of UDP/TCP ports + and CIDRs that Felix will allow outgoing traffic from host endpoints + to irrespective of the security policy. This is useful to avoid + accidentally cutting off a host with incorrect configuration. For + + back-compatibility, if the protocol is not specified, it defaults + to "tcp". If a CIDR is not specified, it will allow traffic from + all addresses. To disable all outbound host ports, use the value + none. The default value opens etcd''s standard ports to ensure that + Felix does not get cut off from etcd as well as allowing DHCP and + DNS. [Default: tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, + tcp:6667, udp:53, udp:67]' + items: + description: ProtoPort is combination of protocol, port, and CIDR. + Protocol and port must be specified. + properties: + net: + type: string + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + featureDetectOverride: + description: FeatureDetectOverride is used to override the feature + detection. Values are specified in a comma separated list with no + spaces, example; "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". 
+ "true" or "false" will force the feature, empty or omitted values + are auto-detected. + type: string + genericXDPEnabled: + description: 'GenericXDPEnabled enables Generic XDP so network cards + that don''t support XDP offload or driver modes can use XDP. This + is not recommended since it doesn''t provide better performance + than iptables. [Default: false]' + type: boolean + healthEnabled: + type: boolean + healthHost: + type: string + healthPort: + type: integer + interfaceExclude: + description: 'InterfaceExclude is a comma-separated list of interfaces + that Felix should exclude when monitoring for host endpoints. The + default value ensures that Felix ignores Kubernetes'' IPVS dummy + interface, which is used internally by kube-proxy. If you want to + exclude multiple interface names using a single value, the list + supports regular expressions. For regular expressions you must wrap + the value with ''/''. For example having values ''/^kube/,veth1'' + will exclude all interfaces that begin with ''kube'' and also the + interface ''veth1''. [Default: kube-ipvs0]' + type: string + interfacePrefix: + description: 'InterfacePrefix is the interface name prefix that identifies + workload endpoints and so distinguishes them from host endpoint + interfaces. Note: in environments other than bare metal, the orchestrators + configure this appropriately. For example our Kubernetes and Docker + integrations set the ''cali'' value, and our OpenStack integration + sets the ''tap'' value. [Default: cali]' + type: string + interfaceRefreshInterval: + description: InterfaceRefreshInterval is the period at which Felix + rescans local interfaces to verify their state. The rescan can be + disabled by setting the interval to 0. + type: string + ipipEnabled: + type: boolean + ipipMTU: + description: 'IPIPMTU is the MTU to set on the tunnel device. See + Configuring MTU [Default: 1440]' + type: integer + ipsetsRefreshInterval: + description: 'IpsetsRefreshInterval is the period at which Felix re-checks + all iptables state to ensure that no other process has accidentally + broken Calico''s rules. Set to 0 to disable iptables refresh. [Default: + 90s]' + type: string + iptablesBackend: + description: IptablesBackend specifies which backend of iptables will + be used. The default is legacy. + type: string + iptablesFilterAllowAction: + type: string + iptablesLockFilePath: + description: 'IptablesLockFilePath is the location of the iptables + lock file. You may need to change this if the lock file is not in + its standard location (for example if you have mapped it into Felix''s + container at a different path). [Default: /run/xtables.lock]' + type: string + iptablesLockProbeInterval: + description: 'IptablesLockProbeInterval is the time that Felix will + wait between attempts to acquire the iptables lock if it is not + available. Lower values make Felix more responsive when the lock + is contended, but use more CPU. [Default: 50ms]' + type: string + iptablesLockTimeout: + description: 'IptablesLockTimeout is the time that Felix will wait + for the iptables lock, or 0, to disable. To use this feature, Felix + must share the iptables lock file with all other processes that + also take the lock. When running Felix inside a container, this + requires the /run directory of the host to be mounted into the calico/node + or calico/felix container. 
[Default: 0s disabled]' + type: string + iptablesMangleAllowAction: + type: string + iptablesMarkMask: + description: 'IptablesMarkMask is the mask that Felix selects its + IPTables Mark bits from. Should be a 32 bit hexadecimal number with + at least 8 bits set, none of which clash with any other mark bits + in use on the system. [Default: 0xff000000]' + format: int32 + type: integer + iptablesNATOutgoingInterfaceFilter: + type: string + iptablesPostWriteCheckInterval: + description: 'IptablesPostWriteCheckInterval is the period after Felix + has done a write to the dataplane that it schedules an extra read + back in order to check the write was not clobbered by another process. + This should only occur if another application on the system doesn''t + respect the iptables lock. [Default: 1s]' + type: string + iptablesRefreshInterval: + description: 'IptablesRefreshInterval is the period at which Felix + re-checks the IP sets in the dataplane to ensure that no other process + has accidentally broken Calico''s rules. Set to 0 to disable IP + sets refresh. Note: the default for this value is lower than the + other refresh intervals as a workaround for a Linux kernel bug that + was fixed in kernel version 4.11. If you are using v4.11 or greater + you may want to set this to, a higher value to reduce Felix CPU + usage. [Default: 10s]' + type: string + ipv6Support: + type: boolean + kubeNodePortRanges: + description: 'KubeNodePortRanges holds list of port ranges used for + service node ports. Only used if felix detects kube-proxy running + in ipvs mode. Felix uses these ranges to separate host and workload + traffic. [Default: 30000:32767].' + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + logFilePath: + description: 'LogFilePath is the full path to the Felix log. Set to + none to disable file logging. [Default: /var/log/calico/felix.log]' + type: string + logPrefix: + description: 'LogPrefix is the log prefix that Felix uses when rendering + LOG rules. [Default: calico-packet]' + type: string + logSeverityFile: + description: 'LogSeverityFile is the log severity above which logs + are sent to the log file. [Default: Info]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]' + type: string + logSeveritySys: + description: 'LogSeveritySys is the log severity above which logs + are sent to the syslog. Set to None for no logging to syslog. [Default: + Info]' + type: string + maxIpsetSize: + type: integer + metadataAddr: + description: 'MetadataAddr is the IP address or domain name of the + server that can answer VM queries for cloud-init metadata. In OpenStack, + this corresponds to the machine running nova-api (or in Ubuntu, + nova-api-metadata). A value of none (case insensitive) means that + Felix should not set up any NAT rule for the metadata path. [Default: + 127.0.0.1]' + type: string + metadataPort: + description: 'MetadataPort is the port of the metadata server. This, + combined with global.MetadataAddr (if not ''None''), is used to + set up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort. + In most cases this should not need to be changed [Default: 8775].' + type: integer + mtuIfacePattern: + description: MTUIfacePattern is a regular expression that controls + which interfaces Felix should scan in order to calculate the host's + MTU. This should not match workload interfaces (usually named cali...). 
+ type: string + natOutgoingAddress: + description: NATOutgoingAddress specifies an address to use when performing + source NAT for traffic in a natOutgoing pool that is leaving the + network. By default the address used is an address on the interface + the traffic is leaving on (ie it uses the iptables MASQUERADE target) + type: string + natPortRange: + anyOf: + - type: integer + - type: string + description: NATPortRange specifies the range of ports that is used + for port mapping when doing outgoing NAT. When unset the default + behavior of the network stack is used. + pattern: ^.* + x-kubernetes-int-or-string: true + netlinkTimeout: + type: string + openstackRegion: + description: 'OpenstackRegion is the name of the region that a particular + Felix belongs to. In a multi-region Calico/OpenStack deployment, + this must be configured somehow for each Felix (here in the datamodel, + or in felix.cfg or the environment on each compute node), and must + match the [calico] openstack_region value configured in neutron.conf + on each node. [Default: Empty]' + type: string + policySyncPathPrefix: + description: 'PolicySyncPathPrefix is used to by Felix to communicate + policy changes to external services, like Application layer policy. + [Default: Empty]' + type: string + prometheusGoMetricsEnabled: + description: 'PrometheusGoMetricsEnabled disables Go runtime metrics + collection, which the Prometheus client does by default, when set + to false. This reduces the number of metrics reported, reducing + Prometheus load. [Default: true]' + type: boolean + prometheusMetricsEnabled: + description: 'PrometheusMetricsEnabled enables the Prometheus metrics + server in Felix if set to true. [Default: false]' + type: boolean + prometheusMetricsHost: + description: 'PrometheusMetricsHost is the host that the Prometheus + metrics server should bind to. [Default: empty]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. [Default: 9091]' + type: integer + prometheusProcessMetricsEnabled: + description: 'PrometheusProcessMetricsEnabled disables process metrics + collection, which the Prometheus client does by default, when set + to false. This reduces the number of metrics reported, reducing + Prometheus load. [Default: true]' + type: boolean + removeExternalRoutes: + description: Whether or not to remove device routes that have not + been programmed by Felix. Disabling this will allow external applications + to also add device routes. This is enabled by default which means + we will remove externally added routes. + type: boolean + reportingInterval: + description: 'ReportingInterval is the interval at which Felix reports + its status into the datastore or 0 to disable. Must be non-zero + in OpenStack deployments. [Default: 30s]' + type: string + reportingTTL: + description: 'ReportingTTL is the time-to-live setting for process-wide + status reports. [Default: 90s]' + type: string + routeRefreshInterval: + description: 'RouteRefreshInterval is the period at which Felix re-checks + the routes in the dataplane to ensure that no other process has + accidentally broken Calico''s rules. Set to 0 to disable route refresh. + [Default: 90s]' + type: string + routeSource: + description: 'RouteSource configures where Felix gets its routing + information. - WorkloadIPs: use workload endpoints to construct + routes. - CalicoIPAM: the default - use IPAM data to construct routes.' 
+ type: string + routeTableRange: + description: Calico programs additional Linux route tables for various + purposes. RouteTableRange specifies the indices of the route tables + that Calico should use. + properties: + max: + type: integer + min: + type: integer + required: + - max + - min + type: object + serviceLoopPrevention: + description: 'When service IP advertisement is enabled, prevent routing + loops to service IPs that are not in use, by dropping or rejecting + packets that do not get DNAT''d by kube-proxy. Unless set to "Disabled", + in which case such routing loops continue to be allowed. [Default: + Drop]' + type: string + sidecarAccelerationEnabled: + description: 'SidecarAccelerationEnabled enables experimental sidecar + acceleration [Default: false]' + type: boolean + usageReportingEnabled: + description: 'UsageReportingEnabled reports anonymous Calico version + number and cluster size to projectcalico.org. Logs warnings returned + by the usage server. For example, if a significant security vulnerability + has been discovered in the version of Calico being used. [Default: + true]' + type: boolean + usageReportingInitialDelay: + description: 'UsageReportingInitialDelay controls the minimum delay + before Felix makes a report. [Default: 300s]' + type: string + usageReportingInterval: + description: 'UsageReportingInterval controls the interval at which + Felix makes reports. [Default: 86400s]' + type: string + useInternalDataplaneDriver: + type: boolean + vxlanEnabled: + type: boolean + vxlanMTU: + description: 'VXLANMTU is the MTU to set on the tunnel device. See + Configuring MTU [Default: 1440]' + type: integer + vxlanPort: + type: integer + vxlanVNI: + type: integer + wireguardEnabled: + description: 'WireguardEnabled controls whether Wireguard is enabled. + [Default: false]' + type: boolean + wireguardInterfaceName: + description: 'WireguardInterfaceName specifies the name to use for + the Wireguard interface. [Default: wg.calico]' + type: string + wireguardListeningPort: + description: 'WireguardListeningPort controls the listening port used + by Wireguard. [Default: 51820]' + type: integer + wireguardMTU: + description: 'WireguardMTU controls the MTU on the Wireguard interface. + See Configuring MTU [Default: 1420]' + type: integer + wireguardRoutingRulePriority: + description: 'WireguardRoutingRulePriority controls the priority value + to use for the Wireguard routing rule. [Default: 99]' + type: integer + xdpEnabled: + description: 'XDPEnabled enables XDP acceleration for suitable untracked + incoming deny rules. [Default: true]' + type: boolean + xdpRefreshInterval: + description: 'XDPRefreshInterval is the period at which Felix re-checks + all XDP state to ensure that no other process has accidentally broken + Calico''s BPF maps or attached programs. Set to 0 to disable XDP + refresh. [Default: 90s]' + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: globalnetworkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkPolicy + listKind: GlobalNetworkPolicyList + plural: globalnetworkpolicies + singular: globalnetworkpolicy + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + applyOnForward: + description: ApplyOnForward indicates to apply the rules in this policy + on forward traffic. + type: boolean + doNotTrack: + description: DoNotTrack indicates whether packets matched by the rules + in this policy should go through the data plane's connection tracking, + such as Linux conntrack. If True, the rules in this policy are + applied before any data plane connection tracking, and packets allowed + by this policy are marked as not to be tracked. + type: boolean + egress: + description: The ordered set of egress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". 
+ items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. 
+ It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." 
+ type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: The ordered set of ingress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. 
Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". 
\n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. 
For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. 
+ One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + required: + - action + type: object + type: array + namespaceSelector: + description: NamespaceSelector is an optional field for an expression + used to select a pod based on namespaces. + type: string + order: + description: Order is an optional field that specifies the order in + which the policy is applied. Policies with higher "order" are applied + after those with lower order. If the order is omitted, it may be + considered to be "infinite" - i.e. the policy will be applied last. Policies + with identical order will be applied in alphanumerical order based + on the Policy "Name". + type: number + preDNAT: + description: PreDNAT indicates to apply the rules in this policy before + any DNAT. + type: boolean + selector: + description: "The selector is an expression used to pick pick out + the endpoints that the policy should be applied to. \n Selector + expressions follow this syntax: \n \tlabel == \"string_literal\" + \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" + \ -> not equal; also matches if label is not present \tlabel in + { \"a\", \"b\", \"c\", ... } -> true if the value of label X is + one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", + ... } -> true if the value of label X is not one of \"a\", \"b\", + \"c\" \thas(label_name) -> True if that label is present \t! expr + -> negation of expr \texpr && expr -> Short-circuit and \texpr + || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() + or the empty selector -> matches all endpoints. \n Label names are + allowed to contain alphanumerics, -, _ and /. String literals are + more permissive but they do not support escape characters. \n Examples + (with made-up labels): \n \ttype == \"webserver\" && deployment + == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != + \"dev\" \t! has(label_name)" + type: string + serviceAccountSelector: + description: ServiceAccountSelector is an optional field for an expression + used to select a pod based on service accounts. + type: string + types: + description: "Types indicates whether this policy applies to ingress, + or to egress, or to both. 
When not explicitly specified (and so + the value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress rules are present in the policy. The + default is: \n - [ PolicyTypeIngress ], if there are no Egress rules + (including the case where there are also no Ingress rules) \n + - [ PolicyTypeEgress ], if there are Egress rules but no Ingress + rules \n - [ PolicyTypeIngress, PolicyTypeEgress ], if there are + both Ingress and Egress rules. \n When the policy is read back again, + Types will always be one of these values, never empty or nil." + items: + description: PolicyType enumerates the possible values of the PolicySpec + Types field. + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: globalnetworksets.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkSet + listKind: GlobalNetworkSetList + plural: globalnetworksets + singular: globalnetworkset + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs + that share labels to allow rules to refer to them via selectors. The labels + of GlobalNetworkSet are not namespaced. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: GlobalNetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: hostendpoints.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: HostEndpoint + listKind: HostEndpointList + plural: hostendpoints + singular: hostendpoint + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: HostEndpointSpec contains the specification for a HostEndpoint + resource. + properties: + expectedIPs: + description: "The expected IP addresses (IPv4 and IPv6) of the endpoint. + If \"InterfaceName\" is not present, Calico will look for an interface + matching any of the IPs in the list and apply policy to that. Note: + \tWhen using the selector match criteria in an ingress or egress + security Policy \tor Profile, Calico converts the selector into + a set of IP addresses. For host \tendpoints, the ExpectedIPs field + is used for that purpose. (If only the interface \tname is specified, + Calico does not learn the IPs of the interface for use in match + \tcriteria.)" + items: + type: string + type: array + interfaceName: + description: "Either \"*\", or the name of a specific Linux interface + to apply policy to; or empty. \"*\" indicates that this HostEndpoint + governs all traffic to, from or through the default network namespace + of the host named by the \"Node\" field; entering and leaving that + namespace via any interface, including those from/to non-host-networked + local workloads. \n If InterfaceName is not \"*\", this HostEndpoint + only governs traffic that enters or leaves the host through the + specific interface named by InterfaceName, or - when InterfaceName + is empty - through the specific interface that has one of the IPs + in ExpectedIPs. Therefore, when InterfaceName is empty, at least + one expected IP must be specified. Only external interfaces (such + as \"eth0\") are supported here; it isn't possible for a HostEndpoint + to protect traffic through a specific local workload interface. + \n Note: Only some kinds of policy are implemented for \"*\" HostEndpoints; + initially just pre-DNAT policy. Please check Calico documentation + for the latest position." + type: string + node: + description: The node name identifying the Calico node instance. + type: string + ports: + description: Ports contains the endpoint's named ports, which may + be referenced in security policy rules. + items: + properties: + name: + type: string + port: + type: integer + protocol: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + required: + - name + - port + - protocol + type: object + type: array + profiles: + description: A list of identifiers of security Profile objects that + apply to this endpoint. Each profile is applied in the order that + they appear in this list. Profile rules are applied after the selector-based + security policy. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamblocks.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMBlock + listKind: IPAMBlockList + plural: ipamblocks + singular: ipamblock + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMBlockSpec contains the specification for an IPAMBlock + resource. + properties: + affinity: + type: string + allocations: + items: + type: integer + # TODO: This nullable is manually added in. We should update controller-gen + # to handle []*int properly itself. + nullable: true + type: array + attributes: + items: + properties: + handle_id: + type: string + secondary: + additionalProperties: + type: string + type: object + type: object + type: array + cidr: + type: string + deleted: + type: boolean + strictAffinity: + type: boolean + unallocated: + items: + type: integer + type: array + required: + - allocations + - attributes + - cidr + - strictAffinity + - unallocated + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamconfigs.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMConfig + listKind: IPAMConfigList + plural: ipamconfigs + singular: ipamconfig + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMConfigSpec contains the specification for an IPAMConfig + resource. + properties: + autoAllocateBlocks: + type: boolean + maxBlocksPerHost: + description: MaxBlocksPerHost, if non-zero, is the max number of blocks + that can be affine to each host. + type: integer + strictAffinity: + type: boolean + required: + - autoAllocateBlocks + - strictAffinity + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamhandles.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMHandle + listKind: IPAMHandleList + plural: ipamhandles + singular: ipamhandle + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMHandleSpec contains the specification for an IPAMHandle + resource. + properties: + block: + additionalProperties: + type: integer + type: object + deleted: + type: boolean + handleID: + type: string + required: + - block + - handleID + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ippools.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPPool + listKind: IPPoolList + plural: ippools + singular: ippool + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPPoolSpec contains the specification for an IPPool resource. + properties: + blockSize: + description: The block size to use for IP address assignments from + this pool. Defaults to 26 for IPv4 and 112 for IPv6. + type: integer + cidr: + description: The pool CIDR. + type: string + disabled: + description: When disabled is true, Calico IPAM will not assign addresses + from this pool. + type: boolean + ipip: + description: 'Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only.' + properties: + enabled: + description: When enabled is true, ipip tunneling will be used + to deliver packets to destinations within this pool. + type: boolean + mode: + description: The IPIP mode. This can be one of "always" or "cross-subnet". A + mode of "always" will also use IPIP tunneling for routing to + destination IP addresses within this pool. A mode of "cross-subnet" + will only use IPIP tunneling when the destination node is on + a different subnet to the originating node. The default value + (if not specified) is "always". + type: string + type: object + ipipMode: + description: Contains configuration for IPIP tunneling for this pool. + If not specified, then this is defaulted to "Never" (i.e. IPIP tunneling + is disabled). + type: string + nat-outgoing: + description: 'Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only.' 
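+# Illustrative documentation comment (not part of the upstream Calico manifest): a hedged
+# sketch of an IPPool resource using the fields described in this schema. The name, CIDR,
+# and mode values below are hypothetical placeholders, not defaults applied by this patch.
+#
+#   apiVersion: crd.projectcalico.org/v1
+#   kind: IPPool
+#   metadata:
+#     name: example-ipv4-pool
+#   spec:
+#     cidr: 10.128.0.0/16        # required field
+#     blockSize: 26              # per the description, 26 is the IPv4 default
+#     ipipMode: CrossSubnet      # assumed enum value; defaults to "Never" when omitted
+#     natOutgoing: true
+#     nodeSelector: all()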
+ type: boolean + natOutgoing: + description: When nat-outgoing is true, packets sent from Calico networked + containers in this pool to destinations outside of this pool will + be masqueraded. + type: boolean + nodeSelector: + description: Allows IPPool to allocate for a specific node by label + selector. + type: string + vxlanMode: + description: Contains configuration for VXLAN tunneling for this pool. + If not specified, then this is defaulted to "Never" (i.e. VXLAN + tunneling is disabled). + type: string + required: + - cidr + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: kubecontrollersconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: KubeControllersConfiguration + listKind: KubeControllersConfigurationList + plural: kubecontrollersconfigurations + singular: kubecontrollersconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: KubeControllersConfigurationSpec contains the values of the + Kubernetes controllers configuration. + properties: + controllers: + description: Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: Namespace enables and configures the namespace controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + node: + description: Node enables and configures the node controller. + Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: HostEndpoint controls syncing nodes to host endpoints. + Disabled by default, set to nil to disable. + properties: + autoCreate: + description: 'AutoCreate enables automatic creation of + host endpoints for every node. [Default: Disabled]' + type: string + type: object + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + syncLabels: + description: 'SyncLabels controls whether to copy Kubernetes + node labels to Calico nodes. [Default: Enabled]' + type: string + type: object + policy: + description: Policy enables and configures the policy controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + serviceAccount: + description: ServiceAccount enables and configures the service + account controller. 
Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + workloadEndpoint: + description: WorkloadEndpoint enables and configures the workload + endpoint controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + type: object + etcdV3CompactionPeriod: + description: 'EtcdV3CompactionPeriod is the period between etcdv3 + compaction requests. Set to 0 to disable. [Default: 10m]' + type: string + healthChecks: + description: 'HealthChecks enables or disables support for health + checks [Default: Enabled]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]' + type: string + required: + - controllers + type: object + status: + description: KubeControllersConfigurationStatus represents the status + of the configuration. It's useful for admins to be able to see the actual + config that was applied, which can be modified by environment variables + on the kube-controllers process. + properties: + environmentVars: + additionalProperties: + type: string + description: EnvironmentVars contains the environment variables on + the kube-controllers that influenced the RunningConfig. + type: object + runningConfig: + description: RunningConfig contains the effective config that is running + in the kube-controllers pod, after merging the API resource with + any environment variables. + properties: + controllers: + description: Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: Namespace enables and configures the namespace + controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + node: + description: Node enables and configures the node controller. + Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: HostEndpoint controls syncing nodes to host + endpoints. Disabled by default, set to nil to disable. + properties: + autoCreate: + description: 'AutoCreate enables automatic creation + of host endpoints for every node. [Default: Disabled]' + type: string + type: object + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + syncLabels: + description: 'SyncLabels controls whether to copy Kubernetes + node labels to Calico nodes. [Default: Enabled]' + type: string + type: object + policy: + description: Policy enables and configures the policy controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + serviceAccount: + description: ServiceAccount enables and configures the service + account controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. 
[Default: + 5m]' + type: string + type: object + workloadEndpoint: + description: WorkloadEndpoint enables and configures the workload + endpoint controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + type: object + etcdV3CompactionPeriod: + description: 'EtcdV3CompactionPeriod is the period between etcdv3 + compaction requests. Set to 0 to disable. [Default: 10m]' + type: string + healthChecks: + description: 'HealthChecks enables or disables support for health + checks [Default: Enabled]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which + logs are sent to the stdout. [Default: Info]' + type: string + required: + - controllers + type: object + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: networkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: NetworkPolicy + listKind: NetworkPolicyList + plural: networkpolicies + singular: networkpolicy + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + egress: + description: The ordered set of egress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. 
+ \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. 
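+# Illustrative documentation comment (not part of the upstream Calico manifest): a hedged
+# sketch of the HTTP match shape described below; the methods and paths are hypothetical,
+# and HTTP criteria are generally only enforced when Calico application layer policy is
+# enabled.
+#
+#   http:
+#     methods: ["GET", "POST"]
+#     paths:
+#       - exact: /healthz
+#       - prefix: /api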
+ properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. 
When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. 
+ items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: The ordered set of ingress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." 
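+# Illustrative documentation comment (not part of the upstream Calico manifest): as the
+# Ports description above notes, a rule that matches on ports must also set its Protocol
+# to TCP or UDP. A hedged sketch of such a rule, with hypothetical selector and ports:
+#
+#   - action: Allow
+#     protocol: TCP
+#     source:
+#       selector: app == "frontend"
+#     destination:
+#       ports: [80, 443]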
+ items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). 
+ type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. 
+ type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + required: + - action + type: object + type: array + order: + description: Order is an optional field that specifies the order in + which the policy is applied. Policies with higher "order" are applied + after those with lower order. If the order is omitted, it may be + considered to be "infinite" - i.e. the policy will be applied last. Policies + with identical order will be applied in alphanumerical order based + on the Policy "Name". + type: number + selector: + description: "The selector is an expression used to pick out + the endpoints that the policy should be applied to. \n Selector + expressions follow this syntax: \n \tlabel == \"string_literal\" + \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" + \ -> not equal; also matches if label is not present \tlabel in + { \"a\", \"b\", \"c\", ... } -> true if the value of label X is + one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", + ... } -> true if the value of label X is not one of \"a\", \"b\", + \"c\" \thas(label_name) -> True if that label is present \t!
expr + -> negation of expr \texpr && expr -> Short-circuit and \texpr + || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() + or the empty selector -> matches all endpoints. \n Label names are + allowed to contain alphanumerics, -, _ and /. String literals are + more permissive but they do not support escape characters. \n Examples + (with made-up labels): \n \ttype == \"webserver\" && deployment + == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != + \"dev\" \t! has(label_name)" + type: string + serviceAccountSelector: + description: ServiceAccountSelector is an optional field for an expression + used to select a pod based on service accounts. + type: string + types: + description: "Types indicates whether this policy applies to ingress, + or to egress, or to both. When not explicitly specified (and so + the value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress are present in the policy. The default + is: \n - [ PolicyTypeIngress ], if there are no Egress rules (including + the case where there are also no Ingress rules) \n - [ PolicyTypeEgress + ], if there are Egress rules but no Ingress rules \n - [ PolicyTypeIngress, + PolicyTypeEgress ], if there are both Ingress and Egress rules. + \n When the policy is read back again, Types will always be one + of these values, never empty or nil." + items: + description: PolicyType enumerates the possible values of the PolicySpec + Types field. + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: networksets.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: NetworkSet + listKind: NetworkSetList + plural: networksets + singular: networkset + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: NetworkSet is the Namespaced-equivalent of the GlobalNetworkSet. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +--- +# Source: calico/templates/calico-kube-controllers-rbac.yaml + +# Include a clusterrole for the kube-controllers component, +# and bind it to the calico-kube-controllers serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +rules: + # Nodes are watched to monitor for deletions. 
+ - apiGroups: [""] + resources: + - nodes + verbs: + - watch + - list + - get + # Pods are queried to check for existence. + - apiGroups: [""] + resources: + - pods + verbs: + - get + # IPAM resources are manipulated when nodes are deleted. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + verbs: + - list + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - watch + # kube-controllers manages hostendpoints. + - apiGroups: ["crd.projectcalico.org"] + resources: + - hostendpoints + verbs: + - get + - list + - create + - update + - delete + # Needs access to update clusterinformations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - clusterinformations + verbs: + - get + - create + - update + # KubeControllersConfiguration is where it gets its config + - apiGroups: ["crd.projectcalico.org"] + resources: + - kubecontrollersconfigurations + verbs: + # read its own config + - get + # create a default if none exists + - create + # update status + - update + # watch for changes + - watch + # calico-kube-controllers requires root privileges + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - oob-anyuid-psp +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-kube-controllers +subjects: +- kind: ServiceAccount + name: calico-kube-controllers + namespace: kube-system +--- + +--- +# Source: calico/templates/calico-node-rbac.yaml +# Include a clusterrole for the calico-node DaemonSet, +# and bind it to the calico-node serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-node +rules: + # The CNI plugin needs to get pods, nodes, and namespaces. + - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + verbs: + - get + - apiGroups: [""] + resources: + - endpoints + - services + verbs: + # Used to discover service IPs for advertisement. + - watch + - list + # Used to discover Typhas. + - get + # Pod CIDR auto-detection on kubeadm needs access to config maps. + - apiGroups: [""] + resources: + - configmaps + verbs: + - get + - apiGroups: [""] + resources: + - nodes/status + verbs: + # Needed for clearing NodeNetworkUnavailable flag. + - patch + # Calico stores some configuration information in node annotations. + - update + # Watch for changes to Kubernetes NetworkPolicies. + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Used by Calico for policy information. + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch + # The CNI plugin patches pods/status. + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + # Calico monitors various CRDs for config. + - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + verbs: + - get + - list + - watch + # Calico must create and update some CRDs on startup. 
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico stores some configuration information on the node. + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch + # These permissions are only required for upgrade from v2.6, and can + # be removed after upgrade or on fresh installations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update + # These permissions are required for Calico CNI to perform IPAM allocations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - apiGroups: ["crd.projectcalico.org"] + resources: + - ipamconfigs + verbs: + - get + # Block affinities must also be watchable by confd for route aggregation. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + verbs: + - watch + # The Calico IPAM migration needs to get daemonsets. These permissions can be + # removed if not upgrading from an installation using host-local IPAM. + - apiGroups: ["apps"] + resources: + - daemonsets + verbs: + - get + # calico-node requires full privileges + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - oob-privileged-psp + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: calico-node + namespace: kube-system + +{% if plugins.calico.typha.enabled | default(false) == true -%} +--- +# Source: calico/templates/calico-typha.yaml +# This manifest creates a Service, which will be backed by Calico's Typha daemon. +# Typha sits in between Felix and the API server, reducing Calico's load on the API server. + +apiVersion: v1 +kind: Service +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha +spec: + ports: + - port: 5473 + protocol: TCP + targetPort: calico-typha + name: calico-typha + selector: + k8s-app: calico-typha + +--- + +# This manifest creates a Deployment of Typha to back the above service. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha +spec: + # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the + # typha_service_name variable in the calico-config ConfigMap above. + # + # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential + # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In + # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade. + replicas: {{ plugins['calico']['typha']['replicas'] }} + revisionHistoryLimit: 2 + selector: + matchLabels: + k8s-app: calico-typha + template: + metadata: + labels: + k8s-app: calico-typha + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: 'true' + spec: + nodeSelector: + {{ plugins['calico']['typha']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) }} + hostNetwork: true + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + # Since Calico can't network a pod until Typha is up, we need to run Typha itself + # as a host-networked pod. 
+ serviceAccountName: calico-node + priorityClassName: system-cluster-critical + # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573 + securityContext: + fsGroup: 65534 + containers: + - image: {% if plugins['calico']['installation']['registry'] is defined and plugins['calico']['installation']['registry']|length %}{{ plugins['calico']['installation']['registry'] }}/{% endif %}{{ plugins['calico']['typha']['image'] }} + name: calico-typha + ports: + - containerPort: 5473 + name: calico-typha + protocol: TCP + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # Enable "info" logging by default. Can be set to "debug" to increase verbosity. + - name: TYPHA_LOGSEVERITYSCREEN + value: "info" + # Disable logging to file and syslog since those don't make sense in Kubernetes. + - name: TYPHA_LOGFILEPATH + value: "none" + - name: TYPHA_LOGSEVERITYSYS + value: "none" + # Monitor the Kubernetes API to find the number of running instances and rebalance + # connections. + - name: TYPHA_CONNECTIONREBALANCINGMODE + value: "kubernetes" + - name: TYPHA_DATASTORETYPE + value: "kubernetes" + - name: TYPHA_HEALTHENABLED + value: "true" + # Uncomment these lines to enable prometheus metrics. Since Typha is host-networked, + # this opens a port on the host, which may need to be secured. + #- name: TYPHA_PROMETHEUSMETRICSENABLED + # value: "true" + #- name: TYPHA_PROMETHEUSMETRICSPORT + # value: "9093" + livenessProbe: + httpGet: + path: /liveness + port: 9098 + host: localhost + periodSeconds: 30 + initialDelaySeconds: 30 + securityContext: + runAsNonRoot: true + allowPrivilegeEscalation: false + readinessProbe: + httpGet: + path: /readiness + port: 9098 + host: localhost + periodSeconds: 10 + +--- + +# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict + +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha +spec: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: calico-typha +{%- endif %} +--- +# Source: calico/templates/calico-node.yaml +# This manifest installs the calico-node container, as well +# as the CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: calico-node + namespace: kube-system + labels: + k8s-app: calico-node +spec: + selector: + matchLabels: + k8s-app: calico-node + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + k8s-app: calico-node + annotations: + # This, along with the CriticalAddonsOnly toleration below, + # marks the pod as a critical add-on, ensuring it gets + # priority scheduling and that its resources are reserved + # if it ever gets evicted. + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + nodeSelector: + kubernetes.io/os: linux + hostNetwork: true + tolerations: + # Make sure calico-node gets scheduled on all nodes. + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. 
+ - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + serviceAccountName: calico-node + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. + terminationGracePeriodSeconds: 0 + priorityClassName: system-node-critical + initContainers: + # This container performs upgrade from host-local IPAM to calico-ipam. + # It can be deleted if this is a fresh installation, or if you have already + # upgraded to use calico-ipam. + - name: upgrade-ipam + image: {% if plugins['calico']['installation']['registry'] is defined and plugins['calico']['installation']['registry']|length %}{{ plugins['calico']['installation']['registry'] }}/{% endif %}{{ plugins['calico']['cni']['image'] }} + command: ["/opt/cni/bin/calico-ipam", "-upgrade"] + env: + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + volumeMounts: + - mountPath: /var/lib/cni/networks + name: host-local-net-dir + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + securityContext: + privileged: true + # This container installs the CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: {% if plugins['calico']['installation']['registry'] is defined and plugins['calico']['installation']['registry']|length %}{{ plugins['calico']['installation']['registry'] }}/{% endif %}{{ plugins['calico']['cni']['image'] }} + command: ["/opt/cni/bin/install"] + env: + # Name of the CNI config file to create. + - name: CNI_CONF_NAME + value: "10-calico.conflist" + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: calico-config + key: cni_network_config + # Set the hostname based on the k8s node name. + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # CNI MTU Config variable + - name: CNI_MTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Prevents the container from sleeping forever. + - name: SLEEP + value: "false" + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + securityContext: + privileged: true + # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes + # to communicate with Felix over the Policy Sync API. + - name: flexvol-driver + image: {% if plugins['calico']['installation']['registry'] is defined and plugins['calico']['installation']['registry']|length %}{{ plugins['calico']['installation']['registry'] }}/{% endif %}{{ plugins['calico']['flexvol']['image'] }} + volumeMounts: + - name: flexvol-driver-host + mountPath: /host/driver + securityContext: + privileged: true + containers: + # Runs calico-node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: {% if plugins['calico']['installation']['registry'] is defined and plugins['calico']['installation']['registry']|length %}{{ plugins['calico']['installation']['registry'] }}/{% endif %}{{ plugins['calico']['node']['image'] }} + env: + # Use Kubernetes API as the backing datastore. 
+ - name: DATASTORE_TYPE + value: "{{ plugins['calico']['env']['DATASTORE_TYPE'] }}" +{%- if plugins.calico.typha.enabled | default(false) == true %} + # Typha support: controlled by the ConfigMap. + - name: FELIX_TYPHAK8SSERVICENAME + valueFrom: + configMapKeyRef: + name: calico-config + key: typha_service_name +{%- endif %} + # Wait for the datastore. + - name: WAIT_FOR_DATASTORE + value: "{{ plugins['calico']['env']['WAIT_FOR_DATASTORE'] }}" + # Set based on the k8s node name. + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Choose the backend to use. + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "{{ plugins['calico']['env']['CLUSTER_TYPE'] }}" + + # Set MTU for tunnel device used if ipip is enabled + - name: FELIX_IPINIPMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Set MTU for the VXLAN tunnel device. + - name: FELIX_VXLANMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + + - name: CALICO_ROUTER_ID + value: "{{ plugins['calico']['env']['CALICO_ROUTER_ID'] }}" + + # IPv4 OPTIONS + - name: IP + value: "{{ plugins['calico']['env']['IP'] }}" + - name: IP_AUTODETECTION_METHOD + value: "{{ plugins['calico']['env']['IP_AUTODETECTION_METHOD'] }}" + - name: CALICO_IPV4POOL_IPIP + value: "{{ plugins['calico']['env']['CALICO_IPV4POOL_IPIP'] }}" + - name: CALICO_IPV4POOL_VXLAN + value: "{{ plugins['calico']['env']['CALICO_IPV4POOL_VXLAN'] }}" + - name: CALICO_IPV4POOL_CIDR + value: "{{ plugins['calico']['env']['CALICO_IPV4POOL_CIDR'] }}" + +{%- if not services.kubeadm.networking.podSubnet|isipv4 %} + # IPv6 OPTIONS + - name: CALICO_IPV6POOL_CIDR + value: "{{ plugins['calico']['env']['CALICO_IPV6POOL_CIDR'] }}" + - name: IP6 + value: "{{ plugins['calico']['env']['IP6'] }}" + - name: IP6_AUTODETECTION_METHOD + value: "{{ plugins['calico']['env']['IP6_AUTODETECTION_METHOD'] }}" + - name: FELIX_IPV6SUPPORT + value: "{{ plugins['calico']['env']['FELIX_IPV6SUPPORT'] }}" + - name: CALICO_IPV6POOL_IPIP + value: "{{ plugins['calico']['env']['CALICO_IPV6POOL_IPIP'] }}" + - name: CALICO_IPV6POOL_VXLAN + value: "{{ plugins['calico']['env']['CALICO_IPV6POOL_VXLAN'] }}" +{%- endif %} + + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "{{ plugins['calico']['env']['CALICO_DISABLE_FILE_LOGGING'] }}" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "{{ plugins['calico']['env']['FELIX_DEFAULTENDPOINTTOHOSTACTION'] }}" + + + + - name: FELIX_HEALTHENABLED + value: "{{ plugins['calico']['env']['FELIX_HEALTHENABLED'] }}" + securityContext: + privileged: true + resources: + requests: + cpu: 250m + livenessProbe: + exec: + command: + - /bin/calico-node + - -felix-live + - -bird-live + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + readinessProbe: + exec: + command: + - /bin/calico-node + - -felix-ready + - -bird-ready + periodSeconds: 10 + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false + - name: policysync + mountPath: /var/run/nodeagent + volumes: + # Used by calico-node. 
+ - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + - name: var-lib-calico + hostPath: + path: /var/lib/calico + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + # Mount in the directory for host-local IPAM allocations. This is + # used when upgrading from host-local to calico-ipam, and can be removed + # if not using the upgrade-ipam init container. + - name: host-local-net-dir + hostPath: + path: /var/lib/cni/networks + # Used to create per-pod Unix Domain Sockets + - name: policysync + hostPath: + type: DirectoryOrCreate + path: /var/run/nodeagent + # Used to install Flex Volume Driver + - name: flexvol-driver-host + hostPath: + type: DirectoryOrCreate + path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-node + namespace: kube-system + +--- +# Source: calico/templates/calico-kube-controllers.yaml +# See https://github.com/projectcalico/kube-controllers +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers +spec: + # The controllers can only have a single active instance. + replicas: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers + strategy: + type: Recreate + template: + metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + nodeSelector: + {{ plugins['calico']['kube-controllers']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) }} + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + serviceAccountName: calico-kube-controllers + priorityClassName: system-cluster-critical + containers: + - name: calico-kube-controllers + image: {% if plugins['calico']['installation']['registry'] is defined and plugins['calico']['installation']['registry']|length %}{{ plugins['calico']['installation']['registry'] }}/{% endif %}{{ plugins['calico']['kube-controllers']['image'] }} + env: + # Choose which controllers to run. 
+ - name: ENABLED_CONTROLLERS + value: node + - name: DATASTORE_TYPE + value: kubernetes + livenessProbe: + exec: + command: + - /usr/bin/check-status + - -l + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + readinessProbe: + exec: + command: + - /usr/bin/check-status + - -r + periodSeconds: 10 + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-kube-controllers + namespace: kube-system + +--- +# Source: calico/templates/calico-etcd-secrets.yaml + +--- +# Source: calico/templates/calico-typha.yaml + +--- +# Source: calico/templates/configure-canal.yaml + + diff --git a/kubetool/templates/plugins/calicoctl.cfg.j2 b/kubetool/templates/plugins/calicoctl.cfg.j2 new file mode 100644 index 000000000..037b3e95a --- /dev/null +++ b/kubetool/templates/plugins/calicoctl.cfg.j2 @@ -0,0 +1,6 @@ +apiVersion: projectcalico.org/v3 +kind: CalicoAPIConfig +metadata: +spec: + datastoreType: "kubernetes" + kubeconfig: "/root/.kube/config" diff --git a/kubetool/templates/plugins/dashboard-ingress.yaml.j2 b/kubetool/templates/plugins/dashboard-ingress.yaml.j2 new file mode 100644 index 000000000..77cebcb54 --- /dev/null +++ b/kubetool/templates/plugins/dashboard-ingress.yaml.j2 @@ -0,0 +1,4 @@ +--- +apiVersion: extensions/v1beta1 +kind: Ingress +{{ plugins['kubernetes-dashboard']['ingress']|toyaml }} diff --git a/kubetool/templates/plugins/dashboard-v2.0.yaml.j2 b/kubetool/templates/plugins/dashboard-v2.0.yaml.j2 new file mode 100644 index 000000000..0451464b4 --- /dev/null +++ b/kubetool/templates/plugins/dashboard-v2.0.yaml.j2 @@ -0,0 +1,304 @@ +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Source: https://raw.githubusercontent.com/kubernetes/dashboard/v2.1.0/aio/deploy/recommended.yaml + +apiVersion: v1 +kind: Namespace +metadata: + name: kubernetes-dashboard + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard + +--- + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard +spec: + ports: + - port: 443 + targetPort: 8443 + selector: + k8s-app: kubernetes-dashboard + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-certs + namespace: kubernetes-dashboard +type: Opaque + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-csrf + namespace: kubernetes-dashboard +type: Opaque +data: + csrf: "" + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-key-holder + namespace: kubernetes-dashboard +type: Opaque + +--- + +kind: ConfigMap +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-settings + namespace: kubernetes-dashboard + +--- + +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard +rules: + # Allow Dashboard to get, update and delete Dashboard exclusive secrets. + - apiGroups: [""] + resources: ["secrets"] + resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"] + verbs: ["get", "update", "delete"] + # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. + - apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["kubernetes-dashboard-settings"] + verbs: ["get", "update"] + # Allow Dashboard to get metrics. 
+ - apiGroups: [""] + resources: ["services"] + resourceNames: ["heapster", "dashboard-metrics-scraper"] + verbs: ["proxy"] + - apiGroups: [""] + resources: ["services/proxy"] + resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"] + verbs: ["get"] + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard +rules: + # Allow Metrics Scraper to get metrics from the Metrics server + - apiGroups: ["metrics.k8s.io"] + resources: ["pods", "nodes"] + verbs: ["get", "list", "watch"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kubernetes-dashboard + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kubernetes-dashboard + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: kubernetes-dashboard + template: + metadata: + labels: + k8s-app: kubernetes-dashboard + spec: + containers: + - name: kubernetes-dashboard + image: {% if plugins['kubernetes-dashboard']['installation']['registry'] is defined and plugins['kubernetes-dashboard']['installation']['registry']|length %}{{ plugins['kubernetes-dashboard']['installation']['registry'] }}/{% endif %}{{ plugins['kubernetes-dashboard']['dashboard']['image'] }} + imagePullPolicy: Always + ports: + - containerPort: 8443 + protocol: TCP + args: + - --auto-generate-certificates + - --namespace=kubernetes-dashboard + # Uncomment the following line to manually specify Kubernetes API server Host + # If not specified, Dashboard will attempt to auto discover the API server and connect + # to it. Uncomment only if the default does not work. 
+ # - --apiserver-host=http://my-address:port + volumeMounts: + - name: kubernetes-dashboard-certs + mountPath: /certs + # Create on-disk volume to store exec logs + - mountPath: /tmp + name: tmp-volume + livenessProbe: + httpGet: + scheme: HTTPS + path: / + port: 8443 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + volumes: + - name: kubernetes-dashboard-certs + secret: + secretName: kubernetes-dashboard-certs + - name: tmp-volume + emptyDir: {} + serviceAccountName: kubernetes-dashboard + nodeSelector: + {{ plugins['kubernetes-dashboard']['dashboard']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) }} + {% if plugins['kubernetes-dashboard']['dashboard']['tolerations'] is defined -%} + tolerations: + {{ plugins['kubernetes-dashboard']['dashboard']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + +--- + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: dashboard-metrics-scraper + name: dashboard-metrics-scraper + namespace: kubernetes-dashboard +spec: + ports: + - port: 8000 + targetPort: 8000 + selector: + k8s-app: dashboard-metrics-scraper + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: dashboard-metrics-scraper + name: dashboard-metrics-scraper + namespace: kubernetes-dashboard +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: dashboard-metrics-scraper + template: + metadata: + labels: + k8s-app: dashboard-metrics-scraper + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' + spec: + containers: + - name: dashboard-metrics-scraper + image: {% if plugins['kubernetes-dashboard']['installation']['registry'] is defined and plugins['kubernetes-dashboard']['installation']['registry']|length %}{{ plugins['kubernetes-dashboard']['installation']['registry'] }}/{% endif %}{{ plugins['kubernetes-dashboard']['metrics-scraper']['image'] }} + ports: + - containerPort: 8000 + protocol: TCP + livenessProbe: + httpGet: + scheme: HTTP + path: / + port: 8000 + initialDelaySeconds: 30 + timeoutSeconds: 30 + volumeMounts: + - mountPath: /tmp + name: tmp-volume + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + serviceAccountName: kubernetes-dashboard + nodeSelector: + {{ plugins['kubernetes-dashboard']['metrics-scraper']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) }} + {% if plugins['kubernetes-dashboard']['metrics-scraper']['tolerations'] is defined -%} + tolerations: + {{ plugins['kubernetes-dashboard']['metrics-scraper']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + volumes: + - name: tmp-volume + emptyDir: {} diff --git a/kubetool/templates/plugins/dashboard-v2.1.yaml.j2 b/kubetool/templates/plugins/dashboard-v2.1.yaml.j2 new file mode 100644 index 000000000..0451464b4 --- /dev/null +++ b/kubetool/templates/plugins/dashboard-v2.1.yaml.j2 @@ -0,0 +1,304 @@ +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Source: https://raw.githubusercontent.com/kubernetes/dashboard/v2.1.0/aio/deploy/recommended.yaml + +apiVersion: v1 +kind: Namespace +metadata: + name: kubernetes-dashboard + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard + +--- + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard +spec: + ports: + - port: 443 + targetPort: 8443 + selector: + k8s-app: kubernetes-dashboard + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-certs + namespace: kubernetes-dashboard +type: Opaque + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-csrf + namespace: kubernetes-dashboard +type: Opaque +data: + csrf: "" + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-key-holder + namespace: kubernetes-dashboard +type: Opaque + +--- + +kind: ConfigMap +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-settings + namespace: kubernetes-dashboard + +--- + +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard +rules: + # Allow Dashboard to get, update and delete Dashboard exclusive secrets. + - apiGroups: [""] + resources: ["secrets"] + resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"] + verbs: ["get", "update", "delete"] + # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. + - apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["kubernetes-dashboard-settings"] + verbs: ["get", "update"] + # Allow Dashboard to get metrics. 
+ - apiGroups: [""] + resources: ["services"] + resourceNames: ["heapster", "dashboard-metrics-scraper"] + verbs: ["proxy"] + - apiGroups: [""] + resources: ["services/proxy"] + resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"] + verbs: ["get"] + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard +rules: + # Allow Metrics Scraper to get metrics from the Metrics server + - apiGroups: ["metrics.k8s.io"] + resources: ["pods", "nodes"] + verbs: ["get", "list", "watch"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kubernetes-dashboard + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kubernetes-dashboard + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: kubernetes-dashboard + template: + metadata: + labels: + k8s-app: kubernetes-dashboard + spec: + containers: + - name: kubernetes-dashboard + image: {% if plugins['kubernetes-dashboard']['installation']['registry'] is defined and plugins['kubernetes-dashboard']['installation']['registry']|length %}{{ plugins['kubernetes-dashboard']['installation']['registry'] }}/{% endif %}{{ plugins['kubernetes-dashboard']['dashboard']['image'] }} + imagePullPolicy: Always + ports: + - containerPort: 8443 + protocol: TCP + args: + - --auto-generate-certificates + - --namespace=kubernetes-dashboard + # Uncomment the following line to manually specify Kubernetes API server Host + # If not specified, Dashboard will attempt to auto discover the API server and connect + # to it. Uncomment only if the default does not work. 
+ # - --apiserver-host=http://my-address:port + volumeMounts: + - name: kubernetes-dashboard-certs + mountPath: /certs + # Create on-disk volume to store exec logs + - mountPath: /tmp + name: tmp-volume + livenessProbe: + httpGet: + scheme: HTTPS + path: / + port: 8443 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + volumes: + - name: kubernetes-dashboard-certs + secret: + secretName: kubernetes-dashboard-certs + - name: tmp-volume + emptyDir: {} + serviceAccountName: kubernetes-dashboard + nodeSelector: + {{ plugins['kubernetes-dashboard']['dashboard']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) }} + {% if plugins['kubernetes-dashboard']['dashboard']['tolerations'] is defined -%} + tolerations: + {{ plugins['kubernetes-dashboard']['dashboard']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + +--- + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: dashboard-metrics-scraper + name: dashboard-metrics-scraper + namespace: kubernetes-dashboard +spec: + ports: + - port: 8000 + targetPort: 8000 + selector: + k8s-app: dashboard-metrics-scraper + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: dashboard-metrics-scraper + name: dashboard-metrics-scraper + namespace: kubernetes-dashboard +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: dashboard-metrics-scraper + template: + metadata: + labels: + k8s-app: dashboard-metrics-scraper + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' + spec: + containers: + - name: dashboard-metrics-scraper + image: {% if plugins['kubernetes-dashboard']['installation']['registry'] is defined and plugins['kubernetes-dashboard']['installation']['registry']|length %}{{ plugins['kubernetes-dashboard']['installation']['registry'] }}/{% endif %}{{ plugins['kubernetes-dashboard']['metrics-scraper']['image'] }} + ports: + - containerPort: 8000 + protocol: TCP + livenessProbe: + httpGet: + scheme: HTTP + path: / + port: 8000 + initialDelaySeconds: 30 + timeoutSeconds: 30 + volumeMounts: + - mountPath: /tmp + name: tmp-volume + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + serviceAccountName: kubernetes-dashboard + nodeSelector: + {{ plugins['kubernetes-dashboard']['metrics-scraper']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) }} + {% if plugins['kubernetes-dashboard']['metrics-scraper']['tolerations'] is defined -%} + tolerations: + {{ plugins['kubernetes-dashboard']['metrics-scraper']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + volumes: + - name: tmp-volume + emptyDir: {} diff --git a/kubetool/templates/plugins/dashboard-v2.3.yaml.j2 b/kubetool/templates/plugins/dashboard-v2.3.yaml.j2 new file mode 100644 index 000000000..2bc9bef14 --- /dev/null +++ b/kubetool/templates/plugins/dashboard-v2.3.yaml.j2 @@ -0,0 +1,304 @@ +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Source: https://raw.githubusercontent.com/kubernetes/dashboard/v2.3.1/aio/deploy/recommended.yaml + +apiVersion: v1 +kind: Namespace +metadata: + name: kubernetes-dashboard + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard + +--- + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard +spec: + ports: + - port: 443 + targetPort: 8443 + selector: + k8s-app: kubernetes-dashboard + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-certs + namespace: kubernetes-dashboard +type: Opaque + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-csrf + namespace: kubernetes-dashboard +type: Opaque +data: + csrf: "" + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-key-holder + namespace: kubernetes-dashboard +type: Opaque + +--- + +kind: ConfigMap +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-settings + namespace: kubernetes-dashboard + +--- + +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard +rules: + # Allow Dashboard to get, update and delete Dashboard exclusive secrets. + - apiGroups: [""] + resources: ["secrets"] + resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"] + verbs: ["get", "update", "delete"] + # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. + - apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["kubernetes-dashboard-settings"] + verbs: ["get", "update"] + # Allow Dashboard to get metrics. 
+ - apiGroups: [""] + resources: ["services"] + resourceNames: ["heapster", "dashboard-metrics-scraper"] + verbs: ["proxy"] + - apiGroups: [""] + resources: ["services/proxy"] + resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"] + verbs: ["get"] + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard +rules: + # Allow Metrics Scraper to get metrics from the Metrics server + - apiGroups: ["metrics.k8s.io"] + resources: ["pods", "nodes"] + verbs: ["get", "list", "watch"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kubernetes-dashboard + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kubernetes-dashboard + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: kubernetes-dashboard + template: + metadata: + labels: + k8s-app: kubernetes-dashboard + spec: + containers: + - name: kubernetes-dashboard + image: {% if plugins['kubernetes-dashboard']['installation']['registry'] is defined and plugins['kubernetes-dashboard']['installation']['registry']|length %}{{ plugins['kubernetes-dashboard']['installation']['registry'] }}/{% endif %}{{ plugins['kubernetes-dashboard']['dashboard']['image'] }} + imagePullPolicy: Always + ports: + - containerPort: 8443 + protocol: TCP + args: + - --auto-generate-certificates + - --namespace=kubernetes-dashboard + # Uncomment the following line to manually specify Kubernetes API server Host + # If not specified, Dashboard will attempt to auto discover the API server and connect + # to it. Uncomment only if the default does not work. 
+ # - --apiserver-host=http://my-address:port + volumeMounts: + - name: kubernetes-dashboard-certs + mountPath: /certs + # Create on-disk volume to store exec logs + - mountPath: /tmp + name: tmp-volume + livenessProbe: + httpGet: + scheme: HTTPS + path: / + port: 8443 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + volumes: + - name: kubernetes-dashboard-certs + secret: + secretName: kubernetes-dashboard-certs + - name: tmp-volume + emptyDir: {} + serviceAccountName: kubernetes-dashboard + nodeSelector: + {{ plugins['kubernetes-dashboard']['dashboard']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) }} + {% if plugins['kubernetes-dashboard']['dashboard']['tolerations'] is defined -%} + tolerations: + {{ plugins['kubernetes-dashboard']['dashboard']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + +--- + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: dashboard-metrics-scraper + name: dashboard-metrics-scraper + namespace: kubernetes-dashboard +spec: + ports: + - port: 8000 + targetPort: 8000 + selector: + k8s-app: dashboard-metrics-scraper + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: dashboard-metrics-scraper + name: dashboard-metrics-scraper + namespace: kubernetes-dashboard +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: dashboard-metrics-scraper + template: + metadata: + labels: + k8s-app: dashboard-metrics-scraper + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' + spec: + containers: + - name: dashboard-metrics-scraper + image: {% if plugins['kubernetes-dashboard']['installation']['registry'] is defined and plugins['kubernetes-dashboard']['installation']['registry']|length %}{{ plugins['kubernetes-dashboard']['installation']['registry'] }}/{% endif %}{{ plugins['kubernetes-dashboard']['metrics-scraper']['image'] }} + ports: + - containerPort: 8000 + protocol: TCP + livenessProbe: + httpGet: + scheme: HTTP + path: / + port: 8000 + initialDelaySeconds: 30 + timeoutSeconds: 30 + volumeMounts: + - mountPath: /tmp + name: tmp-volume + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + serviceAccountName: kubernetes-dashboard + nodeSelector: + {{ plugins['kubernetes-dashboard']['metrics-scraper']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) }} + {% if plugins['kubernetes-dashboard']['metrics-scraper']['tolerations'] is defined -%} + tolerations: + {{ plugins['kubernetes-dashboard']['metrics-scraper']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + volumes: + - name: tmp-volume + emptyDir: {} diff --git a/kubetool/templates/plugins/flannel.yaml.j2 b/kubetool/templates/plugins/flannel.yaml.j2 new file mode 100644 index 000000000..1a46885d1 --- /dev/null +++ b/kubetool/templates/plugins/flannel.yaml.j2 @@ -0,0 +1,605 @@ +# Source: https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml + +--- + +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: psp.flannel.unprivileged + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default + seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default + apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default + 
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default +spec: + privileged: false + volumes: + - configMap + - secret + - emptyDir + - hostPath + allowedHostPaths: + - pathPrefix: "/etc/cni/net.d" + - pathPrefix: "/etc/kube-flannel" + - pathPrefix: "/run/flannel" + readOnlyRootFilesystem: false + # Users and groups + runAsUser: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + fsGroup: + rule: RunAsAny + # Privilege Escalation + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + # Capabilities + allowedCapabilities: ['NET_ADMIN'] + defaultAddCapabilities: [] + requiredDropCapabilities: [] + # Host namespaces + hostPID: false + hostIPC: false + hostNetwork: true + hostPorts: + - min: 0 + max: 65535 + # SELinux + seLinux: + # SELinux is unused in CaaSP + rule: 'RunAsAny' +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: flannel +rules: + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: ['psp.flannel.unprivileged'] + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: flannel + namespace: kube-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: flannel + namespace: kube-system +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: kube-flannel-cfg + namespace: kube-system + labels: + tier: node + app: flannel +data: + cni-conf.json: | + { + "name": "cbr0", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + net-conf.json: | + { + "Network": "{{ services["kubeadm"]["networking"]["podSubnet"] }}", + "Backend": { + "Type": "vxlan" + } + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-flannel-ds-amd64 + namespace: kube-system + labels: + tier: node + app: flannel +spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/os + operator: In + values: + - linux + - key: beta.kubernetes.io/arch + operator: In + values: + - amd64 + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: {% if plugins['flannel']['installation']['registry'] is defined and plugins['flannel']['installation']['registry']|length %}{{ plugins['flannel']['installation']['registry'] }}/{% endif %}{{ plugins['flannel']['image'] }} + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: {% if plugins['flannel']['installation']['registry'] is defined and plugins['flannel']['installation']['registry']|length %}{{ plugins['flannel']['installation']['registry'] }}/{% endif %}{{ 
plugins['flannel']['image'] }} + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-flannel-ds-arm64 + namespace: kube-system + labels: + tier: node + app: flannel +spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/os + operator: In + values: + - linux + - key: beta.kubernetes.io/arch + operator: In + values: + - arm64 + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.11.0-arm64 + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.11.0-arm64 + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-flannel-ds-arm + namespace: kube-system + labels: + tier: node + app: flannel +spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/os + operator: In + values: + - linux + - key: beta.kubernetes.io/arch + operator: In + values: + - arm + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.11.0-arm + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.11.0-arm + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: 
"50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-flannel-ds-ppc64le + namespace: kube-system + labels: + tier: node + app: flannel +spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/os + operator: In + values: + - linux + - key: beta.kubernetes.io/arch + operator: In + values: + - ppc64le + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.11.0-ppc64le + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.11.0-ppc64le + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-flannel-ds-s390x + namespace: kube-system + labels: + tier: node + app: flannel +spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/os + operator: In + values: + - linux + - key: beta.kubernetes.io/arch + operator: In + values: + - s390x + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.11.0-s390x + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.11.0-s390x + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN"] + env: + - name: 
POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg \ No newline at end of file diff --git a/kubetool/templates/plugins/haproxy-ingress-controller.yaml.j2 b/kubetool/templates/plugins/haproxy-ingress-controller.yaml.j2 new file mode 100644 index 000000000..94942ba8a --- /dev/null +++ b/kubetool/templates/plugins/haproxy-ingress-controller.yaml.j2 @@ -0,0 +1,225 @@ +# Source: https://github.com/haproxytech/kubernetes-ingress/blob/master/deploy/haproxy-ingress.yaml +--- +apiVersion: v1 +kind: Namespace +metadata: + name: haproxy-controller + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: haproxy-ingress-service-account + namespace: haproxy-controller + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: haproxy-ingress-cluster-role +rules: +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - services + - namespaces + - events + - serviceaccounts + verbs: + - get + - list + - watch +- apiGroups: + - "extensions" + resources: + - ingresses + - ingresses/status + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - create + - patch + - update +- apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - watch +# haproxy requires host network access +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: ['oob-host-network-psp'] +# haproxy requires root privileges +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: ['oob-anyuid-psp'] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: haproxy-ingress-cluster-role-binding + namespace: haproxy-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: haproxy-ingress-cluster-role +subjects: +- kind: ServiceAccount + name: haproxy-ingress-service-account + namespace: haproxy-controller + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: haproxy-configmap + namespace: default +data: + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + run: ingress-default-backend + name: ingress-default-backend + namespace: haproxy-controller +spec: + replicas: 1 + selector: + matchLabels: + run: ingress-default-backend + template: + metadata: + labels: + run: ingress-default-backend + spec: + serviceAccountName: haproxy-ingress-service-account + nodeSelector: + {{ plugins['haproxy-ingress-controller']['backend']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) }} + {% if plugins['haproxy-ingress-controller']['backend']['tolerations'] is defined -%} + tolerations: + {{ plugins['haproxy-ingress-controller']['backend']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + containers: + - name: ingress-default-backend + image: {% if plugins['haproxy-ingress-controller']['installation']['registry'] is defined and plugins['haproxy-ingress-controller']['installation']['registry']|length %}{{ plugins['haproxy-ingress-controller']['installation']['registry'] }}/{% endif %}{{ 
plugins['haproxy-ingress-controller']['backend']['image'] }} + ports: + - containerPort: 8080 + +--- +apiVersion: v1 +kind: Service +metadata: + labels: + run: ingress-default-backend + name: ingress-default-backend + namespace: haproxy-controller +spec: + selector: + run: ingress-default-backend + ports: + - name: port-1 + port: 8080 + protocol: TCP + targetPort: 8080 + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + run: haproxy-ingress + name: haproxy-ingress + namespace: haproxy-controller +spec: + replicas: 1 + selector: + matchLabels: + run: haproxy-ingress + template: + metadata: + labels: + run: haproxy-ingress + spec: + serviceAccountName: haproxy-ingress-service-account + nodeSelector: + {{ plugins['haproxy-ingress-controller']['controller']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) }} + {% if plugins['haproxy-ingress-controller']['controller']['tolerations'] is defined -%} + tolerations: + {{ plugins['haproxy-ingress-controller']['controller']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + containers: + - name: haproxy-ingress + image: {% if plugins['haproxy-ingress-controller']['installation']['registry'] is defined and plugins['haproxy-ingress-controller']['installation']['registry']|length %}{{ plugins['haproxy-ingress-controller']['installation']['registry'] }}/{% endif %}{{ plugins['haproxy-ingress-controller']['controller']['image'] }} + args: + - --configmap=default/haproxy-configmap + - --default-backend-service=haproxy-controller/ingress-default-backend + resources: + requests: + cpu: "500m" + memory: "50Mi" + livenessProbe: + httpGet: + path: /healthz + port: 1042 + ports: + - name: http + containerPort: 80 + - name: https + containerPort: 443 + - name: stat + containerPort: 1024 + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + +--- +apiVersion: v1 +kind: Service +metadata: + labels: + run: haproxy-ingress + name: haproxy-ingress + namespace: haproxy-controller +spec: + selector: + run: haproxy-ingress + type: NodePort + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + - name: stat + port: 1024 + protocol: TCP + targetPort: 1024 diff --git a/kubetool/templates/plugins/iperf3.yaml.j2 b/kubetool/templates/plugins/iperf3.yaml.j2 new file mode 100644 index 000000000..991d382d3 --- /dev/null +++ b/kubetool/templates/plugins/iperf3.yaml.j2 @@ -0,0 +1,113 @@ +# Source: https://raw.githubusercontent.com/Pharb/kubernetes-iperf3/master/iperf3.yaml +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: iperf3 + namespace: default +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: iperf3-psp-rb + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oob-privileged-psp-cr +subjects: +- kind: ServiceAccount + name: iperf3 + namespace: default +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: iperf3-server-deployment + labels: + app: iperf3-server +spec: + replicas: 1 + selector: + matchLabels: + app: iperf3-server + template: + metadata: + labels: + app: iperf3-server + spec: + {% if plugins['iperf3']['nodeSelector'] is defined -%} + nodeSelector: + {{ plugins['iperf3']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + {% if plugins['iperf3']['tolerations'] is defined -%} + tolerations: + {{ 
plugins['iperf3']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + preference: + matchExpressions: + - key: kubernetes.io/role + operator: In + values: + - master + serviceAccountName: iperf3 + containers: + - name: iperf3-server + image: networkstatic/iperf3 + args: ['-s'] + ports: + - containerPort: 5201 + name: server + terminationGracePeriodSeconds: 0 + +--- + +apiVersion: v1 +kind: Service +metadata: + name: iperf3-server +spec: + selector: + app: iperf3-server + ports: + - protocol: TCP + port: 5201 + targetPort: server + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: iperf3-clients + labels: + app: iperf3-client +spec: + selector: + matchLabels: + app: iperf3-client + template: + metadata: + labels: + app: iperf3-client + spec: + {% if plugins['iperf3']['nodeSelector'] is defined -%} + nodeSelector: + {{ plugins['iperf3']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + {% if plugins['iperf3']['tolerations'] is defined -%} + tolerations: + {{ plugins['iperf3']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + serviceAccountName: iperf3 + containers: + - name: iperf3-client + image: networkstatic/iperf3 + command: ['/bin/sh', '-c', 'sleep infinity'] + # To benchmark manually: kubectl exec iperf3-clients-jlfxq -- /bin/sh -c 'iperf3 -c iperf3-server' + terminationGracePeriodSeconds: 0 diff --git a/kubetool/templates/plugins/local-path-provisioner.yaml.j2 b/kubetool/templates/plugins/local-path-provisioner.yaml.j2 new file mode 100644 index 000000000..5667530e9 --- /dev/null +++ b/kubetool/templates/plugins/local-path-provisioner.yaml.j2 @@ -0,0 +1,168 @@ +# Source https://github.com/rancher/local-path-provisioner/blob/d45bdcf6112a55c2cbaa03ea9cad4d65f4f20e54/deploy/local-path-storage.yaml +--- +apiVersion: v1 +kind: Namespace +metadata: + name: local-path-storage +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: local-path-provisioner-service-account + namespace: local-path-storage +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: local-path-provisioner-role +rules: +- apiGroups: [""] + resources: ["nodes", "persistentvolumeclaims", "configmaps"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["endpoints", "persistentvolumes", "pods"] + verbs: ["*"] +- apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +- apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: local-path-provisioner-bind +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: local-path-provisioner-role +subjects: +- kind: ServiceAccount + name: local-path-provisioner-service-account + namespace: local-path-storage +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: local-path-provisioner-privileged-psp +roleRef: + kind: ClusterRole + name: oob-privileged-psp-cr + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: local-path-provisioner-service-account + namespace: local-path-storage +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: local-path-provisioner + namespace: local-path-storage +spec: + replicas: 1 + selector: + matchLabels: + app: local-path-provisioner + template: + metadata: + labels: + app: 
local-path-provisioner + spec: + serviceAccountName: local-path-provisioner-service-account + containers: + - name: local-path-provisioner + image: {% if plugins['local-path-provisioner']['installation']['registry'] is defined and plugins['local-path-provisioner']['installation']['registry']|length %}{{ plugins['local-path-provisioner']['installation']['registry'] }}/{% endif %}{{ plugins['local-path-provisioner']['image'] }} + imagePullPolicy: IfNotPresent + command: + - local-path-provisioner + - --debug + - start + - --config + - /etc/config/config.json + volumeMounts: + - name: config-volume + mountPath: /etc/config/ + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumes: + - name: config-volume + configMap: + name: local-path-config +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "{{ plugins['local-path-provisioner']['storage-class']['is-default'] }}" + name: {{ plugins['local-path-provisioner']['storage-class']['name'] }} +provisioner: rancher.io/local-path +volumeBindingMode: WaitForFirstConsumer +reclaimPolicy: Delete +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: local-path-config + namespace: local-path-storage +data: + config.json: |- + { + "nodePathMap":[ + { + "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES", + "paths":["{{ plugins['local-path-provisioner']['volume-dir'] }}"] + } + ] + } + setup: |- + #!/bin/sh + while getopts "m:s:p:" opt + do + case $opt in + p) + absolutePath=$OPTARG + ;; + s) + sizeInBytes=$OPTARG + ;; + m) + volMode=$OPTARG + ;; + esac + done + + mkdir -m 0777 -p ${absolutePath} + teardown: |- + #!/bin/sh + while getopts "m:s:p:" opt + do + case $opt in + p) + absolutePath=$OPTARG + ;; + s) + sizeInBytes=$OPTARG + ;; + m) + volMode=$OPTARG + ;; + esac + done + + rm -rf ${absolutePath} + helperPod.yaml: |- + apiVersion: v1 + kind: Pod + metadata: + name: helper-pod + spec: + containers: + - name: helper-pod + image: {% if plugins['local-path-provisioner']['installation']['registry'] is defined and plugins['local-path-provisioner']['installation']['registry']|length %}{{ plugins['local-path-provisioner']['installation']['registry'] }}/{% endif %}{{ plugins['local-path-provisioner']['helper-pod-image'] }} + + diff --git a/kubetool/templates/plugins/nginx-ingress-controller-v0.34.yaml.j2 b/kubetool/templates/plugins/nginx-ingress-controller-v0.34.yaml.j2 new file mode 100644 index 000000000..4f895ba28 --- /dev/null +++ b/kubetool/templates/plugins/nginx-ingress-controller-v0.34.yaml.j2 @@ -0,0 +1,365 @@ + +apiVersion: v1 +kind: Namespace +metadata: + name: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + +--- +# Source: ingress-nginx/templates/controller-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + helm.sh/chart: ingress-nginx-2.11.1 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.34.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx + namespace: ingress-nginx +{% if plugins['nginx-ingress-controller']['custom_headers'] is defined -%} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: custom-headers + namespace: ingress-nginx +data: +{{ plugins['nginx-ingress-controller']['custom_headers'] | toyaml | indent(width=2, indentfirst=True) -}} +{%- endif %} +--- +# Source: ingress-nginx/templates/controller-configmap.yaml 
+apiVersion: v1 +kind: ConfigMap +metadata: + labels: + helm.sh/chart: ingress-nginx-2.11.1 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.34.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx-controller + namespace: ingress-nginx +data: +{% if plugins['nginx-ingress-controller']['config_map'] is defined -%} + {{ plugins['nginx-ingress-controller']['config_map'] | toyaml | indent(width=2, indentfirst=True) -}} +{%- endif %} +--- +# Source: ingress-nginx/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + helm.sh/chart: ingress-nginx-2.11.1 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.34.1 + app.kubernetes.io/managed-by: Helm + name: ingress-nginx + namespace: ingress-nginx +rules: + - apiGroups: + - '' + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + verbs: + - list + - watch + - apiGroups: + - '' + resources: + - nodes + verbs: + - get + - apiGroups: + - '' + resources: + - services + verbs: + - get + - list + - update + - watch + - apiGroups: + - extensions + - networking.k8s.io # k8s 1.14+ + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - events + verbs: + - create + - patch + - apiGroups: + - extensions + - networking.k8s.io # k8s 1.14+ + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io # k8s 1.14+ + resources: + - ingressclasses + verbs: + - get + - list + - watch +--- +# Source: ingress-nginx/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-2.11.1 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.34.1 + app.kubernetes.io/managed-by: Helm + name: ingress-nginx + namespace: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: ingress-nginx +--- +# Source: ingress-nginx/templates/controller-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + helm.sh/chart: ingress-nginx-2.11.1 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.34.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx + namespace: ingress-nginx +rules: + - apiGroups: + - '' + resources: + - namespaces + verbs: + - get + - apiGroups: + - '' + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - services + verbs: + - get + - list + - update + - watch + - apiGroups: + - extensions + - networking.k8s.io # k8s 1.14+ + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - extensions + - networking.k8s.io # k8s 1.14+ + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io # k8s 1.14+ + resources: + - ingressclasses + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - configmaps + resourceNames: + - ingress-controller-leader-nginx + verbs: + - get + - update + - apiGroups: + - '' + resources: + - configmaps + verbs: + - create + - apiGroups: + - '' + resources: + - endpoints 
+ verbs: + - create + - get + - update + - apiGroups: + - '' + resources: + - events + verbs: + - create + - patch + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: ['oob-host-network-psp'] +--- +# Source: ingress-nginx/templates/controller-rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-2.11.1 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.34.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx + namespace: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: ingress-nginx +--- +# Source: ingress-nginx/templates/controller-deployment.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + helm.sh/chart: ingress-nginx-2.11.1 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.34.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx-controller + namespace: ingress-nginx +spec: + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + revisionHistoryLimit: 10 + minReadySeconds: 0 + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + spec: + dnsPolicy: ClusterFirst + containers: + - name: controller + image: {% if plugins['nginx-ingress-controller']['installation']['registry'] is defined and plugins['nginx-ingress-controller']['installation']['registry']|length %}{{ plugins['nginx-ingress-controller']['installation']['registry'] }}/{% endif %}{{ plugins['nginx-ingress-controller']['controller']['image'] }} + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + args: + - /nginx-ingress-controller + - --election-id=ingress-controller-leader + - --ingress-class=nginx + - --configmap=ingress-nginx/ingress-nginx-controller + {% if plugins['nginx-ingress-controller']['controller']['ssl']['enableSslPassthrough'] == true -%} + - --enable-ssl-passthrough + {%- endif %} + {% if plugins['nginx-ingress-controller']['controller']['ssl']['default-certificate'] is defined -%} + - --default-ssl-certificate=kube-system/default-ingress-cert + {%- endif %} + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: 101 + allowPrivilegeEscalation: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + livenessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + ports: + {{ plugins['nginx-ingress-controller']['ports'] | toyaml | indent(width=12, indentfirst=False) }} + resources: + requests: + cpu: 100m + memory: 90Mi + serviceAccountName: ingress-nginx + terminationGracePeriodSeconds: 300 + nodeSelector: + {{ 
plugins['nginx-ingress-controller']['controller']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) }} + {% if plugins['nginx-ingress-controller']['controller']['tolerations'] is defined -%} + tolerations: + {{ plugins['nginx-ingress-controller']['controller']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} \ No newline at end of file diff --git a/kubetool/templates/plugins/nginx-ingress-controller-v0.35.yaml.j2 b/kubetool/templates/plugins/nginx-ingress-controller-v0.35.yaml.j2 new file mode 100644 index 000000000..3a216a8f5 --- /dev/null +++ b/kubetool/templates/plugins/nginx-ingress-controller-v0.35.yaml.j2 @@ -0,0 +1,364 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + +--- +# Source: ingress-nginx/templates/controller-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + helm.sh/chart: ingress-nginx-3.1.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.35.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx + namespace: ingress-nginx +{% if plugins['nginx-ingress-controller']['custom_headers'] is defined -%} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: custom-headers + namespace: ingress-nginx +data: +{{ plugins['nginx-ingress-controller']['custom_headers'] | toyaml | indent(width=2, indentfirst=True) -}} +{%- endif %} +--- +# Source: ingress-nginx/templates/controller-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + helm.sh/chart: ingress-nginx-3.1.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.35.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx-controller + namespace: ingress-nginx +data: +{% if plugins['nginx-ingress-controller']['config_map'] is defined -%} + {{ plugins['nginx-ingress-controller']['config_map'] | toyaml | indent(width=2, indentfirst=True) -}} +{%- endif %} +--- +# Source: ingress-nginx/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + helm.sh/chart: ingress-nginx-3.1.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.35.0 + app.kubernetes.io/managed-by: Helm + name: ingress-nginx +rules: + - apiGroups: + - '' + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + verbs: + - list + - watch + - apiGroups: + - '' + resources: + - nodes + verbs: + - get + - apiGroups: + - '' + resources: + - services + verbs: + - get + - list + - update + - watch + - apiGroups: + - extensions + - networking.k8s.io # k8s 1.14+ + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - events + verbs: + - create + - patch + - apiGroups: + - extensions + - networking.k8s.io # k8s 1.14+ + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io # k8s 1.14+ + resources: + - ingressclasses + verbs: + - get + - list + - watch +--- +# Source: ingress-nginx/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-3.1.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + 
app.kubernetes.io/version: 0.35.0 + app.kubernetes.io/managed-by: Helm + name: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: ingress-nginx +--- +# Source: ingress-nginx/templates/controller-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + helm.sh/chart: ingress-nginx-3.1.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.35.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx + namespace: ingress-nginx +rules: + - apiGroups: + - '' + resources: + - namespaces + verbs: + - get + - apiGroups: + - '' + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - services + verbs: + - get + - list + - update + - watch + - apiGroups: + - extensions + - networking.k8s.io # k8s 1.14+ + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - extensions + - networking.k8s.io # k8s 1.14+ + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io # k8s 1.14+ + resources: + - ingressclasses + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - configmaps + resourceNames: + - ingress-controller-leader-nginx + verbs: + - get + - update + - apiGroups: + - '' + resources: + - configmaps + verbs: + - create + - apiGroups: + - '' + resources: + - endpoints + verbs: + - create + - get + - update + - apiGroups: + - '' + resources: + - events + verbs: + - create + - patch + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: ['oob-host-network-psp'] +--- +# Source: ingress-nginx/templates/controller-rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-3.1.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.35.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx + namespace: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: ingress-nginx +--- +# Source: ingress-nginx/templates/controller-deployment.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + helm.sh/chart: ingress-nginx-3.1.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.35.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx-controller + namespace: ingress-nginx +spec: + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + revisionHistoryLimit: 10 + minReadySeconds: 0 + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + spec: + dnsPolicy: ClusterFirst + containers: + - name: controller + image: {% if plugins['nginx-ingress-controller']['installation']['registry'] is defined and plugins['nginx-ingress-controller']['installation']['registry']|length %}{{ plugins['nginx-ingress-controller']['installation']['registry'] }}/{% endif %}{{ 
plugins['nginx-ingress-controller']['controller']['image'] }} + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + args: + - /nginx-ingress-controller + - --election-id=ingress-controller-leader + - --ingress-class=nginx + - --configmap=ingress-nginx/ingress-nginx-controller + {% if plugins['nginx-ingress-controller']['controller']['ssl']['enableSslPassthrough'] is true -%} + - --enable-ssl-passthrough + {%- endif %} + {% if plugins['nginx-ingress-controller']['controller']['ssl']['default-certificate'] is defined -%} + - --default-ssl-certificate=kube-system/default-ingress-cert + {%- endif %} + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: 101 + allowPrivilegeEscalation: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + livenessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + ports: + {{ plugins['nginx-ingress-controller']['ports'] | toyaml | indent(width=12, indentfirst=False) }} + resources: + requests: + cpu: 100m + memory: 90Mi + serviceAccountName: ingress-nginx + terminationGracePeriodSeconds: 300 + nodeSelector: + {{ plugins['nginx-ingress-controller']['controller']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) }} + {% if plugins['nginx-ingress-controller']['controller']['tolerations'] is defined -%} + tolerations: + {{ plugins['nginx-ingress-controller']['controller']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} \ No newline at end of file diff --git a/kubetool/templates/plugins/nginx-ingress-controller-v0.43.yaml.j2 b/kubetool/templates/plugins/nginx-ingress-controller-v0.43.yaml.j2 new file mode 100644 index 000000000..e2466c351 --- /dev/null +++ b/kubetool/templates/plugins/nginx-ingress-controller-v0.43.yaml.j2 @@ -0,0 +1,364 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + +--- +# Source: ingress-nginx/templates/controller-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + helm.sh/chart: ingress-nginx-3.19.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.43.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx + namespace: ingress-nginx +{% if plugins['nginx-ingress-controller']['custom_headers'] is defined -%} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: custom-headers + namespace: ingress-nginx +data: +{{ plugins['nginx-ingress-controller']['custom_headers'] | toyaml | indent(width=2, indentfirst=True) -}} +{%- endif %} +--- +# Source: ingress-nginx/templates/controller-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + helm.sh/chart: ingress-nginx-3.19.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.43.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: 
ingress-nginx-controller + namespace: ingress-nginx +data: +{% if plugins['nginx-ingress-controller']['config_map'] is defined -%} + {{ plugins['nginx-ingress-controller']['config_map'] | toyaml | indent(width=2, indentfirst=True) -}} +{%- endif %} +--- +# Source: ingress-nginx/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + helm.sh/chart: ingress-nginx-3.19.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.43.0 + app.kubernetes.io/managed-by: Helm + name: ingress-nginx +rules: + - apiGroups: + - '' + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + verbs: + - list + - watch + - apiGroups: + - '' + resources: + - nodes + verbs: + - get + - apiGroups: + - '' + resources: + - services + verbs: + - get + - list + - update + - watch + - apiGroups: + - extensions + - networking.k8s.io # k8s 1.14+ + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - events + verbs: + - create + - patch + - apiGroups: + - extensions + - networking.k8s.io # k8s 1.14+ + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io # k8s 1.14+ + resources: + - ingressclasses + verbs: + - get + - list + - watch +--- +# Source: ingress-nginx/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-3.19.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.43.0 + app.kubernetes.io/managed-by: Helm + name: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: ingress-nginx +--- +# Source: ingress-nginx/templates/controller-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + helm.sh/chart: ingress-nginx-3.19.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.43.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx + namespace: ingress-nginx +rules: + - apiGroups: + - '' + resources: + - namespaces + verbs: + - get + - apiGroups: + - '' + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - services + verbs: + - get + - list + - update + - watch + - apiGroups: + - extensions + - networking.k8s.io # k8s 1.14+ + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - extensions + - networking.k8s.io # k8s 1.14+ + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io # k8s 1.14+ + resources: + - ingressclasses + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - configmaps + resourceNames: + - ingress-controller-leader-nginx + verbs: + - get + - update + - apiGroups: + - '' + resources: + - configmaps + verbs: + - create + - apiGroups: + - '' + resources: + - endpoints + verbs: + - create + - get + - update + - apiGroups: + - '' + resources: + - events + verbs: + - create + - patch + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: ['oob-host-network-psp'] +--- +# Source: ingress-nginx/templates/controller-rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 
+kind: RoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-3.19.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.43.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx + namespace: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: ingress-nginx +--- +# Source: ingress-nginx/templates/controller-deployment.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + helm.sh/chart: ingress-nginx-3.19.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.43.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx-controller + namespace: ingress-nginx +spec: + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + revisionHistoryLimit: 10 + minReadySeconds: 0 + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + spec: + dnsPolicy: ClusterFirst + containers: + - name: controller + image: {% if plugins['nginx-ingress-controller']['installation']['registry'] is defined and plugins['nginx-ingress-controller']['installation']['registry']|length %}{{ plugins['nginx-ingress-controller']['installation']['registry'] }}/{% endif %}{{ plugins['nginx-ingress-controller']['controller']['image'] }} + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + args: + - /nginx-ingress-controller + - --election-id=ingress-controller-leader + - --ingress-class=nginx + - --configmap=ingress-nginx/ingress-nginx-controller + {% if plugins['nginx-ingress-controller']['controller']['ssl']['enableSslPassthrough'] is true -%} + - --enable-ssl-passthrough + {%- endif %} + {% if plugins['nginx-ingress-controller']['controller']['ssl']['default-certificate'] is defined -%} + - --default-ssl-certificate=kube-system/default-ingress-cert + {%- endif %} + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: 101 + allowPrivilegeEscalation: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + livenessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + ports: + {{ plugins['nginx-ingress-controller']['ports'] | toyaml | indent(width=12, indentfirst=False) }} + resources: + requests: + cpu: 100m + memory: 90Mi + serviceAccountName: ingress-nginx + terminationGracePeriodSeconds: 300 + nodeSelector: + {{ plugins['nginx-ingress-controller']['controller']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) }} + {% if plugins['nginx-ingress-controller']['controller']['tolerations'] is defined -%} + tolerations: + {{ plugins['nginx-ingress-controller']['controller']['tolerations'] | toyaml | indent(width=8, 
indentfirst=False) -}} + {%- endif %} \ No newline at end of file diff --git a/kubetool/templates/plugins/nginx-ingress-controller-v0.48.yaml.j2 b/kubetool/templates/plugins/nginx-ingress-controller-v0.48.yaml.j2 new file mode 100644 index 000000000..daa0a37c5 --- /dev/null +++ b/kubetool/templates/plugins/nginx-ingress-controller-v0.48.yaml.j2 @@ -0,0 +1,354 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + +--- +# Source: ingress-nginx/templates/controller-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx + namespace: ingress-nginx +automountServiceAccountToken: true +{% if plugins['nginx-ingress-controller']['custom_headers'] is defined -%} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: custom-headers + namespace: ingress-nginx +data: +{{ plugins['nginx-ingress-controller']['custom_headers'] | toyaml | indent(width=2, indentfirst=True) -}} +{%- endif %} +--- +# Source: ingress-nginx/templates/controller-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx-controller + namespace: ingress-nginx +data: +{% if plugins['nginx-ingress-controller']['config_map'] is defined -%} + {{ plugins['nginx-ingress-controller']['config_map'] | toyaml | indent(width=2, indentfirst=True) -}} +{%- endif %} +--- +# Source: ingress-nginx/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + name: ingress-nginx +rules: + - apiGroups: + - '' + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + verbs: + - list + - watch + - apiGroups: + - '' + resources: + - nodes + verbs: + - get + - apiGroups: + - '' + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - extensions + - networking.k8s.io # k8s 1.14+ + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - events + verbs: + - create + - patch + - apiGroups: + - extensions + - networking.k8s.io # k8s 1.14+ + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io # k8s 1.14+ + resources: + - ingressclasses + verbs: + - get + - list + - watch +--- +# Source: ingress-nginx/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + name: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: ingress-nginx +--- +# Source: 
ingress-nginx/templates/controller-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx + namespace: ingress-nginx +rules: + - apiGroups: + - '' + resources: + - namespaces + verbs: + - get + - apiGroups: + - '' + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - extensions + - networking.k8s.io # k8s 1.14+ + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - extensions + - networking.k8s.io # k8s 1.14+ + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io # k8s 1.14+ + resources: + - ingressclasses + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - configmaps + resourceNames: + - ingress-controller-leader-nginx + verbs: + - get + - update + - apiGroups: + - '' + resources: + - configmaps + verbs: + - create + - apiGroups: + - '' + resources: + - events + verbs: + - create + - patch + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: ['oob-host-network-psp'] +--- +# Source: ingress-nginx/templates/controller-rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx + namespace: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: ingress-nginx +--- +# Source: ingress-nginx/templates/controller-deployment.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx-controller + namespace: ingress-nginx +spec: + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + revisionHistoryLimit: 10 + minReadySeconds: 0 + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + spec: + dnsPolicy: ClusterFirst + containers: + - name: controller + image: {% if plugins['nginx-ingress-controller']['installation']['registry'] is defined and plugins['nginx-ingress-controller']['installation']['registry']|length %}{{ plugins['nginx-ingress-controller']['installation']['registry'] }}/{% endif %}{{ plugins['nginx-ingress-controller']['controller']['image'] }} + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + args: + - /nginx-ingress-controller + - --election-id=ingress-controller-leader + - --ingress-class=nginx + - --configmap=ingress-nginx/ingress-nginx-controller + {% if 
plugins['nginx-ingress-controller']['controller']['ssl']['enableSslPassthrough'] is true -%} + - --enable-ssl-passthrough + {%- endif %} + {% if plugins['nginx-ingress-controller']['controller']['ssl']['default-certificate'] is defined -%} + - --default-ssl-certificate=kube-system/default-ingress-cert + {%- endif %} + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: 101 + allowPrivilegeEscalation: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + ports: + {{ plugins['nginx-ingress-controller']['ports'] | toyaml | indent(width=12, indentfirst=False) }} + resources: + requests: + cpu: 100m + memory: 90Mi + nodeSelector: + {{ plugins['nginx-ingress-controller']['controller']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) }} + {% if plugins['nginx-ingress-controller']['controller']['tolerations'] is defined -%} + tolerations: + {{ plugins['nginx-ingress-controller']['controller']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + serviceAccountName: ingress-nginx \ No newline at end of file diff --git a/kubetool/templates/plugins/sock-shop-ingress.yaml.j2 b/kubetool/templates/plugins/sock-shop-ingress.yaml.j2 new file mode 100644 index 000000000..993bed317 --- /dev/null +++ b/kubetool/templates/plugins/sock-shop-ingress.yaml.j2 @@ -0,0 +1,15 @@ +--- +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: front-end + namespace: sock-shop +spec: + rules: + - host: {{ cluster_name }} + http: + paths: + - path: / + backend: + serviceName: front-end + servicePort: 80 diff --git a/kubetool/templates/plugins/sock-shop.yaml.j2 b/kubetool/templates/plugins/sock-shop.yaml.j2 new file mode 100644 index 000000000..7851b5b91 --- /dev/null +++ b/kubetool/templates/plugins/sock-shop.yaml.j2 @@ -0,0 +1,825 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: sock-shop + namespace: sock-shop +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: sock-shop-psp-rb + namespace: sock-shop +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oob-privileged-psp-cr +subjects: +- kind: ServiceAccount + name: sock-shop + namespace: sock-shop +--- +apiVersion: v1 +kind: Namespace +metadata: + name: sock-shop +--- +# Source: https://github.com/microservices-demo/microservices-demo/blob/master/deploy/kubernetes/complete-demo.yaml + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: carts-db + labels: + name: carts-db + namespace: sock-shop +spec: + replicas: 1 + template: + metadata: + labels: + name: carts-db + spec: + serviceAccountName: sock-shop + containers: + - name: carts-db + image: {% if plugins['sock-shop']['installation']['registry'] is defined and plugins['sock-shop']['installation']['registry']|length %}{{ plugins['sock-shop']['installation']['registry'] }}/{% endif %}{{ plugins['sock-shop']['carts-db']['image'] }} + ports: + - name: mongo + containerPort: 27017 + securityContext: + capabilities: + drop: + - 
all + add: + - CHOWN + - SETGID + - SETUID + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /tmp + name: tmp-volume + volumes: + - name: tmp-volume + emptyDir: + medium: Memory + nodeSelector: + beta.kubernetes.io/os: linux + {% if plugins['sock-shop']['nodeSelector'] is defined -%} + {{ plugins['sock-shop']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + {% if plugins['sock-shop']['tolerations'] is defined -%} + tolerations: + {{ plugins['sock-shop']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + selector: + matchLabels: + name: carts-db +--- +apiVersion: v1 +kind: Service +metadata: + name: carts-db + labels: + name: carts-db + namespace: sock-shop +spec: + ports: + # the port that this service should serve on + - port: 27017 + targetPort: 27017 + selector: + name: carts-db +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: carts + labels: + name: carts + namespace: sock-shop +spec: + replicas: 1 + selector: + matchLabels: + name: carts + template: + metadata: + labels: + name: carts + spec: + serviceAccountName: sock-shop + containers: + - name: carts + image: {% if plugins['sock-shop']['installation']['registry'] is defined and plugins['sock-shop']['installation']['registry']|length %}{{ plugins['sock-shop']['installation']['registry'] }}/{% endif %}{{ plugins['sock-shop']['carts']['image'] }} + ports: + - containerPort: 80 + env: + - name: ZIPKIN + value: zipkin.jaeger.svc.cluster.local + - name: JAVA_OPTS + value: -Xms64m -Xmx128m -XX:PermSize=32m -XX:MaxPermSize=64m -XX:+UseG1GC -Djava.security.egd=file:/dev/urandom + securityContext: + runAsNonRoot: true + runAsUser: 10001 + capabilities: + drop: + - all + add: + - NET_BIND_SERVICE + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /tmp + name: tmp-volume + volumes: + - name: tmp-volume + emptyDir: + medium: Memory + nodeSelector: + beta.kubernetes.io/os: linux + {% if plugins['sock-shop']['nodeSelector'] is defined -%} + {{ plugins['sock-shop']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + {% if plugins['sock-shop']['tolerations'] is defined -%} + tolerations: + {{ plugins['sock-shop']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} +--- +apiVersion: v1 +kind: Service +metadata: + name: carts + labels: + name: carts + namespace: sock-shop +spec: + ports: + # the port that this service should serve on + - port: 80 + targetPort: 80 + selector: + name: carts +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: catalogue-db + labels: + name: catalogue-db + namespace: sock-shop +spec: + replicas: 1 + selector: + matchLabels: + name: catalogue-db + template: + metadata: + labels: + name: catalogue-db + spec: + serviceAccountName: sock-shop + containers: + - name: catalogue-db + image: {% if plugins['sock-shop']['installation']['registry'] is defined and plugins['sock-shop']['installation']['registry']|length %}{{ plugins['sock-shop']['installation']['registry'] }}/{% endif %}{{ plugins['sock-shop']['catalogue-db']['image'] }} + env: + - name: MYSQL_ROOT_PASSWORD + value: fake_password + - name: MYSQL_DATABASE + value: socksdb + ports: + - name: mysql + containerPort: 3306 + nodeSelector: + beta.kubernetes.io/os: linux + {% if plugins['sock-shop']['nodeSelector'] is defined -%} + {{ plugins['sock-shop']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + {% if plugins['sock-shop']['tolerations'] is defined -%} + tolerations: 
+ {{ plugins['sock-shop']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} +--- +apiVersion: v1 +kind: Service +metadata: + name: catalogue-db + labels: + name: catalogue-db + namespace: sock-shop +spec: + ports: + # the port that this service should serve on + - port: 3306 + targetPort: 3306 + selector: + name: catalogue-db +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: catalogue + labels: + name: catalogue + namespace: sock-shop +spec: + replicas: 1 + selector: + matchLabels: + name: catalogue + template: + metadata: + labels: + name: catalogue + spec: + serviceAccountName: sock-shop + containers: + - name: catalogue + image: {% if plugins['sock-shop']['installation']['registry'] is defined and plugins['sock-shop']['installation']['registry']|length %}{{ plugins['sock-shop']['installation']['registry'] }}/{% endif %}{{ plugins['sock-shop']['catalogue']['image'] }} + ports: + - containerPort: 80 + securityContext: + runAsNonRoot: true + runAsUser: 10001 + capabilities: + drop: + - all + add: + - NET_BIND_SERVICE + readOnlyRootFilesystem: true + nodeSelector: + beta.kubernetes.io/os: linux + {% if plugins['sock-shop']['nodeSelector'] is defined -%} + {{ plugins['sock-shop']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + {% if plugins['sock-shop']['tolerations'] is defined -%} + tolerations: + {{ plugins['sock-shop']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} +--- +apiVersion: v1 +kind: Service +metadata: + name: catalogue + labels: + name: catalogue + namespace: sock-shop +spec: + ports: + # the port that this service should serve on + - port: 80 + targetPort: 80 + selector: + name: catalogue +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: front-end + labels: + name: front-end + namespace: sock-shop +spec: + replicas: 1 + selector: + matchLabels: + name: front-end + template: + metadata: + labels: + name: front-end + spec: + serviceAccountName: sock-shop + containers: + - name: front-end + image: {% if plugins['sock-shop']['installation']['registry'] is defined and plugins['sock-shop']['installation']['registry']|length %}{{ plugins['sock-shop']['installation']['registry'] }}/{% endif %}{{ plugins['sock-shop']['front-end']['image'] }} + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 8079 + securityContext: + runAsNonRoot: true + runAsUser: 10001 + capabilities: + drop: + - all + readOnlyRootFilesystem: true + nodeSelector: + beta.kubernetes.io/os: linux + {% if plugins['sock-shop']['nodeSelector'] is defined -%} + {{ plugins['sock-shop']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + {% if plugins['sock-shop']['tolerations'] is defined -%} + tolerations: + {{ plugins['sock-shop']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} +--- +apiVersion: v1 +kind: Service +metadata: + name: front-end + labels: + name: front-end + namespace: sock-shop +spec: + type: NodePort + ports: + - port: 80 + targetPort: 8079 + nodePort: 30001 + selector: + name: front-end +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: orders-db + labels: + name: orders-db + namespace: sock-shop +spec: + replicas: 1 + selector: + matchLabels: + name: orders-db + template: + metadata: + labels: + name: orders-db + spec: + serviceAccountName: sock-shop + containers: + - name: orders-db + image: {% if plugins['sock-shop']['installation']['registry'] is defined and 
plugins['sock-shop']['installation']['registry']|length %}{{ plugins['sock-shop']['installation']['registry'] }}/{% endif %}{{ plugins['sock-shop']['orders-db']['image'] }} + ports: + - name: mongo + containerPort: 27017 + securityContext: + capabilities: + drop: + - all + add: + - CHOWN + - SETGID + - SETUID + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /tmp + name: tmp-volume + volumes: + - name: tmp-volume + emptyDir: + medium: Memory + nodeSelector: + beta.kubernetes.io/os: linux + {% if plugins['sock-shop']['nodeSelector'] is defined -%} + {{ plugins['sock-shop']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + {% if plugins['sock-shop']['tolerations'] is defined -%} + tolerations: + {{ plugins['sock-shop']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} +--- +apiVersion: v1 +kind: Service +metadata: + name: orders-db + labels: + name: orders-db + namespace: sock-shop +spec: + ports: + # the port that this service should serve on + - port: 27017 + targetPort: 27017 + selector: + name: orders-db +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: orders + labels: + name: orders + namespace: sock-shop +spec: + replicas: 1 + selector: + matchLabels: + name: orders + template: + metadata: + labels: + name: orders + spec: + serviceAccountName: sock-shop + containers: + - name: orders + image: {% if plugins['sock-shop']['installation']['registry'] is defined and plugins['sock-shop']['installation']['registry']|length %}{{ plugins['sock-shop']['installation']['registry'] }}/{% endif %}{{ plugins['sock-shop']['orders']['image'] }} + env: + - name: ZIPKIN + value: zipkin.jaeger.svc.cluster.local + - name: JAVA_OPTS + value: -Xms64m -Xmx128m -XX:PermSize=32m -XX:MaxPermSize=64m -XX:+UseG1GC -Djava.security.egd=file:/dev/urandom + ports: + - containerPort: 80 + securityContext: + runAsNonRoot: true + runAsUser: 10001 + capabilities: + drop: + - all + add: + - NET_BIND_SERVICE + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /tmp + name: tmp-volume + volumes: + - name: tmp-volume + emptyDir: + medium: Memory + nodeSelector: + beta.kubernetes.io/os: linux + {% if plugins['sock-shop']['nodeSelector'] is defined -%} + {{ plugins['sock-shop']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + {% if plugins['sock-shop']['tolerations'] is defined -%} + tolerations: + {{ plugins['sock-shop']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} +--- +apiVersion: v1 +kind: Service +metadata: + name: orders + labels: + name: orders + namespace: sock-shop +spec: + ports: + # the port that this service should serve on + - port: 80 + targetPort: 80 + selector: + name: orders +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: payment + labels: + name: payment + namespace: sock-shop +spec: + replicas: 1 + selector: + matchLabels: + name: payment + template: + metadata: + labels: + name: payment + spec: + serviceAccountName: sock-shop + containers: + - name: payment + image: {% if plugins['sock-shop']['installation']['registry'] is defined and plugins['sock-shop']['installation']['registry']|length %}{{ plugins['sock-shop']['installation']['registry'] }}/{% endif %}{{ plugins['sock-shop']['payment']['image'] }} + ports: + - containerPort: 80 + securityContext: + runAsNonRoot: true + runAsUser: 10001 + capabilities: + drop: + - all + add: + - NET_BIND_SERVICE + readOnlyRootFilesystem: true + nodeSelector: + beta.kubernetes.io/os: linux + 
{% if plugins['sock-shop']['nodeSelector'] is defined -%} + {{ plugins['sock-shop']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + {% if plugins['sock-shop']['tolerations'] is defined -%} + tolerations: + {{ plugins['sock-shop']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} +--- +apiVersion: v1 +kind: Service +metadata: + name: payment + labels: + name: payment + namespace: sock-shop +spec: + ports: + # the port that this service should serve on + - port: 80 + targetPort: 80 + selector: + name: payment +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: queue-master + labels: + name: queue-master + namespace: sock-shop +spec: + replicas: 1 + selector: + matchLabels: + name: queue-master + template: + metadata: + labels: + name: queue-master + spec: + serviceAccountName: sock-shop + containers: + - name: queue-master + image: {% if plugins['sock-shop']['installation']['registry'] is defined and plugins['sock-shop']['installation']['registry']|length %}{{ plugins['sock-shop']['installation']['registry'] }}/{% endif %}{{ plugins['sock-shop']['queue-master']['image'] }} + ports: + - containerPort: 80 + nodeSelector: + beta.kubernetes.io/os: linux + {% if plugins['sock-shop']['nodeSelector'] is defined -%} + {{ plugins['sock-shop']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + {% if plugins['sock-shop']['tolerations'] is defined -%} + tolerations: + {{ plugins['sock-shop']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} +--- +apiVersion: v1 +kind: Service +metadata: + name: queue-master + labels: + name: queue-master + annotations: + prometheus.io/path: "/prometheus" + namespace: sock-shop +spec: + ports: + # the port that this service should serve on + - port: 80 + targetPort: 80 + selector: + name: queue-master +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: rabbitmq + labels: + name: rabbitmq + namespace: sock-shop +spec: + replicas: 1 + selector: + matchLabels: + name: rabbitmq + template: + metadata: + labels: + name: rabbitmq + spec: + serviceAccountName: sock-shop + containers: + - name: rabbitmq + image: {% if plugins['sock-shop']['installation']['registry'] is defined and plugins['sock-shop']['installation']['registry']|length %}{{ plugins['sock-shop']['installation']['registry'] }}/{% endif %}{{ plugins['sock-shop']['rabbitmq']['image'] }} + ports: + - containerPort: 5672 + securityContext: + capabilities: + drop: + - all + add: + - CHOWN + - SETGID + - SETUID + - DAC_OVERRIDE + readOnlyRootFilesystem: true + nodeSelector: + beta.kubernetes.io/os: linux + {% if plugins['sock-shop']['nodeSelector'] is defined -%} + {{ plugins['sock-shop']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + {% if plugins['sock-shop']['tolerations'] is defined -%} + tolerations: + {{ plugins['sock-shop']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} +--- +apiVersion: v1 +kind: Service +metadata: + name: rabbitmq + labels: + name: rabbitmq + namespace: sock-shop +spec: + ports: + # the port that this service should serve on + - port: 5672 + targetPort: 5672 + selector: + name: rabbitmq +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: shipping + labels: + name: shipping + namespace: sock-shop +spec: + replicas: 1 + selector: + matchLabels: + name: shipping + template: + metadata: + labels: + name: shipping + spec: + serviceAccountName: sock-shop + containers: + - 
name: shipping + image: {% if plugins['sock-shop']['installation']['registry'] is defined and plugins['sock-shop']['installation']['registry']|length %}{{ plugins['sock-shop']['installation']['registry'] }}/{% endif %}{{ plugins['sock-shop']['shipping']['image'] }} + env: + - name: ZIPKIN + value: zipkin.jaeger.svc.cluster.local + - name: JAVA_OPTS + value: -Xms64m -Xmx128m -XX:PermSize=32m -XX:MaxPermSize=64m -XX:+UseG1GC -Djava.security.egd=file:/dev/urandom + ports: + - containerPort: 80 + securityContext: + runAsNonRoot: true + runAsUser: 10001 + capabilities: + drop: + - all + add: + - NET_BIND_SERVICE + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /tmp + name: tmp-volume + volumes: + - name: tmp-volume + emptyDir: + medium: Memory + nodeSelector: + beta.kubernetes.io/os: linux + {% if plugins['sock-shop']['nodeSelector'] is defined -%} + {{ plugins['sock-shop']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + {% if plugins['sock-shop']['tolerations'] is defined -%} + tolerations: + {{ plugins['sock-shop']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} +--- +apiVersion: v1 +kind: Service +metadata: + name: shipping + labels: + name: shipping + namespace: sock-shop +spec: + ports: + # the port that this service should serve on + - port: 80 + targetPort: 80 + selector: + name: shipping +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: user-db + labels: + name: user-db + namespace: sock-shop +spec: + replicas: 1 + selector: + matchLabels: + name: user-db + template: + metadata: + labels: + name: user-db + spec: + serviceAccountName: sock-shop + containers: + - name: user-db + image: {% if plugins['sock-shop']['installation']['registry'] is defined and plugins['sock-shop']['installation']['registry']|length %}{{ plugins['sock-shop']['installation']['registry'] }}/{% endif %}{{ plugins['sock-shop']['user-db']['image'] }} + ports: + - name: mongo + containerPort: 27017 + securityContext: + capabilities: + drop: + - all + add: + - CHOWN + - SETGID + - SETUID + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /tmp + name: tmp-volume + volumes: + - name: tmp-volume + emptyDir: + medium: Memory + nodeSelector: + beta.kubernetes.io/os: linux + {% if plugins['sock-shop']['nodeSelector'] is defined -%} + {{ plugins['sock-shop']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + {% if plugins['sock-shop']['tolerations'] is defined -%} + tolerations: + {{ plugins['sock-shop']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} +--- +apiVersion: v1 +kind: Service +metadata: + name: user-db + labels: + name: user-db + namespace: sock-shop +spec: + ports: + # the port that this service should serve on + - port: 27017 + targetPort: 27017 + selector: + name: user-db +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: user + labels: + name: user + namespace: sock-shop +spec: + replicas: 1 + selector: + matchLabels: + name: user + template: + metadata: + labels: + name: user + spec: + serviceAccountName: sock-shop + containers: + - name: user + image: {% if plugins['sock-shop']['installation']['registry'] is defined and plugins['sock-shop']['installation']['registry']|length %}{{ plugins['sock-shop']['installation']['registry'] }}/{% endif %}{{ plugins['sock-shop']['user']['image'] }} + ports: + - containerPort: 80 + env: + - name: MONGO_HOST + value: user-db:27017 + securityContext: + runAsNonRoot: true + runAsUser: 10001 + 
capabilities: + drop: + - all + add: + - NET_BIND_SERVICE + readOnlyRootFilesystem: true + nodeSelector: + beta.kubernetes.io/os: linux + {% if plugins['sock-shop']['nodeSelector'] is defined -%} + {{ plugins['sock-shop']['nodeSelector'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} + {% if plugins['sock-shop']['tolerations'] is defined -%} + tolerations: + {{ plugins['sock-shop']['tolerations'] | toyaml | indent(width=8, indentfirst=False) -}} + {%- endif %} +--- +apiVersion: v1 +kind: Service +metadata: + name: user + labels: + name: user + namespace: sock-shop +spec: + ports: + # the port that this service should serve on + - port: 80 + targetPort: 80 + selector: + name: user diff --git a/kubetool/testsuite.py b/kubetool/testsuite.py new file mode 100644 index 000000000..17f661787 --- /dev/null +++ b/kubetool/testsuite.py @@ -0,0 +1,292 @@ +import textwrap +from traceback import * +import csv +from datetime import datetime +from kubetool.core import utils +import fabric + +TC_UNKNOWN = -1 +TC_PASSED = 0 +TC_FAILED = 1 +TC_WARNED = 2 +TC_EXCEPTED = 3 + +badges_weights = { + 'succeeded': 0, + 'warned': 1, + 'failed': 2, + 'excepted': 3, + 'unknown': 4, +} + + +class TestCase: + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + if type is None: + if self.status is TC_UNKNOWN: + self.success() + elif type is TestFailure: + self.fail(value) + elif type is TestWarn: + self.warn(value) + else: + if isinstance(value, fabric.group.GroupException): + value.result.print() + else: + print_exc() + self.exception(value) + print(self.get_summary(show_hint=True)) + return True + + def __init__(self, ts, id, category, name, default_results=None, minimal=None, recommended=None): + self.include_in_ts(ts) + self.category = category + self.id = str(id) + self.name = name + self.status = TC_UNKNOWN + self.results = default_results + self.minimal = minimal + self.recommended = recommended + + def include_in_ts(self, ts): + ts.register_tc(self) + return self + + def success(self, results=None): + if self.results is None: + self.results = results + self.status = TC_PASSED + return self + + def fail(self, results): + self.status = TC_FAILED + self.results = results + return self + + def warn(self, results): + self.status = TC_WARNED + self.results = results + return self + + def exception(self, results): + self.status = TC_EXCEPTED + self.results = results + return self + + def get_summary(self, show_description=False, show_hint=False, show_minimal=False, show_recommended=False): + output = "" + + output += " " * (15 - len(self.category)) + output += self.category + " " + + color = "" + if self.is_succeeded(): + color = "\x1b[38;5;041m" + output += " \x1b[48;5;041m\x1b[38;5;232m OK \x1b[49m\x1b[39m " + if self.is_failed(): + color = "\x1b[38;5;196m" + output += " \x1b[48;5;196m\x1b[38;5;231m FAIL \x1b[49m\x1b[39m " + if self.is_warned(): + color = "\x1b[38;5;208m" + output += " \x1b[48;5;208m\x1b[38;5;231m WARN \x1b[49m\x1b[39m " + if self.is_excepted(): + color = "\x1b[31m" + output += " \x1b[41m ERROR? \x1b[49m " + + output += self.id + " " + output += self.name + " " + + results = " " + str(self.results) + + output += "." 
* (146 - len(output) - len(results)) + output += "%s%s\x1b[39m" % (color, results) + + if show_minimal: + if self.minimal is None: + output += ' ' * 15 + else: + minimal = str(self.minimal) + output += ' ' * (15-len(minimal)) + minimal + + if show_recommended: + if self.recommended is None: + output += ' ' * 14 + else: + recommended = str(self.recommended) + output += ' ' * (14-len(recommended)) + recommended + + if show_hint and (isinstance(self.results, TestFailure) or isinstance(self.results, TestWarn)) and self.results.hint is not None: + output += "\n HINT:\n" + textwrap.indent(str(self.results.hint), " ") + + return output + + def get_readable_status(self): + if self.is_succeeded(): + return 'ok' + if self.is_failed(): + return 'fail' + if self.is_warned(): + return 'warning' + if self.is_excepted(): + return 'exception' + + def is_succeeded(self): + return self.status is TC_PASSED + + def is_failed(self): + return self.status is TC_FAILED + + def is_warned(self): + return self.status is TC_WARNED + + def is_excepted(self): + return self.status is TC_EXCEPTED + + +class TestCaseNegativeResult(BaseException): + + def __init__(self, message, hint=None, group_result=None): + super().__init__(message) + self.message = message + self.hint = hint + self.group_result = group_result + + +class TestFailure(TestCaseNegativeResult): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + +class TestWarn(TestCaseNegativeResult): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + +class TestSuite: + + def __init__(self): + self.tcs = [] + + def register_tc(self, tc): + self.tcs.append(tc) + + def is_any_test_failed(self): + for tc in self.tcs: + if tc.is_failed() or tc.is_excepted(): + return True + return False + + def is_any_test_warned(self): + for tc in self.tcs: + if tc.is_warned(): + return True + return False + + def get_final_summary(self, show_minimal=True, show_recommended=True): + result = " Group Status ID Test Actual result" + if show_minimal: + result += " Minimal" + if show_recommended: + result += " Recommended" + result += "\n" + + for tc in self.tcs: + result += "\n" + tc.get_summary(show_minimal=show_minimal, show_recommended=show_recommended) + + result += "\n\nOVERALL RESULTS: " + + for key, value in sorted(self.get_stats_data().items(), key=lambda _key: badges_weights[_key[0]]): + colors = '' + if key == 'succeeded': + colors = "\x1b[48;5;041m\x1b[38;5;232m" + if key == 'failed': + colors = "\x1b[48;5;196m\x1b[38;5;231m" + if key == 'warned': + colors = "\x1b[48;5;208m\x1b[38;5;231m" + if key == 'excepted': + colors = "\x1b[41m" + result += "%s %s %s \x1b[49m\x1b[39m " % (colors, value ,key.upper()) + + result += "\n" + + return result + + def print_final_status(self, log): + if self.is_any_test_failed(): + log.error("\nTEST FAILED" + "\nThe environment does not meet the minimal requirements. Check the test report and resolve the issues.") + return + if self.is_any_test_warned(): + log.warning("\nTEST PASSED WITH WARNINGS" + "\nThe environment meets the minimal requirements, but is not as recommended. 
Check the test report and resolve the issues.")
+            return
+        log.info("\nTEST PASSED")
+
+    def get_stats_data(self):
+        results = {}
+        for tc in self.tcs:
+            key = 'unknown'
+            if tc.is_succeeded():
+                key = 'succeeded'
+            elif tc.is_failed():
+                key = 'failed'
+            elif tc.is_warned():
+                key = 'warned'
+            elif tc.is_excepted():
+                key = 'excepted'
+            value = results.get(key, 0) + 1
+            results[key] = value
+        return results
+
+    def save_csv(self, destination_file_path, delimiter=';'):
+        with open(destination_file_path, mode='w') as stream:
+            csv_writer = csv.writer(stream, delimiter=delimiter, quotechar='"', quoting=csv.QUOTE_MINIMAL)
+            csv_writer.writerow(['group', 'status', 'test_id', 'test_name', 'current_result', 'minimal_result', 'recommended_result'])
+            for tc in self.tcs:
+                csv_writer.writerow([
+                    tc.category.lower(),
+                    tc.get_readable_status(),
+                    tc.id,
+                    tc.name,
+                    tc.results,
+                    tc.minimal,
+                    tc.recommended
+                ])
+
+    def save_html(self, destination_file_path, check_type, append_styles=True):
+        with open(destination_file_path, mode='w') as stream:
+            stream.write('<html><head><title>%s Check Report</title></head>'
+                         '<body><div class="date">%s</div>' % (check_type, datetime.utcnow()))
+            for key, value in sorted(self.get_stats_data().items(), key=lambda _key: badges_weights[_key[0]]):
+                stream.write('<div class="badge %s"><div class="value">%s</div><div class="key">%s</div></div>'
+                             % (key, value, key))
+            stream.write('<h1>%s Check Report</h1>' % check_type)
+            stream.write('<table><thead><tr>'
+                         '<td>Group</td><td>Status</td><td>ID</td><td>Test</td>'
+                         '<td>Actual Result</td><td>Minimal</td><td>Recommended</td>'
+                         '</tr></thead><tbody>')
+            for tc in self.tcs:
+                minimal = tc.minimal
+                if minimal is None:
+                    minimal = ''
+                recommended = tc.recommended
+                if recommended is None:
+                    recommended = ''
+                stream.write('<tr class="%s"><td>%s</td><td>%s</td><td>%s</td>'
+                             '<td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>' %
+                             (tc.get_readable_status(),
+                              tc.category.lower(),
+                              tc.get_readable_status(),
+                              tc.id,
+                              tc.name,
+                              tc.results,
+                              minimal,
+                              recommended
+                              ))
+            stream.write('</tbody></table>')
+            if append_styles:
+                with open(utils.get_resource_absolute_path('resources/reports/check_report.css',
+                                                           script_relative=True)) as css_stream:
+                    stream.write('<style>%s</style>' % css_stream.read())
+
+            stream.write('</body></html>')
diff --git a/kubetool/thirdparties.py b/kubetool/thirdparties.py
new file mode 100644
index 000000000..2d5ce57c9
--- /dev/null
+++ b/kubetool/thirdparties.py
@@ -0,0 +1,186 @@
+from copy import deepcopy
+
+from kubetool.core import utils
+from kubetool.core.cluster import KubernetesCluster
+
+
+def enrich_inventory_apply_upgrade_defaults(inventory, cluster):
+    if cluster.context.get('initial_procedure') == 'upgrade':
+        upgrade_version = cluster.context["upgrade_version"]
+        upgrade_thirdparties = cluster.procedure_inventory.get(upgrade_version, {}).get('thirdparties')
+        if upgrade_thirdparties:
+            upgrade_thirdparties = deepcopy(upgrade_thirdparties)
+            default_thirdparties = cluster.defaults['services']['thirdparties']
+
+            # keep some configurations (unpack) from default thirdparties, if they are not re-defined
+            for destination, config in upgrade_thirdparties.items():
+                if destination in default_thirdparties and 'unpack' in default_thirdparties[destination]\
+                        and 'unpack' not in config:
+                    config['unpack'] = default_thirdparties[destination]['unpack']
+
+            inventory['services']['thirdparties'] = upgrade_thirdparties
+        else:
+            cluster.log.warning('New thirdparties for the upgrade procedure are not set in the procedure config - defaults will be used')
+    return inventory
+
+
+def enrich_inventory_apply_defaults(inventory, cluster):
+    # if thirdparties is empty, then nothing to do
+    if not inventory['services'].get('thirdparties', {}):
+        return inventory
+    raw_inventory = cluster.raw_inventory
+
+    for destination, config in inventory['services']['thirdparties'].items():
+
+        if isinstance(config, str):
+            config = {
+                'source': config
+            }
+
+        if config.get('source') is None:
+            raise Exception('Source not found for thirdparty \'%s\'' % destination)
+
+        if config.get('mode') is None:
+            config['mode'] = 700
+
+        if config.get('owner') is None:
+            config['owner'] = 'root'
+
+        if config.get('group') is not None:
+            config['groups'] = [config['group']]
+            del config['group']
+
+        if config.get('node') is not None:
+            config['nodes'] = [config['node']]
+            del config['node']
+
+        if config.get('groups') is None and config.get('nodes') is None:
+            config['groups'] = ['master', 'worker']
+
+        if config.get('nodes') is not None:
+            all_nodes_names = cluster.nodes['all'].get_nodes_names()
+            for node_name in config['nodes']:
+                if node_name not in all_nodes_names:
+                    raise Exception('Unknown node name provided for thirdparty %s. '
+                                    'Expected any of %s, but \'%s\' found.'
+ % (destination, all_nodes_names, node_name)) + + # if source is re-defined by user, but "sha1" is not provided, + # then remove default "sha1", because it may be wrong + raw_config = raw_inventory.get('services', {}).get('thirdparties', {}).get(destination, {}) + if 'source' in raw_config and 'sha1' not in raw_config and 'sha1' in config: + del config['sha1'] + + inventory['services']['thirdparties'][destination] = config + + # remove "crictl" from thirdparties when docker is used, but ONLY IF it is NOT explicitly specified in cluster.yaml + cri_name = inventory['services']['cri']['containerRuntime'] + crictl_key = '/usr/bin/crictl.tar.gz' + if cri_name == "docker" and \ + crictl_key not in cluster.raw_inventory.get('services', {}).get('thirdparties', {}) and \ + crictl_key in inventory['services']['thirdparties']: + del(inventory['services']['thirdparties'][crictl_key]) + + return inventory + + +def install_thirdparty(cluster: KubernetesCluster, destination, config=None): + + if config is None: + config = cluster.inventory['services'].get('thirdparties', {}).get(destination) + + if config is None: + raise Exception('Not possible to install thirdparty %s - not found in configfile' % destination) + + cluster.log.debug("Thirdparty \"%s\" will be installed" % destination) + is_curl = config['source'][:4] == 'http' and '://' in config['source'][4:8] + + # all commands will be grouped to single run + remote_commands = '' + + # directory will be created if it is not exists + destination_directory = '/'.join(destination.split('/')[:-1]) + cluster.log.verbose('Destination directory: %s' % destination_directory) + + common_group = cluster.create_group_from_groups_nodes_names(config.get('groups', []), config.get('nodes', [])) + + if cluster.context['initial_procedure'] == 'add_node': + common_group = common_group.get_new_nodes() + + # ! ATTENTION, in the further code there is no error and nothing is missing ! + # Here a long shell command is intentionally constructed and executed at once to speed up work + # At the same time, in the middle of the construction of the command, a file may suddenly be uploaded and then + # the command will be executed in two runs instead of single run + + # is destination directory exists? + remote_commands += 'mkdir -p %s' % destination_directory + + if is_curl: + cluster.log.verbose('Installation via curl download detected') + if config.get('sha1') is not None: + cluster.log.debug('SHA1 hash is defined, it will be used during installation') + # if hash equal, then stop further actions immediately! unpack should not be performed too + remote_commands += ' && FILE_HASH=$(sudo openssl sha1 %s | sed "s/^.* //"); ' \ + '[ "%s" == "${FILE_HASH}" ] && exit 0 || true ' % (destination, config['sha1']) + remote_commands += ' && sudo rm -f %s && sudo curl -f -g -L %s -o %s && ' % (destination, config['source'], destination) + else: + cluster.log.verbose('Installation via sftp upload detected') + cluster.log.debug(common_group.sudo(remote_commands)) + remote_commands = '' + # todo: use sha1 from configfile instead of calculating if provided? + local_path = utils.get_resource_absolute_path(config['source'], script_relative=True) + binary = bool(config.get('binary', True)) + common_group.put(local_path, destination, sudo=True, binary=binary) + + # TODO: !!! HALT IF FILE ALREADY EXISTS ON REMOTE MACHINES !!! 
+ + remote_commands += 'sudo chmod %s %s' % (config['mode'], destination) + remote_commands += ' && sudo chown %s %s' % (config['owner'], destination) + remote_commands += ' && sudo ls -la %s' % destination + + if config.get('unpack') is not None: + cluster.log.verbose('Unpack request detected') + + remote_commands += ' && sudo mkdir -p %s' % config['unpack'] + + extension = destination.split('.')[-1] + if extension == 'zip': + cluster.log.verbose('Unzip will be used for unpacking') + remote_commands += ' && sudo unzip %s -d %s' % (destination, config['unpack']) + else: + cluster.log.verbose('Tar will be used for unpacking') + remote_commands += ' && sudo tar -zxf %s -C %s' % (destination, config['unpack']) + + remote_commands += ' && sudo tar -tf %s | xargs -I FILE sudo chmod %s %s/FILE' \ + % (destination, config['mode'], config['unpack']) + remote_commands += ' && sudo tar -tf %s | xargs -I FILE sudo chown %s %s/FILE' \ + % (destination, config['owner'], config['unpack']) + remote_commands += ' && sudo tar -tf %s | xargs -I FILE sudo ls -la %s/FILE' % (destination, config['unpack']) + + return common_group.sudo(remote_commands) + + +def install_all_thirparties(group): + cluster = group.cluster + log = cluster.log + + if not group.cluster.inventory['services'].get('thirdparties', {}): + return + + for destination, config in group.cluster.inventory['services']['thirdparties'].items(): + skip_thirdparty = False + + if cluster.context.get("initial_procedure") != "add_node": + # TODO: speed up algorithm via else/continue/break + for plugin_name, plugin_configs in group.cluster.inventory['plugins'].items(): + for plugin_procedure in plugin_configs['installation']['procedures']: + if plugin_procedure.get('thirdparty') == destination: + log.verbose('Thirdparty \'%s\' should be installed with \'%s\' plugin' + % (destination, plugin_name)) + skip_thirdparty = True + + if skip_thirdparty: + log.verbose('Thirdparty %s installation delayed' % destination) + else: + res = install_thirdparty(group.cluster, destination, config) + log.debug(res) diff --git a/kubetool/yum.py b/kubetool/yum.py new file mode 100644 index 000000000..540690adf --- /dev/null +++ b/kubetool/yum.py @@ -0,0 +1,83 @@ +import configparser +import io + + +def ls_repofiles(group): + return group.sudo('ls -la /etc/yum.repos.d') + + +def backup_repo(group, repo_filename="*"): + if not group.cluster.inventory['services']['packages']['package_manager']['replace-repositories']: + group.cluster.log.debug("Skipped - repos replacement disabled in configuration") + return + # all files in directory will be renamed: xxx.repo -> xxx.repo.bak + # if there already any files with ".bak" extension, they should not be renamed to ".bak.bak"! 
+ return group.sudo("find /etc/yum.repos.d/ -type f -name '%s.repo' | sudo xargs -iNAME mv -f NAME NAME.bak" % repo_filename) + + +def add_repo(group, repo_data="", repo_filename="predefined"): + # if repo_data is dict, then convert it to string with config inside + if isinstance(repo_data, dict): + config = configparser.ConfigParser() + for repo_id, data in repo_data.items(): + config[repo_id] = data + repo_data = io.StringIO() + config.write(repo_data) + group.put(repo_data, '/etc/yum.repos.d/%s.repo' % repo_filename, sudo=True) + return group.sudo('yum clean all && sudo yum updateinfo') + + +def clean(group, mode="all"): + return group.sudo("yum clean %s" % mode) + + +def install(group, include=None, exclude=None): + if include is None: + raise Exception('You must specify included packages to install') + + if isinstance(include, list): + include = ' '.join(include) + command = 'yum install -y %s' % include + + if exclude is not None: + if isinstance(exclude, list): + exclude = ','.join(exclude) + command += ' --exclude=%s' % exclude + command += f"; rpm -q {include}; if [ $? != 0 ]; then echo \"Failed to check version for some packages. " \ + f"Make sure packages are not already installed with higher versions. " \ + f"Also, make sure user-defined packages have rpm-compatible names. \"; exit 1; fi " + install_result = group.sudo(command) + + return install_result + + +def remove(group, include=None, exclude=None): + if include is None: + raise Exception('You must specify included packages to remove') + + if isinstance(include, list): + include = ' '.join(include) + command = 'yum remove -y %s' % include + + if exclude is not None: + if isinstance(exclude, list): + exclude = ','.join(exclude) + command += ' --exclude=%s' % exclude + + return group.sudo(command) + + +def upgrade(group, include=None, exclude=None): + if include is None: + raise Exception('You must specify included packages to upgrade') + + if isinstance(include, list): + include = ' '.join(include) + command = 'yum upgrade -y %s' % include + + if exclude is not None: + if isinstance(exclude, list): + exclude = ','.join(exclude) + command += ' --exclude=%s' % exclude + + return group.sudo(command) diff --git a/kubetools b/kubetools new file mode 100755 index 000000000..11eccc4eb --- /dev/null +++ b/kubetools @@ -0,0 +1,11 @@ +#!/bin/bash + +# This env variable is used to force the stdout and stderr streams to be unbuffered in python. +# By default we set this variable to true, because buffering sometimes causes very long output hanging => bad UX. +# Users can override this variable and set it to false if unbuffered output causes any issues, but python output may start hanging again. 
+if [ -z "${PYTHONUNBUFFERED}" ]; then + export PYTHONUNBUFFERED=TRUE +fi + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" +python3 "$SCRIPT_DIR"/kubetools.py $@ \ No newline at end of file diff --git a/kubetools.py b/kubetools.py new file mode 100644 index 000000000..903324c82 --- /dev/null +++ b/kubetools.py @@ -0,0 +1,4 @@ +from kubetool import __main__ + +if __name__ == '__main__': + __main__.main() diff --git a/main.spec b/main.spec new file mode 100644 index 000000000..7ddd8d726 --- /dev/null +++ b/main.spec @@ -0,0 +1,67 @@ +# -*- mode: python ; coding: utf-8 -*- + +block_cipher = None +options = [ ('u', None, 'OPTION')] + + +a = Analysis(['./kubetool/__main__.py'], + hiddenimports=[ + 'kubetool.procedures.add_node', + 'kubetool.procedures.check_iaas', + 'kubetool.procedures.check_paas', + 'kubetool.procedures.do', + 'kubetool.procedures.install', + 'kubetool.procedures.manage_psp', + 'kubetool.procedures.remove_node', + 'kubetool.procedures.upgrade', + 'kubetool.procedures.cert_renew', + 'kubetool.procedures.backup', + 'kubetool.procedures.restore', + 'kubetool.procedures.reboot' + ], + pathex=['./'], + binaries=[], + datas=[ + ('./kubetool/resources/configurations/*', './kubetool/resources/configurations'), + ('./kubetool/resources/psp/*', './kubetool/resources/psp'), + ('./kubetool/resources/reports/*', './kubetool/resources/reports'), + ('./kubetool/resources/scripts/*', './kubetool/resources/scripts'), + ('./kubetool/resources/drop_ins/*', './kubetool/resources/drop_ins'), + ('./kubetool/templates/*', './kubetool/templates'), + ('./kubetool/templates/plugins/*', './kubetool/templates/plugins'), + ('./kubetool/plugins/*', './kubetool/plugins') + ], + runtime_hooks=[], + excludes=[], + win_no_prefer_redirects=False, + win_private_assemblies=False, + cipher=block_cipher, + noarchive=False) + +pyz = PYZ(a.pure, a.zipped_data, + cipher=block_cipher) + +exe = EXE(pyz, + a.scripts, + a.binaries, + a.zipfiles, + a.datas, + options, + exclude_binaries=False, + name='kubetools', + debug=False, + bootloader_ignore_signals=False, + strip=False, + upx=True, + console=False) + +coll = COLLECT(exe, + a.scripts, + a.binaries, + a.zipfiles, + a.datas, + debug=False, + strip=False, + upx=True, + upx_exclude=[], + name='main') diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 000000000..07de284aa --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["setuptools", "wheel"] +build-backend = "setuptools.build_meta" \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 000000000..9b72a35f2 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,9 @@ +PyYAML +deepmerge +fabric +jinja2==2.11.* +invoke +ruamel.yaml +ansible==2.9.9 +pygelf +toml \ No newline at end of file diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 000000000..d3bfa04b5 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,27 @@ +[metadata] +name = kubetool +# Version should not be hard-coded here. +# It should be calculated dynamically in "setup.py" in integration with CI during build/publication process. +# We do not have integration with build/publication process, so effectively we do not use version. +version = 0.0.1 + +[options] +packages = find: +python_requires = >=3.7 +install_requires = + PyYAML + deepmerge + fabric + jinja2==2.11.* + invoke + ruamel.yaml + ansible==2.9.9 + pygelf + toml + +[options.packages.find] +exclude = test, test.* + +# Include all resources required by kubetool in runtime. 
+[options.package_data] +* = * \ No newline at end of file diff --git a/test/__init__.py b/test/__init__.py new file mode 100755 index 000000000..ddcac9e49 --- /dev/null +++ b/test/__init__.py @@ -0,0 +1,4 @@ +#!/usr/bin/env python3 + +if __name__ == '__main__': + pass diff --git a/test/unit/__init__.py b/test/unit/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test/unit/core/__init__.py b/test/unit/core/__init__.py new file mode 100755 index 000000000..ddcac9e49 --- /dev/null +++ b/test/unit/core/__init__.py @@ -0,0 +1,4 @@ +#!/usr/bin/env python3 + +if __name__ == '__main__': + pass diff --git a/test/unit/core/test_flow.py b/test/unit/core/test_flow.py new file mode 100755 index 000000000..fcb85ffb9 --- /dev/null +++ b/test/unit/core/test_flow.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python3 + +import unittest +import ast +from unittest import mock + +from kubetool.core import flow +from kubetool import demo + +test_msg = "test_function_return_result" + + +def test_func(cluster): + try: + # Need to fill values in cluster context in some tests to know that function was called + current_value = cluster.context.get("test_info") + if current_value is None: + cluster.context["test_info"] = 1 + else: + cluster.context["test_info"] = current_value + 1 + except Exception as ex: + print(ex) + return test_msg + + +tasks = { + "deploy": { + "loadbalancer": { + "haproxy": test_func, + "keepalived": test_func + }, + "accounts": test_func + }, + "overview": test_func +} + + +def replace_a_func_in_dict(test_res): + test_res_str = str(test_res).replace(str(test_func), "'a'") + return ast.literal_eval(test_res_str) + + +class FlowTest(unittest.TestCase): + def test_filter_flow_1(self): + test_tasks = ["deploy.loadbalancer.haproxy"] + + test_res = flow.filter_flow(tasks, test_tasks, "") + test_res = replace_a_func_in_dict(test_res) + + expected_res = {'deploy': {'loadbalancer': {'haproxy': 'a'}}} + self.assertEqual(expected_res, test_res, "Incorrect filtered flow.") + + def test_filter_flow_2(self): + test_tasks = ["deploy"] + + test_res = flow.filter_flow(tasks, test_tasks, "") + test_res = replace_a_func_in_dict(test_res) + + expected_res = {'deploy': {'accounts': 'a', 'loadbalancer': {'haproxy': 'a', 'keepalived': 'a'}}} + self.assertEqual(expected_res, test_res, "Incorrect filtered flow.") + + def test_filter_flow_3(self): + test_tasks = ["deploy.loadbalancer.haproxy", "overview"] + + test_res = flow.filter_flow(tasks, test_tasks, "") + test_res = replace_a_func_in_dict(test_res) + + expected_res = {'deploy': {'loadbalancer': {'haproxy': 'a'}}, 'overview': 'a'} + self.assertEqual(expected_res, test_res, "Incorrect filtered flow.") + + def test_filter_flow_excluded(self): + test_tasks = ["deploy"] + excluded_tasks = ["deploy.loadbalancer"] + + test_res = flow.filter_flow(tasks, test_tasks, excluded_tasks) + test_res = replace_a_func_in_dict(test_res) + + expected_res = {'deploy': {'accounts': 'a'}} + self.assertEqual(expected_res, test_res, "Incorrect filtered flow.") + + def test_schedule_cumulative_point(self): + cluster = demo.new_cluster(demo.generate_inventory(**demo.FULLHA)) + flow.schedule_cumulative_point(cluster, test_func) + points = cluster.context["scheduled_cumulative_points"] + self.assertIn(test_func, points, "Test cumulative point was not added to cluster context") + + def test_add_task_to_proceeded_list(self): + cluster = demo.new_cluster(demo.generate_inventory(**demo.FULLHA)) + task_path = "prepare" + flow.add_task_to_proceeded_list(cluster, task_path) + 
proceeded_tasks = cluster.context["proceeded_tasks"] + self.assertIn(task_path, proceeded_tasks, "Test proceeded task was not added to cluster context") + + def test_proceed_cumulative_point(self): + cluster = demo.new_cluster(demo.generate_inventory(**demo.FULLHA)) + cumulative_points = { + 'core.test_flow.test_func': ['prepare.system.modprobe'] + } + flow.schedule_cumulative_point(cluster, test_func) + res = flow.proceed_cumulative_point(cluster, cumulative_points, "prepare.system.modprobe") + self.assertIn(test_msg, str(res)) + + def test_run_flow(self): + cluster = demo.new_cluster(demo.generate_inventory(**demo.FULLHA)) + flow.run_flow(tasks, cluster, {}) + + self.assertEqual(4, cluster.context["test_info"], "Here should be 4 calls of test_func for: \ + deploy.loadbalancer.haproxy, deploy.loadbalancer.keepalived, deploy.accounts, overview.") + + @mock.patch('kubetool.core.flow.load_inventory', return_value=demo.new_cluster(demo.generate_inventory(**demo.FULLHA))) + def test_run(self, patched_func): + test_tasks = ["deploy.loadbalancer.haproxy"] + args = flow.new_parser("Help text").parse_args(['-v']) + flow.run(tasks, test_tasks, [], {}, flow.create_context(args)) + cluster = patched_func.return_value + self.assertEqual(1, cluster.context["test_info"], + "It had to be one call of test_func for deploy.loadbalancer.haproxy action") + + +if __name__ == '__main__': + unittest.main() diff --git a/test/unit/docker/__init__.py b/test/unit/docker/__init__.py new file mode 100755 index 000000000..e142aff71 --- /dev/null +++ b/test/unit/docker/__init__.py @@ -0,0 +1,9 @@ +#!/usr/bin/env python3 + +import docker + +# connect to local docker daemon +# start containers +# try to upload/download files +# try to update/checkout repo, install/delete/update packages +# and etc. 
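The stub in test/unit/docker/__init__.py above only lists the planned container-based integration checks as comments. A minimal sketch of that idea, assuming the docker SDK for Python (not listed in requirements.txt) is available and a local Docker daemon is running; the image, command, and helper name below are illustrative placeholders rather than part of the patch:

    import docker

    def run_disposable_container(image='centos:7'):
        # connect to the local docker daemon
        client = docker.from_env()
        # start a throwaway container that stays alive until stopped
        container = client.containers.run(image, 'sleep infinity', detach=True)
        try:
            # run a command inside the container, e.g. list installed packages
            exit_code, output = container.exec_run('rpm -qa')
            return exit_code, output.decode()
        finally:
            container.stop()
            container.remove()

Such disposable containers could then be used to exercise file upload/download and package install/remove flows end to end, as the comments suggest.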
diff --git a/test/unit/k8s_cert_test.py b/test/unit/k8s_cert_test.py new file mode 100644 index 000000000..b7101b025 --- /dev/null +++ b/test/unit/k8s_cert_test.py @@ -0,0 +1,38 @@ +import unittest + +from kubetool import k8s_certs + + +class K8sCertTest(unittest.TestCase): + + def test_certs_verify_succeeds(self): + self.assertTrue(k8s_certs.verify_certs_supported(k8s_certs.supported_k8s_certs)) + + def test_certs_verify_fails(self): + with self.assertRaisesRegex(Exception, "Found unsupported cert"): + k8s_certs.verify_certs_supported(["bad test"]) + + def test_single_all_verify_succeeds(self): + self.assertTrue(k8s_certs.verify_all_is_absent_or_single(["all"])) + + def test_single_all_verify_succeeds_absent(self): + self.assertTrue(k8s_certs.verify_all_is_absent_or_single(["absent all", "and something else"])) + + def test_single_all_verify_fails(self): + with self.assertRaisesRegex(Exception, "Found 'all' in certs list, but it is not single"): + k8s_certs.verify_all_is_absent_or_single(["all", "and something else"]) + + def test_correct_cert_list_format(self): + self.assertTrue(k8s_certs.verify_cert_list_format(["list", "of", "certs"])) + + def test_none_cert_list(self): + with self.assertRaisesRegex(Exception, "Incorrect k8s certs renew configuration"): + k8s_certs.verify_cert_list_format(None) + + def test_non_list_cert_list(self): + with self.assertRaisesRegex(Exception, "Incorrect k8s certs renew configuration"): + k8s_certs.verify_cert_list_format("value") + + def test_empty_cert_list(self): + with self.assertRaisesRegex(Exception, "Incorrect k8s certs renew configuration"): + k8s_certs.verify_cert_list_format([]) \ No newline at end of file diff --git a/test/unit/plugins/__init__.py b/test/unit/plugins/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test/unit/plugins/test_template.py b/test/unit/plugins/test_template.py new file mode 100644 index 000000000..ead217926 --- /dev/null +++ b/test/unit/plugins/test_template.py @@ -0,0 +1,108 @@ +import os +import unittest + +from kubetool import demo +from kubetool.core import utils +from kubetool.plugins import verify_template, apply_template + + +class TestTemplate(unittest.TestCase): + def test_verify_template(self): + test_cases = [ + { + "name": "One yaml template", + "source": "test/unit/plugins/test_templates/test_template1.yaml", + "valid": True, + }, + { + "name": "Wildcard path matching three yaml templates", + "source": "test/unit/plugins/test_templates/*.yaml", + "valid": True, + }, + { + "name": "Directory wildcard path matching two yaml templates", + "source": "test/unit/plugins/test_templates/*", + "valid": True, + }, + { + "name": "Wildcard path matching zero templates", + "source": "test/unit/plugins/test_templates/*.conf", + "valid": False, + }, + { + "name": "Path to non-existent template", + "source": "test/unit/plugins/test_templates/template.conf", + "valid": False, + }, + ] + + for tc in test_cases: + # Run the test + with self.subTest(tc["name"]): + # Create new test config with the source value + config = {"source": tc["source"]} + + if tc["valid"]: + # If test case is valid just run the function + verify_template(None, config) + else: + # If test case is not valid check for exception raise + self.assertRaises( + Exception, + verify_template, + None, config + ) + + + def test_apply_template(self): + test_cases = [ + { + "name": "One yaml template", + "create_files": ["./test_templates/test_template.yaml"], + "source": "test/unit/plugins/test_templates/test_template1.yaml", + "valid": 
True, + }, + { + "name": "Wildcard path matching three yaml templates", + "source": "test/unit/plugins/test_templates/*.yaml", + "valid": True, + }, + { + "name": "Directory wildcard path matching two yaml templates", + "source": "test/unit/plugins/test_templates/*", + "valid": True, + }, + { + "name": "Wildcard path matching zero templates", + "source": "test/unit/plugins/test_templates/*.conf", + "valid": False, + }, + { + "name": "Path to non-existent template", + "source": "test/unit/plugins/test_templates/template.conf", + "valid": False, + }, + ] + + for tc in test_cases: + # Run the test + with self.subTest(tc["name"]): + # Create new fake cluster + cluster = demo.new_cluster( + demo.generate_inventory(**demo.FULLHA)) + # Create new test config with the source value + config = { + "source": tc["source"], + 'apply_required': False, + } + + if tc["valid"]: + # If test case is valid just run the function + apply_template(cluster, config) + else: + # If test case is not valid check for exception raise + self.assertRaises( + Exception, + apply_template, + cluster, config + ) diff --git a/test/unit/plugins/test_templates/test_template1.yaml b/test/unit/plugins/test_templates/test_template1.yaml new file mode 100644 index 000000000..e69de29bb diff --git a/test/unit/plugins/test_templates/test_template2.yaml b/test/unit/plugins/test_templates/test_template2.yaml new file mode 100644 index 000000000..e69de29bb diff --git a/test/unit/plugins/test_templates/test_template3.yaml b/test/unit/plugins/test_templates/test_template3.yaml new file mode 100644 index 000000000..e69de29bb diff --git a/test/unit/test_coredns.py b/test/unit/test_coredns.py new file mode 100755 index 000000000..0c764ac0b --- /dev/null +++ b/test/unit/test_coredns.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python3 + +import unittest + +from kubetool import coredns, system, demo + + +class CorednsDefaultsEnrichment(unittest.TestCase): + + def test_add_hosts_config(self): + inventory = demo.generate_inventory(**demo.MINIHA_KEEPALIVED) + cluster = demo.new_cluster(inventory) + generated_hosts = system.generate_etc_hosts_config(cluster.inventory) + self.assertEquals(generated_hosts, cluster.inventory['services']['coredns'].get('configmap').get('Hosts')) + + def test_already_defined_hosts_config(self): + inventory = demo.generate_inventory(**demo.MINIHA_KEEPALIVED) + inventory['services'] = { + 'coredns': { + 'configmap': { + 'Hosts': '1.2.3.4 example.org' + } + } + } + cluster = demo.new_cluster(inventory) + self.assertEquals('1.2.3.4 example.org', cluster.inventory['services']['coredns']['configmap']['Hosts']) + + +class CorednsGenerator(unittest.TestCase): + + def test_configmap_generation(self): + inventory = demo.generate_inventory(**demo.MINIHA_KEEPALIVED) + inventory['services'] = { + 'coredns': { + 'configmap': { + 'Corefile': { + '.:53': { + 'errors': True, + 'prometheus': ':9153', + 'cache': 30, + 'kubernetes': { + 'default': { + 'zone': [ + 'test' + ], + 'data': { + 'pods': 'insecure', + 'fallthrough': [ + 'ip6.arpa' + ], + 'ttl': 30 + } + } + }, + 'template': { + 'default': { + 'class': 'IN', + 'type': 'A', + 'zone': 'test', + 'data': { + 'match': '^(.*\.)?localhost\.$', + 'answer': '{{ .Name }} 3600 IN A 1.1.1.1' + } + } + }, + 'forward': [ + '.', + '/etc/resolv.conf', + ], + } + } + } + } + } + + config = coredns.generate_configmap(inventory) + self.assertEqual('''apiVersion: v1 +kind: ConfigMap +metadata: + name: coredns + namespace: kube-system +data: + Corefile: | + .:53 { + errors + prometheus :9153 + cache 30 + 
kubernetes test { + pods insecure + fallthrough ip6.arpa + ttl 30 + } + template IN A test { + match ^(.*\.)?localhost\.$ + answer "{{ .Name }} 3600 IN A 1.1.1.1" + } + forward . /etc/resolv.conf + } +''', config) + + def test_configmap_generation_with_hosts(self): + inventory = demo.generate_inventory(**demo.MINIHA) + cluster = demo.new_cluster(inventory) + config = coredns.generate_configmap(cluster.inventory) + self.assertIn('Hosts: |', config) + self.assertIn('192.168.0.2 master-1.k8s.fake.local', config) diff --git a/test/unit/test_defaults.py b/test/unit/test_defaults.py new file mode 100755 index 000000000..2e6535652 --- /dev/null +++ b/test/unit/test_defaults.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python3 + +import unittest + +from kubetool.core import defaults +from kubetool import demo + + +class DefaultsEnrichmentAppendControlPlain(unittest.TestCase): + + def test_controlplain_already_defined(self): + inventory = demo.generate_inventory(**demo.MINIHA_KEEPALIVED) + inventory['control_plain'] = { + 'internal': '1.1.1.1', + 'external': '2.2.2.2' + } + inventory = defaults.append_controlplain(inventory, None) + self.assertEqual(inventory['control_plain']['internal'], '1.1.1.1') + self.assertEqual(inventory['control_plain']['external'], '2.2.2.2') + + def test_controlplain_already_internal_defined(self): + inventory = demo.generate_inventory(**demo.MINIHA_KEEPALIVED) + inventory['control_plain'] = { + 'internal': '1.1.1.1' + } + inventory = defaults.append_controlplain(inventory, None) + self.assertEqual(inventory['control_plain']['internal'], '1.1.1.1') + self.assertEqual(inventory['control_plain']['external'], inventory['nodes'][0]['address']) + + def test_controlplain_already_external_defined(self): + inventory = demo.generate_inventory(**demo.MINIHA_KEEPALIVED) + inventory['control_plain'] = { + 'external': '2.2.2.2' + } + inventory = defaults.append_controlplain(inventory, None) + self.assertEqual(inventory['control_plain']['internal'], inventory['vrrp_ips'][0]) + self.assertEqual(inventory['control_plain']['external'], '2.2.2.2') + + def test_controlplain_calculated_half_vrrp_half_master(self): + inventory = demo.generate_inventory(**demo.MINIHA_KEEPALIVED) + inventory = defaults.append_controlplain(inventory, None) + self.assertEqual(inventory['control_plain']['internal'], inventory['vrrp_ips'][0]) + self.assertEqual(inventory['control_plain']['external'], inventory['nodes'][0]['address']) + + def test_controlplain_calculated_fully_vrrp(self): + inventory = demo.generate_inventory(**demo.MINIHA_KEEPALIVED) + inventory['vrrp_ips'][0] = { + 'ip': '192.168.0.1', + 'floating_ip': inventory['vrrp_ips'][0] + } + inventory = defaults.append_controlplain(inventory, None) + self.assertEqual(inventory['control_plain']['internal'], inventory['vrrp_ips'][0]['ip']) + self.assertEqual(inventory['control_plain']['external'], inventory['vrrp_ips'][0]['floating_ip']) + + def test_controlplain_calculated_half_fully_master(self): + inventory = demo.generate_inventory(**demo.MINIHA) + inventory = defaults.append_controlplain(inventory, None) + self.assertEqual(inventory['control_plain']['internal'], inventory['nodes'][0]['internal_address']) + self.assertEqual(inventory['control_plain']['external'], inventory['nodes'][0]['address']) + + def test_controlplain_control_endpoint_vrrp(self): + inventory = demo.generate_inventory(**demo.MINIHA) + inventory['vrrp_ips'] = [ + { + 'ip': '192.168.0.1', + 'floating_ip': '1.1.1.1' + }, + { + 'ip': '192.168.0.2', + 'floating_ip': '2.2.2.2', + 
'control_endpoint': True + } + ] + inventory = defaults.append_controlplain(inventory, None) + self.assertEqual(inventory['control_plain']['internal'], '192.168.0.2') + self.assertEqual(inventory['control_plain']['external'], '2.2.2.2') + + def test_controlplain_control_half_endpoint_vrrp(self): + inventory = demo.generate_inventory(**demo.MINIHA) + inventory['vrrp_ips'] = [ + { + 'ip': '192.168.0.1', + 'floating_ip': '1.1.1.1' + }, + { + 'ip': '192.168.0.2', + 'control_endpoint': True + } + ] + inventory = defaults.append_controlplain(inventory, None) + self.assertEqual(inventory['control_plain']['internal'], '192.168.0.2') + self.assertEqual(inventory['control_plain']['external'], '1.1.1.1') + + def test_controlplain_control_half_endpoint_vrrp_half_master(self): + inventory = demo.generate_inventory(**demo.MINIHA) + inventory['vrrp_ips'] = [ + { + 'ip': '192.168.0.1', + }, + { + 'ip': '192.168.0.2', + 'control_endpoint': True + } + ] + inventory = defaults.append_controlplain(inventory, None) + self.assertEqual(inventory['control_plain']['internal'], '192.168.0.2') + self.assertEqual(inventory['control_plain']['external'], inventory['nodes'][0]['address']) + + def test_controlplain_control_half_endpoint_vrrp_half_endpoint_master(self): + inventory = demo.generate_inventory(**demo.MINIHA) + inventory['vrrp_ips'] = [ + { + 'ip': '192.168.0.1', + }, + { + 'ip': '192.168.0.2', + 'control_endpoint': True + } + ] + inventory['nodes'][1]['control_endpoint'] = True + inventory = defaults.append_controlplain(inventory, None) + self.assertEqual(inventory['control_plain']['internal'], '192.168.0.2') + self.assertEqual(inventory['control_plain']['external'], inventory['nodes'][1]['address']) diff --git a/test/unit/test_demo.py b/test/unit/test_demo.py new file mode 100755 index 000000000..9da06233d --- /dev/null +++ b/test/unit/test_demo.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python3 + +import io +import unittest + +from kubetool import demo, system + + +class TestInventoryGenerator(unittest.TestCase): + + def test_fullha_generation(self): + inventory = demo.generate_inventory(balancer=1, master=3, worker=3) + self.assertEqual(7, len(inventory['nodes']), msg="The received number of nodes does not match the expected") + + +class TestNewCluster(unittest.TestCase): + + def test_created_cluster_groups(self): + cluster = demo.new_cluster(demo.generate_inventory(**demo.FULLHA)) + self.assertEqual(1, len(cluster.nodes['balancer'].nodes), msg="Incorrect number of balancers for a full scheme") + + +class TestFakeShell(unittest.TestCase): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.cluster = demo.new_cluster(demo.generate_inventory(**demo.FULLHA)) + + def test_run(self): + self.cluster.fake_shell.add(demo.create_nodegroup_result(self.cluster.nodes['all'], stdout='anonymous'), + 'run', ['whoami']) + + results = self.cluster.nodes['all'].run('whoami') + for conn, result in results.items(): + self.assertEqual('anonymous', result.stdout, msg="Invalid fake nodegroup result stdout") + + def test_calculate_calls(self): + self.cluster.fake_shell.reset() + self.cluster.fake_shell.add(demo.create_nodegroup_result(self.cluster.nodes['all'], + code=1, stderr='sudo: kubectl: command not found'), + 'sudo', ['kubectl cluster-info']) + self.cluster.fake_shell.add(demo.create_nodegroup_result(self.cluster.nodes['all'], code=-1), + 'sudo', [self.cluster.globals['nodes']['boot']['reboot_command']]) + self.cluster.fake_shell.add(demo.create_nodegroup_result(self.cluster.nodes['all'], 
stdout='example result'), + 'run', ['last reboot'], usage_limit=1) + self.cluster.fake_shell.add(demo.create_nodegroup_result(self.cluster.nodes['all'], stdout='example result 2'), + 'run', ['last reboot'], usage_limit=1) + + system.reboot_nodes(self.cluster.nodes['master']) + + self.assertEqual(2, + len(self.cluster.fake_shell.history_find('run', ['last reboot'])), + msg="Wrong number of reboots in history") + + +class TestFakeFS(unittest.TestCase): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.cluster = demo.new_cluster(demo.generate_inventory(**demo.FULLHA)) + + def test_put_string(self): + self.cluster.fake_fs.reset() + + expected_data = 'hello\nworld' + node_hostname = list(self.cluster.nodes['master'].nodes.keys())[0] + + self.cluster.fake_fs.write(node_hostname, '/tmp/test/file.txt', expected_data) + actual_data = self.cluster.fake_fs.read(node_hostname, '/tmp/test/file.txt') + + self.assertEqual(expected_data, actual_data, msg="Written and read data are not equal") + + def test_put_stringio(self): + self.cluster.fake_fs.reset() + + expected_data = io.StringIO('hello\nworld') + node_hostname = list(self.cluster.nodes['master'].nodes.keys())[0] + + self.cluster.fake_fs.write(node_hostname, '/tmp/test/file.txt', expected_data) + actual_data = self.cluster.fake_fs.read(node_hostname, '/tmp/test/file.txt') + + self.assertEqual(expected_data.getvalue(), actual_data, msg="Written and read data are not equal") + + def test_get_nonexistent(self): + self.cluster.fake_fs.reset() + + node_hostname = list(self.cluster.nodes['master'].nodes.keys())[0] + actual_data = self.cluster.fake_fs.read(node_hostname, '/tmp/test/file.txt') + self.assertIsNone(actual_data, msg="Reading did not return None in response") + + def test_write_file_to_cluster(self): + self.cluster.fake_fs.reset() + + expected_data = 'hello\nworld' + self.cluster.nodes['master'].put(io.StringIO(expected_data), '/tmp/test/file.txt') + actual_data_group = self.cluster.fake_fs.group_read(self.cluster.nodes['master'], '/tmp/test/file.txt') + + for host, actual_data in actual_data_group.items(): + self.assertEqual(expected_data, actual_data, msg="Written and read data are not equal for node %s" % host) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/unit/test_group.py b/test/unit/test_group.py new file mode 100755 index 000000000..6bc6c42d9 --- /dev/null +++ b/test/unit/test_group.py @@ -0,0 +1,95 @@ +#!/usr/bin/env python3 + +import unittest +import random + +import fabric + +from kubetool import demo +from kubetool.demo import FakeKubernetesCluster + + +class TestGroupCreation(unittest.TestCase): + + # Test should from the following cluster: + # master-1 roles: [master, worker] + # worker-1 roles: [worker] + # Get only node with single worker role using filter lambda function + def test_new_group_from_lambda_filter(self): + multirole_inventory = demo.generate_inventory(balancer=0, master=1, worker=['master-1', 'worker-1']) + cluster = demo.new_cluster(multirole_inventory) + + expected_group = cluster.make_group(list(cluster.nodes['worker'].nodes.keys())[1:]) + filtered_group = cluster.nodes['worker'].new_group(apply_filter=lambda node: 'master' not in node['roles']) + + self.assertDictEqual(expected_group.nodes, filtered_group.nodes, msg="Filtered groups do not match") + + def test_exclude_group(self): + inventory = demo.generate_inventory(balancer=2, master=2, worker=0) + cluster = demo.new_cluster(inventory) + + result_group = 
cluster.nodes['all'].exclude_group(cluster.nodes['balancer']) + + self.assertDictEqual(cluster.nodes['master'].nodes, result_group.nodes, msg="Final groups do not match") + + def test_exclude_group_2(self): + multirole_inventory = demo.generate_inventory(balancer=0, master=1, worker=['master-1', 'worker-1']) + cluster = demo.new_cluster(multirole_inventory) + + expected_group = cluster.make_group(list(cluster.nodes['worker'].nodes.keys())[1:]) + result_group = cluster.nodes['worker'].exclude_group(cluster.nodes['master']) + + self.assertDictEqual(expected_group.nodes, result_group.nodes, msg="Final groups do not match") + + def test_include_group(self): + inventory = demo.generate_inventory(balancer=2, master=2, worker=0) + cluster = demo.new_cluster(inventory) + + result_group = cluster.nodes['balancer'].include_group(cluster.nodes['master']) + + self.assertDictEqual(cluster.nodes['all'].nodes, result_group.nodes, msg="Final groups do not match") + + +class TestGroupCall(unittest.TestCase): + cluster: FakeKubernetesCluster = None + + @classmethod + def setUpClass(cls): + cls.cluster = demo.new_cluster(demo.generate_inventory(**demo.FULLHA)) + + def tearDown(self): + TestGroupCall.cluster.fake_shell.reset() + + def test_run_empty_group(self): + # the bug reproduces inside _do(), which is why it is necessary to use a real cluster + cluster = demo.new_cluster(demo.generate_inventory(**demo.FULLHA), fake=False) + empty_group = cluster.nodes["worker"].new_group(apply_filter=lambda node: 'xxx' in node['roles']) + # if there are no nodes in the empty group, an exception should not be raised - an empty result should be returned + empty_group.run('whoami', is_async=True) + empty_group.run('whoami', is_async=False) + + def test_GroupException_one_node_failed(self): + all_nodes = TestGroupCall.cluster.nodes["all"] + results = demo.create_nodegroup_result(all_nodes, stdout='example result') + results[random.choice(list(all_nodes.nodes.keys()))] = Exception('Some error') + + TestGroupCall.cluster.fake_shell.add(results, "run", ['some command']) + + exception = None + try: + all_nodes.run('some command') + except fabric.group.GroupException as e: + exception = e + + self.assertIsNotNone(exception, msg="GroupException should be raised") + nested_exc = 0 + for _, result in exception.result.items(): + if isinstance(result, Exception): + nested_exc += 1 + self.assertEqual('Some error', result.args[0], msg="Unexpected exception message") + + self.assertEqual(1, nested_exc, msg="One wrapped exception should happen") + + +if __name__ == '__main__': + unittest.main() diff --git a/test/unit/test_haproxy.py b/test/unit/test_haproxy.py new file mode 100755 index 000000000..413386564 --- /dev/null +++ b/test/unit/test_haproxy.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python3 + +import unittest + +from kubetool import haproxy, packages +from kubetool import demo + + +class HAProxyDefaultsEnrichment(unittest.TestCase): + + def test_correct_inventory(self): + inventory = demo.generate_inventory(**demo.MINIHA_KEEPALIVED) + print("Inventory: " + str(inventory)) + cluster = demo.new_cluster(inventory) + haproxy.enrich_inventory(cluster.inventory, None) + + def test_inventory_verify_multirole_balancer_without_keepalived(self): + inventory = demo.generate_inventory(master=3, balancer=['master-1', 'master-2', 'master-3'], + worker=['master-1', 'master-2', 'master-3'], keepalived=0) + + print("Inventory: " + str(inventory)) + + with self.assertRaises(Exception) as cm: + demo.new_cluster(inventory) + +
self.assertIn(haproxy.ERROR_VRRP_IS_NOT_CONFIGURED, str(cm.exception), "Invalid exception message") + + +class TestHaproxyInstallation(unittest.TestCase): + + def test_haproxy_installation_when_already_installed(self): + inventory = demo.generate_inventory(**demo.FULLHA) + cluster = demo.new_cluster(inventory) + + package_associations = cluster.inventory['services']['packages']['associations']['haproxy'] + + # simulate already installed haproxy package + expected_results_1 = demo.create_nodegroup_result(cluster.nodes['balancer'], stdout='Haproxy v1.2.3') + cluster.fake_shell.add(expected_results_1, 'sudo', ['%s -v' % package_associations['executable_name']]) + + # simulate mkdir command + expected_results_2 = demo.create_nodegroup_result(cluster.nodes['balancer']) + cluster.fake_shell.add(expected_results_2, 'sudo', ["mkdir -p /etc/systemd/system/rh-haproxy18-haproxy.service.d"]) + + # simulate systemd daemon reload + expected_results_3 = demo.create_nodegroup_result(cluster.nodes['balancer']) + cluster.fake_shell.add(expected_results_3, 'sudo', ["systemctl daemon-reload"]) + + # simulate enable package command + expected_results_4 = demo.create_nodegroup_result(cluster.nodes['balancer'], stdout='ok') + cluster.fake_shell.add(expected_results_4, 'sudo', ['systemctl enable %s --now' % package_associations['service_name']]) + + # start installation + actual_result = haproxy.install(cluster.nodes['balancer']) + + # verify installation result should be the same as simulated and contain version print stdout + expected_results_1 = cluster.nodes["all"]._make_result(expected_results_1) + + # TODO: this section is not compatible with RemoteExecutor yet + # self.assertEqual(expected_results, actual_result) + + def test_haproxy_installation_when_not_installed(self): + inventory = demo.generate_inventory(**demo.FULLHA) + cluster = demo.new_cluster(inventory) + + package_associations = cluster.inventory['services']['packages']['associations']['haproxy'] + + # simulate haproxy package missing + missing_package_command = ['%s -v' % package_associations['executable_name']] + missing_package_result = demo.create_nodegroup_result(cluster.nodes['balancer'], + code=127, stderr='Command haproxy not found') + cluster.fake_shell.add(missing_package_result, 'sudo', missing_package_command) + + # simulate package installation + installation_command = ['yum install -y %s; rpm -q %s; if [ $? != 0 ]; then echo ' + '\"Failed to check version for some packages. ' + 'Make sure packages are not already installed with higher versions. ' + 'Also, make sure user-defined packages have rpm-compatible names. 
\"; exit 1; fi ' + % (package_associations['package_name'], package_associations['package_name'])] + expected_results = demo.create_nodegroup_result(cluster.nodes['balancer'], code=0, + stdout='Successfully installed haproxy') + cluster.fake_shell.add(expected_results, 'sudo', installation_command) + + # simulate package installation check command + check_command = [f'rpm -q {package_associations["package_name"]}'] + expected_results_1 = demo.create_nodegroup_result(cluster.nodes['balancer'], code=0, + stdout='All packages installed') + cluster.fake_shell.add(expected_results_1, 'sudo', check_command) + + # simulate mkdir command + expected_results_2 = demo.create_nodegroup_result(cluster.nodes['balancer']) + cluster.fake_shell.add(expected_results_2, 'sudo', ["mkdir -p /etc/systemd/system/rh-haproxy18-haproxy.service.d"]) + + # simulate systemd daemon reload + expected_results_3 = demo.create_nodegroup_result(cluster.nodes['balancer']) + cluster.fake_shell.add(expected_results_3, 'sudo', ["systemctl daemon-reload"]) + + # simulate enable package command + expected_results_4 = demo.create_nodegroup_result(cluster.nodes['balancer'], stdout='ok') + cluster.fake_shell.add(expected_results_4, 'sudo', ['systemctl enable %s --now' % package_associations['service_name']]) + + # start installation + actual_result = haproxy.install(cluster.nodes['balancer']) + + # verify installation result should be the same as simulated and contain version print stdout + expected_results = get_result_str(expected_results) + + self.assertEqual(expected_results, actual_result) + + +def get_result_str(results): + output = "" + for host, result in results.items(): + if output != "": + output += "\n" + output += "\t%s (%s): code=%i" % (host, 0, result.exited) + if result.stdout: + output += "\n\t\tSTDOUT: %s" % result.stdout.replace("\n", "\n\t\t ") + if result.stderr: + output += "\n\t\tSTDERR: %s" % result.stderr.replace("\n", "\n\t\t ") + + return output + + +if __name__ == '__main__': + unittest.main() diff --git a/test/unit/test_inventory.py b/test/unit/test_inventory.py new file mode 100755 index 000000000..2dde9821d --- /dev/null +++ b/test/unit/test_inventory.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python3 + +import unittest + +from kubetool import demo + + +class TestInventoryValidation(unittest.TestCase): + + def test_labels_check(self): + inventory = demo.generate_inventory(master=0, balancer=1, worker=0) + inventory["nodes"][0]["labels"] = {"should": "fail"} + with self.assertRaises(Exception) as context: + demo.new_cluster(inventory, fake=False) + + self.assertIn("Only 'worker' or 'master' nodes can have labels", str(context.exception)) + + def test_taints_check(self): + inventory = demo.generate_inventory(master=0, balancer=1, worker=0) + inventory["nodes"][0]["taints"] = ["should fail"] + with self.assertRaises(Exception) as context: + demo.new_cluster(inventory, fake=False) + + self.assertIn("Only 'worker' or 'master' nodes can have taints", str(context.exception)) + + def test_invalid_node_name(self): + inventory = demo.generate_inventory(master=1, balancer=0, worker=0) + inventory["nodes"][0]["name"] = "bad_node/name" + + with self.assertRaises(Exception): + demo.new_cluster(inventory, fake=False) + + def test_correct_node_name(self): + inventory = demo.generate_inventory(master=1, balancer=0, worker=0) + inventory["nodes"][0]["name"] = "correct-node.name123" + demo.new_cluster(inventory, fake=False) + + def test_new_group_from_nodes(self): + inventory = demo.generate_inventory(**demo.FULLHA_KEEPALIVED) 
+ cluster = demo.new_cluster(inventory) + group = cluster.create_group_from_groups_nodes_names([], ['balancer-1', 'master-1']) + self.assertEqual(2, len(group.nodes)) + + node_names = group.get_nodes_names() + self.assertIn('balancer-1', node_names) + self.assertIn('master-1', node_names) + + def test_new_group_from_groups(self): + inventory = demo.generate_inventory(**demo.FULLHA_KEEPALIVED) + cluster = demo.new_cluster(inventory) + group = cluster.create_group_from_groups_nodes_names(['master', 'balancer'], []) + self.assertEqual(5, len(group.nodes)) + + node_names = group.get_nodes_names() + self.assertIn('balancer-1', node_names) + self.assertIn('balancer-2', node_names) + self.assertIn('master-1', node_names) + self.assertIn('master-2', node_names) + self.assertIn('master-3', node_names) + + def test_new_group_from_nodes_and_groups_multi(self): + inventory = demo.generate_inventory(**demo.FULLHA_KEEPALIVED) + cluster = demo.new_cluster(inventory) + group = cluster.create_group_from_groups_nodes_names(['master'], ['balancer-1']) + self.assertEqual(4, len(group.nodes)) + + node_names = group.get_nodes_names() + self.assertIn('balancer-1', node_names) + self.assertIn('master-1', node_names) + self.assertIn('master-2', node_names) + self.assertIn('master-3', node_names) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/unit/test_keepalived.py b/test/unit/test_keepalived.py new file mode 100755 index 000000000..e3559ed3a --- /dev/null +++ b/test/unit/test_keepalived.py @@ -0,0 +1,259 @@ +#!/usr/bin/env python3 + +import unittest + +from kubetool import demo, keepalived + + +class TestKeepalivedDefaultsEnrichment(unittest.TestCase): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.inventory = demo.generate_inventory(**demo.FULLHA_KEEPALIVED) + self.cluster = demo.new_cluster(self.inventory) + self.cluster2 = demo.new_cluster(self.inventory) + + def test_no_vrrp_ips_defined(self): + inventory = demo.generate_inventory(**demo.FULLHA) + demo.new_cluster(inventory) + + def test_hosts_auto_detection(self): + self.assertIn(self.cluster.inventory.get('vrrp_ips')[0]['hosts'][0]['name'], ['balancer-1', 'balancer-2']) + self.assertIn(self.cluster.inventory.get('vrrp_ips')[0]['hosts'][1]['name'], ['balancer-1', 'balancer-2']) + + def test_vrrp_ips_conversion(self): + self.assertIsInstance(self.inventory.get('vrrp_ips')[0], str) + self.assertIsInstance(self.cluster.inventory.get('vrrp_ips')[0], dict) + self.assertEqual(self.inventory.get('vrrp_ips')[0], self.cluster.inventory.get('vrrp_ips')[0]['ip']) + + def test_auth_interface_detect(self): + self.assertEqual(self.cluster.inventory.get('vrrp_ips')[0]['hosts'][0]['interface'], 'eth0') + + def test_nondefault_interface_apply(self): + inventory = demo.generate_inventory(**demo.FULLHA_KEEPALIVED) + inventory['vrrp_ips'][0] = { + 'ip': inventory['vrrp_ips'][0], + 'interface': 'test' + } + cluster = demo.new_cluster(inventory) + + self.assertEqual(cluster.inventory.get('vrrp_ips')[0]['interface'], 'test') + + def test_default_router_id_generation(self): + self.assertIsNotNone(self.cluster.inventory.get('vrrp_ips')[0]['router_id']) + self.assertTrue(self.cluster.inventory.get('vrrp_ips')[0]['router_id'].isnumeric()) + self.assertEqual(self.cluster.inventory.get('vrrp_ips')[0]['router_id'], + self.cluster2.inventory.get('vrrp_ips')[0]['router_id']) + + def test_default_router_id_generation_ipv6(self): + inventory = demo.generate_inventory(**demo.FULLHA_KEEPALIVED) + inventory['vrrp_ips'] = ['::1'] + 
cluster = demo.new_cluster(inventory) + self.assertEqual(cluster.inventory.get('vrrp_ips')[0]['router_id'], '1') + + def test_default_router_id_generation_ipv6_2(self): + inventory = demo.generate_inventory(**demo.FULLHA_KEEPALIVED) + inventory['vrrp_ips'] = ['::'] + cluster = demo.new_cluster(inventory) + self.assertEqual(cluster.inventory.get('vrrp_ips')[0]['router_id'], '0') + + def test_default_router_id_generation_ipv6_3(self): + inventory = demo.generate_inventory(**demo.FULLHA_KEEPALIVED) + inventory['vrrp_ips'] = ['fdda:5cc1:23:4::f'] + cluster = demo.new_cluster(inventory) + self.assertEqual(cluster.inventory.get('vrrp_ips')[0]['router_id'], '15') + + def test_default_router_id_generation_ipv6_4(self): + inventory = demo.generate_inventory(**demo.FULLHA_KEEPALIVED) + inventory['vrrp_ips'] = ['fdda:5cc1:23:4::1f'] + cluster = demo.new_cluster(inventory) + self.assertEqual(cluster.inventory.get('vrrp_ips')[0]['router_id'], '31') + + def test_default_router_id_generation_ipv6_5(self): + inventory = demo.generate_inventory(**demo.FULLHA_KEEPALIVED) + inventory['vrrp_ips'] = ['fdc8:f4e3:c24a:1403:f816:3eff:fe6b:a082'] + cluster = demo.new_cluster(inventory) + self.assertEqual(cluster.inventory.get('vrrp_ips')[0]['router_id'], '130') + + def test_default_router_id_generation_ipv6_6(self): + inventory = demo.generate_inventory(**demo.FULLHA_KEEPALIVED) + inventory['vrrp_ips'] = ['2001:db8:1:2:020c:29ff:fe0c:47d5'] + cluster = demo.new_cluster(inventory) + self.assertEqual(cluster.inventory.get('vrrp_ips')[0]['router_id'], '213') + + def test_default_id_generation(self): + self.assertIsNotNone(self.cluster.inventory.get('vrrp_ips')[0]['id']) + self.assertEqual(len(self.cluster.inventory.get('vrrp_ips')[0]['id']), + self.cluster.globals['keepalived']['defaults']['label_size']) + self.assertEqual(self.cluster.inventory.get('vrrp_ips')[0]['id'], + self.cluster2.inventory.get('vrrp_ips')[0]['id']) + + def test_default_password_generation(self): + self.assertIsNotNone(self.cluster.inventory.get('vrrp_ips')[0]['password']) + self.assertEqual(len(self.cluster.inventory.get('vrrp_ips')[0]['password']), + self.cluster.globals['keepalived']['defaults']['password_size']) + self.assertNotEqual(self.cluster.inventory.get('vrrp_ips')[0]['password'], + self.cluster2.inventory.get('vrrp_ips')[0]['password']) + + def test_default_hosts_priority_generation(self): + self.assertEqual(self.cluster.inventory.get('vrrp_ips')[0]['hosts'][0].get('priority'), + self.cluster.globals['keepalived']['defaults']['priority']['max_value'] - + self.cluster.globals['keepalived']['defaults']['priority']['step']) + self.assertEqual(self.cluster.inventory.get('vrrp_ips')[0]['hosts'][1].get('priority'), + self.cluster.globals['keepalived']['defaults']['priority']['max_value'] - + self.cluster.globals['keepalived']['defaults']['priority']['step'] * 2) + + def test_keepalived_role_appeared(self): + self.assertIn('keepalived', self.cluster.roles) + + def test_keepalived_group_appeared(self): + self.assertIsNotNone(self.cluster.nodes.get('keepalived')) + + balancer_1_ip = self.cluster.nodes['all'].get_member(0, provide_node_configs=True, + apply_filter={'name': 'balancer-1'})['connect_to'] + self.assertIn(balancer_1_ip, list(self.cluster.nodes['keepalived'].nodes.keys())) + + def test_vrrp_defined_no_hosts_and_balancers(self): + # vrrp_ip defined, but hosts for it is not defined + no balancers to auto determine -> then raise exception + inventory = demo.generate_inventory(balancer=0, master=3, worker=3, keepalived=1) + with 
self.assertRaises(Exception): + demo.new_cluster(inventory) + + +class TestKeepalivedInstallation(unittest.TestCase): + + def test_keepalived_installation_when_already_installed(self): + inventory = demo.generate_inventory(**demo.FULLHA_KEEPALIVED) + cluster = demo.new_cluster(inventory) + + package_associations = cluster.inventory['services']['packages']['associations']['keepalived'] + + # simulate already installed keepalived package + expected_results_1 = demo.create_nodegroup_result(cluster.nodes['keepalived'], stdout='Keepalived v1.2.3') + cluster.fake_shell.add(expected_results_1, 'sudo', ['%s -v' % package_associations['executable_name']]) + + # simulate mkdir command + expected_results_2 = demo.create_nodegroup_result(cluster.nodes['balancer']) + cluster.fake_shell.add(expected_results_2, 'sudo', ["mkdir -p /etc/systemd/system/keepalived.service.d"]) + + # simulate systemd daemon reload + expected_results_3 = demo.create_nodegroup_result(cluster.nodes['balancer']) + cluster.fake_shell.add(expected_results_3, 'sudo', ["systemctl daemon-reload"]) + + # simulate chmod command + expected_results_4 = demo.create_nodegroup_result(cluster.nodes['keepalived'], stdout='ok') + cluster.fake_shell.add(expected_results_4, 'sudo', ['chmod +x /usr/local/bin/check_haproxy.sh']) + + # simulate enable package command + expected_results_5 = demo.create_nodegroup_result(cluster.nodes['keepalived'], stdout='ok') + cluster.fake_shell.add(expected_results_5, 'sudo', + ['systemctl enable %s --now' % package_associations['service_name']]) + + # start installation + actual_result = keepalived.install(cluster.nodes['keepalived']) + + # verify installation result should be the same as simulated and contain version print stdout + expected_results_1 = cluster.nodes["all"]._make_result(expected_results_1) + self.assertEqual(expected_results_1, actual_result) + + def test_keepalived_installation_when_not_installed(self): + inventory = demo.generate_inventory(**demo.FULLHA_KEEPALIVED) + cluster = demo.new_cluster(inventory) + + package_associations = cluster.inventory['services']['packages']['associations']['keepalived'] + + # simulate keepalived package missing + missing_package_command = ['%s -v' % package_associations['executable_name']] + missing_package_result = demo.create_nodegroup_result(cluster.nodes['keepalived'], + code=127, stderr='Command keepalived not found') + cluster.fake_shell.add(missing_package_result, 'sudo', missing_package_command) + + # simulate package installation + installation_command = ['yum install -y %s; rpm -q %s; if [ $? != 0 ]; then echo ' + '\"Failed to check version for some packages. ' + 'Make sure packages are not already installed with higher versions. ' + 'Also, make sure user-defined packages have rpm-compatible names. 
\"; exit 1; fi ' + % (package_associations['package_name'], package_associations['package_name'])] + expected_results = demo.create_nodegroup_result(cluster.nodes['keepalived'], code=0, + stdout='Successfully installed keepalived') + cluster.fake_shell.add(expected_results, 'sudo', installation_command) + + # simulate package installation check command + check_command = [f'rpm -q {package_associations["package_name"]}'] + expected_results_1 = demo.create_nodegroup_result(cluster.nodes['balancer'], code=0, + stdout='All packages installed') + cluster.fake_shell.add(expected_results_1, 'sudo', check_command) + + # simulate mkdir command + expected_results_2 = demo.create_nodegroup_result(cluster.nodes['balancer']) + cluster.fake_shell.add(expected_results_2, 'sudo', ["mkdir -p /etc/systemd/system/keepalived.service.d"]) + + # simulate systemd daemon reload + expected_results_3 = demo.create_nodegroup_result(cluster.nodes['balancer']) + cluster.fake_shell.add(expected_results_3, 'sudo', ["systemctl daemon-reload"]) + + # simulate chmod command + expected_results_4 = demo.create_nodegroup_result(cluster.nodes['keepalived'], stdout='ok') + cluster.fake_shell.add(expected_results_4, 'sudo', ['chmod +x /usr/local/bin/check_haproxy.sh']) + + # simulate enable package command + expected_results_5 = demo.create_nodegroup_result(cluster.nodes['keepalived'], stdout='ok') + cluster.fake_shell.add(expected_results_5, 'sudo', + ['systemctl enable %s --now' % package_associations['service_name']]) + + # start installation + actual_result = keepalived.install(cluster.nodes['keepalived']) + + # verify installation result should be the same as simulated and contain version print stdout + expected_results = cluster.nodes["all"]._make_result(expected_results) + self.assertEqual(expected_results, actual_result) + + +class TestKeepalivedConfigGeneration(unittest.TestCase): + + def test_(self): + # TODO: ! 
+ pass + + +class TestKeepalivedConfigApply(unittest.TestCase): + + def test_config_apply(self): + inventory = demo.generate_inventory(**demo.FULLHA_KEEPALIVED) + cluster = demo.new_cluster(inventory) + + node = cluster.nodes['keepalived'].get_first_member(provide_node_configs=True) + expected_config = keepalived.generate_config(cluster.inventory, node) + + package_associations = cluster.inventory['services']['packages']['associations']['keepalived'] + configs_directory = '/'.join(package_associations['config_location'].split('/')[:-1]) + + # simulate mkdir for configs + cluster.fake_shell.add(demo.create_nodegroup_result(cluster.nodes['keepalived'], code=0), 'sudo', + ['mkdir -p %s' % configs_directory]) + + # simulate configs ls -la + cluster.fake_shell.add(demo.create_nodegroup_result(cluster.nodes['keepalived'], code=0), 'sudo', + ['ls -la %s' % package_associations['config_location']]) + + # simulate daemon restart + cluster.fake_shell.add(demo.create_nodegroup_result(cluster.nodes['keepalived'], code=0), 'sudo', + ['systemctl restart %s' % package_associations['service_name']]) + + # simulate daemon status + expected_result = demo.create_nodegroup_result(cluster.nodes['keepalived'], code=0) + cluster.fake_shell.add(expected_result, 'sudo', ['systemctl status %s' % package_associations['service_name']]) + + actual_result = keepalived.configure(cluster.nodes['keepalived']) + + expected_result = cluster.nodes["all"]._make_result(expected_result) + self.assertEqual(expected_result, actual_result) + + # read placed data in FakeFS + actual_config = cluster.fake_fs.read(node['connect_to'], package_associations['config_location']) + + self.assertEqual(expected_config, actual_config) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/unit/test_upgrade.py b/test/unit/test_upgrade.py new file mode 100755 index 000000000..112337f83 --- /dev/null +++ b/test/unit/test_upgrade.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python3 + +import unittest + +from kubetool import kubernetes +from kubetool.procedures import upgrade +from kubetool import demo + + +class UpgradeVerifyUpgradePlan(unittest.TestCase): + + def test_valid_upgrade_plan(self): + upgrade.verify_upgrade_plan([ + 'v1.17.1', + 'v1.18.2' + ]) + + def test_invalid_upgrade_plan(self): + with self.assertRaises(Exception): + upgrade.verify_upgrade_plan([ + 'v1.17.1', + 'v1.19.3' + ]) + + def test_upgrade_plan_bad_symbols(self): + with self.assertRaises(Exception): + upgrade.verify_upgrade_plan([ + 'v1.17 .1', + 'v1.18.2' + ]) + + def test_upgrade_plan_invalid_version(self): + with self.assertRaises(Exception): + upgrade.verify_upgrade_plan([ + 'v1.17', + 'v1.18.2' + ]) + + def test_upgrade_plan_invalid_version2(self): + with self.assertRaises(Exception): + upgrade.verify_upgrade_plan([ + '1.17.1', + '1.18.2' + ]) + + def test_upgrade_plan_sort(self): + result = upgrade.verify_upgrade_plan([ + 'v2.1.1', + 'v1.13.2', + 'v1.15.0', + 'v1.18.2', + 'v1.16.2', + 'v1.14.4', + 'v2.0.3', + 'v1.17.1', + 'v1.13.1', + ]) + + self.assertEqual([ + 'v1.13.1', + 'v1.13.2', + 'v1.14.4', + 'v1.15.0', + 'v1.16.2', + 'v1.17.1', + 'v1.18.2', + 'v2.0.3', + 'v2.1.1', + ], result) + + +class UpgradeDefaultsEnrichment(unittest.TestCase): + + def prepare_cluster(self, old, new): + inventory = demo.generate_inventory(**demo.MINIHA_KEEPALIVED) + inventory['services']['kubeadm'] = { + 'kubernetesVersion': old + } + cluster = demo.new_cluster(inventory) + cluster.context['upgrade_version'] = new + cluster.context['initial_procedure'] = 'upgrade' + return cluster + 
+ def test_correct_inventory(self): + old_kubernetes_version = 'v1.18.4' + new_kubernetes_version = 'v1.18.10' + cluster = self.prepare_cluster(old_kubernetes_version, new_kubernetes_version) + cluster._inventory = kubernetes.enrich_upgrade_inventory(cluster.inventory, cluster) + self.assertEqual(new_kubernetes_version, cluster.inventory['services']['kubeadm']['kubernetesVersion']) + + def test_incorrect_inventory_high_range(self): + old_kubernetes_version = 'v1.18.4' + new_kubernetes_version = 'v1.20.2' + cluster = self.prepare_cluster(old_kubernetes_version, new_kubernetes_version) + with self.assertRaises(Exception): + kubernetes.enrich_upgrade_inventory(cluster.inventory, cluster) + + def test_incorrect_inventory_downgrade(self): + old_kubernetes_version = 'v1.20.2' + new_kubernetes_version = 'v1.18.4' + cluster = self.prepare_cluster(old_kubernetes_version, new_kubernetes_version) + with self.assertRaises(Exception): + kubernetes.enrich_upgrade_inventory(cluster.inventory, cluster) + + def test_incorrect_inventory_same_version(self): + old_kubernetes_version = 'v1.18.4' + new_kubernetes_version = 'v1.18.4' + cluster = self.prepare_cluster(old_kubernetes_version, new_kubernetes_version) + with self.assertRaises(Exception): + kubernetes.enrich_upgrade_inventory(cluster.inventory, cluster) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/unit/test_workaround.py b/test/unit/test_workaround.py new file mode 100755 index 000000000..a2a275849 --- /dev/null +++ b/test/unit/test_workaround.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python3 + +import unittest + +from kubetool import demo +from paramiko.ssh_exception import SSHException + +ETCD_LEADER_CHANGED_MESSAGE = 'Error from server: rpc error: code = Unavailable desc = etcdserver: leader changed' + + +class TestUnexpectedErrors(unittest.TestCase): + + def test_etcd_leader_changed_workaround(self): + cluster = demo.new_cluster(demo.generate_inventory(**demo.FULLHA)) + + # to increase test speed, let's override global workaround timeout value + cluster.globals['workaround']['timeout'] = 0 + + command = ['kubectl describe nodes'] + + bad_results = demo.create_nodegroup_result(cluster.nodes['master'], code=-1, stderr=ETCD_LEADER_CHANGED_MESSAGE) + good_results = demo.create_nodegroup_result(cluster.nodes['master'], stdout='Kubernetes master is running at %s' + % cluster.inventory['cluster_name']) + + cluster.fake_shell.add(bad_results, 'sudo', command, usage_limit=1) + cluster.fake_shell.add(good_results, 'sudo', command) + + results = cluster.nodes['master'].get_any_member().sudo('kubectl describe nodes') + + for conn, result in results.items(): + self.assertIn('is running', result.stdout, msg="After an unsuccessful attempt, the workaround mechanism " + "should have worked and got the right result, but it seems " + "something went wrong") + + def test_encountered_rsa_key(self): + cluster = demo.new_cluster(demo.generate_inventory(**demo.FULLHA)) + + # to increase test speed, let's override global workaround timeout value + cluster.globals['workaround']['timeout'] = 0 + + command = ['kubectl describe nodes'] + + bad_results = demo.create_exception_result(cluster.nodes['master'], + exception=SSHException('encountered RSA key, expected OPENSSH key')) + good_results = demo.create_nodegroup_result(cluster.nodes['master'], stdout='Kubernetes master is running at %s' + % cluster.inventory['cluster_name']) + + cluster.fake_shell.add(bad_results, 'sudo', command, usage_limit=1) + cluster.fake_shell.add(good_results, 'sudo', command) + 
cluster.fake_shell.add(demo.create_nodegroup_result(cluster.nodes['all'], stdout='example result'), 'run', ['last reboot']) + + results = cluster.nodes['master'].get_any_member().sudo('kubectl describe nodes') + + for conn, result in results.items(): + self.assertIn('is running', result.stdout, msg="After an unsuccessful attempt, the workaround mechanism " + "should have worked and got the right result, but it seems " + "something went wrong") + + +if __name__ == '__main__': + unittest.main()