From 824e616625b3c5b80e3d1ac4410edb41142021cd Mon Sep 17 00:00:00 2001 From: Teddy Andrieux Date: Mon, 7 Jun 2021 10:02:28 +0200 Subject: [PATCH 1/7] salt: Wait for pillar to refresh before `mine.update` During bootstrap we need an up to date pillar to have all `mine_function` in the minion pillar before doing a `mine.update`. Since by default `saltutil.refresh_pillar` is asynchrone just add a `wait: true` so that this refresh properly wait for the pillar to be refreshed --- salt/metalk8s/orchestrate/bootstrap/init.sls | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/metalk8s/orchestrate/bootstrap/init.sls b/salt/metalk8s/orchestrate/bootstrap/init.sls index fdfc9dd02c..68b364b7fc 100644 --- a/salt/metalk8s/orchestrate/bootstrap/init.sls +++ b/salt/metalk8s/orchestrate/bootstrap/init.sls @@ -132,6 +132,8 @@ Update pillar on bootstrap minion after highstate: salt.function: - name: saltutil.refresh_pillar - tgt: {{ pillar.bootstrap_id }} + - kwarg: + wait: true - require: - salt: Configure bootstrap Node object From f86cb04fef232ca7a779033b9a06a6a015febea1 Mon Sep 17 00:00:00 2001 From: Teddy Andrieux Date: Fri, 4 Jun 2021 17:09:30 +0200 Subject: [PATCH 2/7] salt: Define Control Plane Ingress IP and Endpoint at a single place Instead of hardcode Control Plane Ingress Endpoint at several places in the salt states, add a new Salt module to "compute" it and use this salt module everywhere in our salt states --- buildchain/buildchain/salt_tree.py | 1 - charts/ingress-nginx-control-plane.yaml | 2 +- charts/kube-prometheus-stack.yaml | 8 +-- salt/_modules/metalk8s_network.py | 28 ++++++++ salt/metalk8s/addons/dex/config/dex.yaml.j2 | 8 ++- .../certs/server.sls | 3 +- .../control-plane-ip.sls | 39 ----------- .../deployed/chart.sls | 3 +- .../prometheus-operator/deployed/chart.sls | 8 +-- .../config/metalk8s-shell-ui-config.yaml.j2 | 2 +- .../addons/ui/deployed/ui-configuration.sls | 2 +- salt/metalk8s/addons/ui/deployed/ui.sls.in | 5 -- 
.../kubernetes/apiserver/installed.sls | 5 +- salt/tests/unit/formulas/config.yaml | 29 -------- salt/tests/unit/formulas/fixtures/salt.py | 6 ++ .../modules/files/test_metalk8s_network.yaml | 44 ++++++++++++ .../unit/modules/test_metalk8s_network.py | 68 +++++++++++++++++++ 17 files changed, 166 insertions(+), 95 deletions(-) delete mode 100644 salt/metalk8s/addons/nginx-ingress-control-plane/control-plane-ip.sls diff --git a/buildchain/buildchain/salt_tree.py b/buildchain/buildchain/salt_tree.py index 4c457b96d8..a311aad14b 100644 --- a/buildchain/buildchain/salt_tree.py +++ b/buildchain/buildchain/salt_tree.py @@ -427,7 +427,6 @@ def _get_parts(self) -> Iterator[str]: Path("salt/metalk8s/addons/nginx-ingress-control-plane/deployed/init.sls"), Path("salt/metalk8s/addons/nginx-ingress-control-plane/deployed/chart.sls"), Path("salt/metalk8s/addons/nginx-ingress-control-plane/deployed/tls-secret.sls"), - Path("salt/metalk8s/addons/nginx-ingress-control-plane/control-plane-ip.sls"), Path("salt/metalk8s/beacon/certificates.sls"), Path("salt/metalk8s/container-engine/containerd/configured.sls"), Path("salt/metalk8s/container-engine/containerd/files/50-metalk8s.conf.j2"), diff --git a/charts/ingress-nginx-control-plane.yaml b/charts/ingress-nginx-control-plane.yaml index e30542b427..e3b586f3cf 100644 --- a/charts/ingress-nginx-control-plane.yaml +++ b/charts/ingress-nginx-control-plane.yaml @@ -35,7 +35,7 @@ controller: type: ClusterIP externalIPs: - - '{%- endraw -%}{{ grains.metalk8s.control_plane_ip }}{%- raw -%}' + - '{%- endraw -%}{{ salt.metalk8s_network.get_control_plane_ingress_ip() }}{%- raw -%}' enableHttp: false diff --git a/charts/kube-prometheus-stack.yaml b/charts/kube-prometheus-stack.yaml index 9d66e86fff..2f15046f91 100644 --- a/charts/kube-prometheus-stack.yaml +++ b/charts/kube-prometheus-stack.yaml @@ -176,7 +176,7 @@ grafana: grafana.ini: server: - root_url: '__escape__(https://{{ "{{ grains.metalk8s.control_plane_ip }}" }}:8443/grafana)' + 
root_url: '__escape__({{ "{{ salt.metalk8s_network.get_control_plane_ingress_endpoint() }}" }}/grafana)' analytics: reporting_enabled: false check_for_updates: false @@ -188,9 +188,9 @@ grafana: scopes: "openid profile email groups" client_id: "grafana-ui" client_secret: "4lqK98NcsWG5qBRHJUqYM1" - auth_url: '__escape__(https://{{ "{{ grains.metalk8s.control_plane_ip }}" }}:8443/oidc/auth)' - token_url: '__escape__(https://{{ "{{ grains.metalk8s.control_plane_ip }}" }}:8443/oidc/token)' - api_url: '__escape__(https://{{ "{{ grains.metalk8s.control_plane_ip }}" }}:8443/oidc/userinfo)' + auth_url: '__escape__({{ "{{ salt.metalk8s_network.get_control_plane_ingress_endpoint() }}" }}/oidc/auth)' + token_url: '__escape__({{ "{{ salt.metalk8s_network.get_control_plane_ingress_endpoint() }}" }}/oidc/token)' + api_url: '__escape__({{ "{{ salt.metalk8s_network.get_control_plane_ingress_endpoint() }}" }}/oidc/userinfo)' role_attribute_path: >- contains(`{% endraw %}{{ "{{ dex.spec.config.staticPasswords | map(attribute='email') | list | tojson }}" }}{% raw %}`, email) && 'Admin' diff --git a/salt/_modules/metalk8s_network.py b/salt/_modules/metalk8s_network.py index 2d05ff36e8..f915aa586a 100644 --- a/salt/_modules/metalk8s_network.py +++ b/salt/_modules/metalk8s_network.py @@ -226,3 +226,31 @@ def routes(): ) return ret + + +def get_control_plane_ingress_ip(): + # Use Bootstrap Control Plane IP as Ingress Control plane IP + bootstrap_id = __salt__["metalk8s.minions_by_role"]("bootstrap")[0] + + if __grains__["id"] == bootstrap_id: + return __grains__["metalk8s"]["control_plane_ip"] + + if __opts__.get("__role") == "minion": + mine_ret = __salt__["mine.get"](tgt=bootstrap_id, fun="control_plane_ip") + else: + mine_ret = __salt__["saltutil.runner"]( + "mine.get", tgt=bootstrap_id, fun="control_plane_ip" + ) + + if not isinstance(mine_ret, dict) or bootstrap_id not in mine_ret: + raise CommandExecutionError( + "Unable to get {} Control Plane IP: {}".format(bootstrap_id, 
mine_ret) + ) + + return mine_ret[bootstrap_id] + + +def get_control_plane_ingress_endpoint(): + return "https://{}:8443".format( + __salt__["metalk8s_network.get_control_plane_ingress_ip"]() + ) diff --git a/salt/metalk8s/addons/dex/config/dex.yaml.j2 b/salt/metalk8s/addons/dex/config/dex.yaml.j2 index f737334ba3..e905fcd43e 100644 --- a/salt/metalk8s/addons/dex/config/dex.yaml.j2 +++ b/salt/metalk8s/addons/dex/config/dex.yaml.j2 @@ -10,6 +10,8 @@ ) %} +{%- set control_plane_ingress_ep = salt.metalk8s_network.get_control_plane_ingress_endpoint() %} + # Defaults for configuration of Dex (OIDC) apiVersion: addons.metalk8s.scality.com/v1alpha2 @@ -21,7 +23,7 @@ spec: # Dex server configuration config: - issuer: https://{{ grains.metalk8s.control_plane_ip }}:8443/oidc + issuer: {{ control_plane_ingress_ep }}/oidc storage: config: @@ -65,12 +67,12 @@ spec: - id: metalk8s-ui name: MetalK8s UI redirectURIs: - - https://{{ grains.metalk8s.control_plane_ip }}:8443/{{ metalk8s_ui_config.spec.basePath.lstrip('/') }} + - {{ control_plane_ingress_ep }}/{{ metalk8s_ui_config.spec.basePath.lstrip('/') }} secret: ybrMJpVMQxsiZw26MhJzCjA2ut - id: grafana-ui name: Grafana UI redirectURIs: - - https://{{ grains.metalk8s.control_plane_ip }}:8443/grafana/login/generic_oauth + - {{ control_plane_ingress_ep }}/grafana/login/generic_oauth secret: 4lqK98NcsWG5qBRHJUqYM1 enablePasswordDB: true diff --git a/salt/metalk8s/addons/nginx-ingress-control-plane/certs/server.sls b/salt/metalk8s/addons/nginx-ingress-control-plane/certs/server.sls index 50bd0b4616..b0987160a0 100644 --- a/salt/metalk8s/addons/nginx-ingress-control-plane/certs/server.sls +++ b/salt/metalk8s/addons/nginx-ingress-control-plane/certs/server.sls @@ -21,7 +21,6 @@ Create Control-Plane Ingress server private key: - unless: - test -f "{{ private_key_path }}" -{# TODO: add Ingress Service IP once stable (LoadBalancer probably) #} {%- set certSANs = [ grains.fqdn, 'localhost', @@ -30,7 +29,7 @@ Create Control-Plane Ingress 
server private key: 'nginx-ingress-control-plane.metalk8s-ingress', 'nginx-ingress-control-plane.metalk8s-ingress.svc', 'nginx-ingress-control-plane.metalk8s-ingress.svc.cluster.local', - grains.metalk8s.control_plane_ip, + salt.metalk8s_network.get_control_plane_ingress_ip(), ] %} Generate Control-Plane Ingress server certificate: diff --git a/salt/metalk8s/addons/nginx-ingress-control-plane/control-plane-ip.sls b/salt/metalk8s/addons/nginx-ingress-control-plane/control-plane-ip.sls deleted file mode 100644 index 120daf048b..0000000000 --- a/salt/metalk8s/addons/nginx-ingress-control-plane/control-plane-ip.sls +++ /dev/null @@ -1,39 +0,0 @@ -{# This whole block is used to "know" the Ingress external IP used by Dex. - It will be removed once we can have a known LoadBalancer IP for Ingress. #} -{% if '_errors' in pillar.metalk8s.nodes %} - {# Assume this is the bootstrap Node and we haven't an apiserver yet #} - {%- set bootstrap_id = grains.id %} -{%- elif pillar.metalk8s.nodes | length <= 1 %} - {# Only one node (or even, zero) can/should only happen during bootstrap #} - {%- set bootstrap_id = grains.id %} -{%- else %} - {%- set bootstrap_nodes = salt.metalk8s.minions_by_role('bootstrap') %} - {%- if bootstrap_nodes %} - {%- set bootstrap_id = bootstrap_nodes | first %} - {%- else %} - {{ raise('Missing bootstrap node') }} - {%- endif %} -{%- endif %} - -{%- if bootstrap_id is none %} - {{ raise('Missing bootstrap Node in pillar, cannot proceed.') }} -{%- elif bootstrap_id == grains.id %} - {%- set bootstrap_control_plane_ip = grains.metalk8s.control_plane_ip %} -{%- else %} - {%- if opts.get('__role') == 'minion' %} - {%- set bootstrap_control_plane_ip = salt.mine.get( - tgt=bootstrap_id, - fun='control_plane_ip')[bootstrap_id] - %} - {%- else %} - {#- If we are on the master then use the runner #} - {%- set bootstrap_control_plane_ip = salt.saltutil.runner( - 'mine.get', - tgt=bootstrap_id, - fun='control_plane_ip')[bootstrap_id] - %} - {%- endif %} -{%- endif 
%} - -{%- set ingress_control_plane = bootstrap_control_plane_ip ~ ':8443' %} -{# (end of Ingress URL retrieval) #} diff --git a/salt/metalk8s/addons/nginx-ingress-control-plane/deployed/chart.sls b/salt/metalk8s/addons/nginx-ingress-control-plane/deployed/chart.sls index 98d983f2ea..d741e219ec 100644 --- a/salt/metalk8s/addons/nginx-ingress-control-plane/deployed/chart.sls +++ b/salt/metalk8s/addons/nginx-ingress-control-plane/deployed/chart.sls @@ -283,7 +283,8 @@ metadata: namespace: metalk8s-ingress spec: externalIPs: - - '{%- endraw -%}{{ grains.metalk8s.control_plane_ip }}{%- raw -%}' + - '{%- endraw -%}{{ salt.metalk8s_network.get_control_plane_ingress_ip() }}{%- raw + -%}' ports: - name: https port: 8443 diff --git a/salt/metalk8s/addons/prometheus-operator/deployed/chart.sls b/salt/metalk8s/addons/prometheus-operator/deployed/chart.sls index 29ac18f01d..49b48bb15b 100644 --- a/salt/metalk8s/addons/prometheus-operator/deployed/chart.sls +++ b/salt/metalk8s/addons/prometheus-operator/deployed/chart.sls @@ -20826,15 +20826,15 @@ data: [auth] oauth_auto_login = true [auth.generic_oauth] - api_url = "{% endraw -%}https://{{ grains.metalk8s.control_plane_ip }}:8443/oidc/userinfo{%- raw %}" - auth_url = "{% endraw -%}https://{{ grains.metalk8s.control_plane_ip }}:8443/oidc/auth{%- raw %}" + api_url = "{% endraw -%}{{ salt.metalk8s_network.get_control_plane_ingress_endpoint() }}/oidc/userinfo{%- raw %}" + auth_url = "{% endraw -%}{{ salt.metalk8s_network.get_control_plane_ingress_endpoint() }}/oidc/auth{%- raw %}" client_id = grafana-ui client_secret = 4lqK98NcsWG5qBRHJUqYM1 enabled = true role_attribute_path = contains(`{% endraw %}{{ dex.spec.config.staticPasswords | map(attribute='email') | list | tojson }}{% raw %}`, email) && 'Admin' scopes = openid profile email groups tls_skip_verify_insecure = true - token_url = "{% endraw -%}https://{{ grains.metalk8s.control_plane_ip }}:8443/oidc/token{%- raw %}" + token_url = "{% endraw -%}{{ 
salt.metalk8s_network.get_control_plane_ingress_endpoint() }}/oidc/token{%- raw %}" [grafana_net] url = https://grafana.net [log] @@ -20845,7 +20845,7 @@ data: plugins = /var/lib/grafana/plugins provisioning = /etc/grafana/provisioning [server] - root_url = "{% endraw -%}https://{{ grains.metalk8s.control_plane_ip }}:8443/grafana{%- raw %}" + root_url = "{% endraw -%}{{ salt.metalk8s_network.get_control_plane_ingress_endpoint() }}/grafana{%- raw %}" kind: ConfigMap metadata: labels: diff --git a/salt/metalk8s/addons/ui/config/metalk8s-shell-ui-config.yaml.j2 b/salt/metalk8s/addons/ui/config/metalk8s-shell-ui-config.yaml.j2 index 9319d49c71..e3deeab289 100644 --- a/salt/metalk8s/addons/ui/config/metalk8s-shell-ui-config.yaml.j2 +++ b/salt/metalk8s/addons/ui/config/metalk8s-shell-ui-config.yaml.j2 @@ -19,7 +19,7 @@ kind: ShellUIConfig spec: oidc: providerUrl: "/oidc" - redirectUrl: "https://{{ grains.metalk8s.control_plane_ip }}:8443/{{ metalk8s_ui_config.spec.basePath.lstrip('/') }}" + redirectUrl: "{{ salt.metalk8s_network.get_control_plane_ingress_endpoint() }}/{{ metalk8s_ui_config.spec.basePath.lstrip('/') }}" clientId: "metalk8s-ui" responseType: "id_token" scopes: "openid profile email groups offline_access audience:server:client_id:oidc-auth-client" diff --git a/salt/metalk8s/addons/ui/deployed/ui-configuration.sls b/salt/metalk8s/addons/ui/deployed/ui-configuration.sls index c124d6b991..3e68151cad 100644 --- a/salt/metalk8s/addons/ui/deployed/ui-configuration.sls +++ b/salt/metalk8s/addons/ui/deployed/ui-configuration.sls @@ -55,7 +55,7 @@ metalk8s-ui-config ConfigMap already exist: {%- endif %} {%- set stripped_base_path = metalk8s_ui.spec.basePath.strip('/') %} - {%- set cp_ingress_url = "https://" ~ grains.metalk8s.control_plane_ip ~ ":8443" %} + {%- set cp_ingress_url = salt.metalk8s_network.get_control_plane_ingress_endpoint() %} {%- set metalk8s_ui_url = cp_ingress_url ~ '/' ~ stripped_base_path ~ ('/' if stripped_base_path else '') %} diff --git 
a/salt/metalk8s/addons/ui/deployed/ui.sls.in b/salt/metalk8s/addons/ui/deployed/ui.sls.in index 77a65ccb32..2a911af9f7 100644 --- a/salt/metalk8s/addons/ui/deployed/ui.sls.in +++ b/salt/metalk8s/addons/ui/deployed/ui.sls.in @@ -1,11 +1,6 @@ include: - .namespace -{%- from "metalk8s/addons/nginx-ingress-control-plane/control-plane-ip.sls" - import ingress_control_plane with context -%} - - {%- set metalk8s_ui_defaults = salt.slsutil.renderer( 'salt://metalk8s/addons/ui/config/metalk8s-ui-config.yaml', saltenv=saltenv ) diff --git a/salt/metalk8s/kubernetes/apiserver/installed.sls b/salt/metalk8s/kubernetes/apiserver/installed.sls index 1a588a847c..d22e3a926e 100644 --- a/salt/metalk8s/kubernetes/apiserver/installed.sls +++ b/salt/metalk8s/kubernetes/apiserver/installed.sls @@ -2,9 +2,6 @@ {%- from "metalk8s/map.jinja" import certificates with context %} {%- from "metalk8s/map.jinja" import metalk8s with context %} {%- from "metalk8s/map.jinja" import networks with context %} -{%- from "metalk8s/addons/nginx-ingress-control-plane/control-plane-ip.sls" - import ingress_control_plane with context -%} {%- set encryption_k8s_path = "/etc/kubernetes/encryption.conf" %} @@ -98,7 +95,7 @@ Create kube-apiserver Pod manifest: # } - --encryption-provider-config={{ encryption_k8s_path }} - --cors-allowed-origins=.* - - --oidc-issuer-url=https://{{ ingress_control_plane }}/oidc + - --oidc-issuer-url={{ salt.metalk8s_network.get_control_plane_ingress_endpoint() }}/oidc - --oidc-client-id=oidc-auth-client - --oidc-ca-file=/etc/metalk8s/pki/nginx-ingress/ca.crt - --oidc-username-claim=email diff --git a/salt/tests/unit/formulas/config.yaml b/salt/tests/unit/formulas/config.yaml index 3c5dbaa3a7..7a938cf30d 100644 --- a/salt/tests/unit/formulas/config.yaml +++ b/salt/tests/unit/formulas/config.yaml @@ -82,35 +82,6 @@ metalk8s: kind: LokiConfig spec: {} - nginx-ingress-control-plane: - control-plane-ip.sls: - _cases: - "Bootstrap node is local minion (default)": {} - "Bootstrapping 
(errors in pillar)": - pillar_overrides: - metalk8s: - nodes: - _errors: ["Some error when retrieving nodes"] - "Bootstrapping (no nodes in pillar)": - pillar_overrides: - metalk8s: - nodes: [] - "Bootstrap minion is not local": - _subcases: - "From master": - mode: master - "From minion": - mode: minion - pillar_overrides: - metalk8s: - ca: - minion: other-bootstrap - nodes: - bootstrap: # default grains.id - roles: [master, infra, etcd] - other-bootstrap: - roles: [ca, bootstrap] - prometheus-operator: post-cleanup.sls: _cases: diff --git a/salt/tests/unit/formulas/fixtures/salt.py b/salt/tests/unit/formulas/fixtures/salt.py index d00a19363b..f9f268021a 100644 --- a/salt/tests/unit/formulas/fixtures/salt.py +++ b/salt/tests/unit/formulas/fixtures/salt.py @@ -414,6 +414,12 @@ def metalk8s_grafana_load_dashboard(source: str, **_kwargs: Any) -> Any: register_basic("metalk8s_network.get_oidc_service_ip")( MagicMock(return_value="10.96.0.7") ) +register_basic("metalk8s_network.get_control_plane_ingress_ip")( + MagicMock(return_value="192.168.1.240") +) +register_basic("metalk8s_network.get_control_plane_ingress_endpoint")( + MagicMock(return_value="https://192.168.1.240:8443") +) @register_basic("metalk8s_network.get_ip_from_cidrs") diff --git a/salt/tests/unit/modules/files/test_metalk8s_network.yaml b/salt/tests/unit/modules/files/test_metalk8s_network.yaml index 162265a0b6..692f96f90d 100644 --- a/salt/tests/unit/modules/files/test_metalk8s_network.yaml +++ b/salt/tests/unit/modules/files/test_metalk8s_network.yaml @@ -83,3 +83,47 @@ routes: 10.200.0.0/16 dev eth0 proto kernel scope link src 10.200.2.41 result: - *simple_route + +get_control_plane_ingress_ip: + # 1. Nominal bootstrap IP (from Salt minion) + - mine_ret: + bootstrap: 1.1.1.2 + result: 1.1.1.2 + + # 2. Nominal bootstrap IP (from Salt master) + - mine_runner_ret: + bootstrap: 1.1.1.3 + mine_ret: + bootstrap: 2.2.2.3 + opts: + __role: master + result: 1.1.1.3 + + # 3. 
Nominal bootstrap IP running from bootstrap node + - grains: + id: bootstrap + metalk8s: + control_plane_ip: 1.1.1.4 + result: 1.1.1.4 + + # 4. Error unable to get from mine (from Salt minion) + - mine_ret: {} + raises: true + result: "Unable to get bootstrap Control Plane IP: {}" + + # 5. Error unable to get from mine (from Salt master) + - mine_runner_ret: {} + opts: + __role: master + raises: true + result: "Unable to get bootstrap Control Plane IP: {}" + +get_control_plane_ingress_endpoint: + # 1. Nominal + - cp_ingress_ip_ret: 1.1.1.1 + result: https://1.1.1.1:8443 + + # 2. Error when getting IP + - cp_ingress_ip_ret: "Error because of banana" + raises: true + result: "Error because of banana" diff --git a/salt/tests/unit/modules/test_metalk8s_network.py b/salt/tests/unit/modules/test_metalk8s_network.py index 50bd4481b1..d50c1b990c 100644 --- a/salt/tests/unit/modules/test_metalk8s_network.py +++ b/salt/tests/unit/modules/test_metalk8s_network.py @@ -290,3 +290,71 @@ def _mock_convert_cidr(cidr): ): self.assertEqual(metalk8s_network.routes(), result) mock_ip_cmd.assert_called_once_with("ip -4 route show table main") + + @utils.parameterized_from_cases(YAML_TESTS_CASES["get_control_plane_ingress_ip"]) + def test_get_control_plane_ingress_ip( + self, + result, + raises=False, + opts=None, + grains=None, + mine_ret=None, + mine_runner_ret=None, + ): + """ + Tests the return of `get_control_plane_ingress_ip` function + """ + if opts is None: + opts = {"__role": "minion"} + if grains is None: + grains = {"id": "my-node"} + + salt_dict = { + "metalk8s.minions_by_role": MagicMock(return_value=["bootstrap"]), + "mine.get": MagicMock(return_value=mine_ret), + "saltutil.runner": MagicMock(return_value=mine_runner_ret), + } + + with patch.dict(metalk8s_network.__salt__, salt_dict), patch.dict( + metalk8s_network.__opts__, opts + ), patch.dict(metalk8s_network.__grains__, grains): + if raises: + self.assertRaisesRegex( + CommandExecutionError, + result, + 
metalk8s_network.get_control_plane_ingress_ip, + ) + else: + self.assertEqual( + metalk8s_network.get_control_plane_ingress_ip(), result + ) + + @utils.parameterized_from_cases( + YAML_TESTS_CASES["get_control_plane_ingress_endpoint"] + ) + def test_get_control_plane_ingress_endpoint( + self, result, raises=False, cp_ingress_ip_ret=None + ): + """ + Tests the return of `get_control_plane_ingress_endpoint` function + """ + mock_get_cp_ingress_ip = MagicMock(return_value=cp_ingress_ip_ret) + if raises: + mock_get_cp_ingress_ip.side_effect = CommandExecutionError( + cp_ingress_ip_ret + ) + + with patch.dict( + metalk8s_network.__salt__, + {"metalk8s_network.get_control_plane_ingress_ip": mock_get_cp_ingress_ip}, + ): + if raises: + self.assertRaisesRegex( + CommandExecutionError, + result, + metalk8s_network.get_control_plane_ingress_endpoint, + ) + else: + self.assertEqual( + metalk8s_network.get_control_plane_ingress_endpoint(), result + ) From d410b52e38571550f59f4766c23cb129e35a16e4 Mon Sep 17 00:00:00 2001 From: Teddy Andrieux Date: Fri, 4 Jun 2021 17:18:05 +0200 Subject: [PATCH 3/7] tests: Retrieve Control Plane Ingress Endpoint from Kubernetes Service Instead of hardcode Ingress port in tests and use Bootstrap Control Plane IP to compute the Control Plane Ingress Endpoint, retrieve it using the Kubernetes Service --- salt/metalk8s/defaults.yaml | 2 +- tests/conftest.py | 41 +++++++++++++++------- tests/post/features/authentication.feature | 2 +- tests/post/steps/test_authentication.py | 29 ++++++--------- tests/post/steps/test_network.py | 4 ++- tests/post/steps/test_ui.py | 10 ++---- tests/utils.py | 13 +++---- 7 files changed, 53 insertions(+), 48 deletions(-) diff --git a/salt/metalk8s/defaults.yaml b/salt/metalk8s/defaults.yaml index 0ed257c808..5ce36a2c74 100644 --- a/salt/metalk8s/defaults.yaml +++ b/salt/metalk8s/defaults.yaml @@ -74,7 +74,7 @@ networks: control_plane_ip:8080: expected: nginx description: MetalK8s repository - control_plane_ip:8443: + 
ingress_control_plane_ip:8443: expected: kube-proxy description: Control plane nginx ingress master: diff --git a/tests/conftest.py b/tests/conftest.py index d4c17a2983..61e48f0974 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -13,9 +13,6 @@ from tests import utils -CONTROL_PLANE_INGRESS_PORT = 8443 - - # Pytest command-line options def pytest_addoption(parser): parser.addoption( @@ -84,6 +81,28 @@ def kubeconfig(kubeconfig_data, tmp_path): return str(kubeconfig_path) # Need Python 3.6 to open() a Path object +@pytest.fixture +def control_plane_ingress_ip(k8s_client): + """Return the Control Plane Ingress IP from Kubernetes service""" + ingress_svc = k8s_client.read_namespaced_service( + name="ingress-nginx-control-plane-controller", + namespace="metalk8s-ingress", + ) + return ingress_svc.spec.external_i_ps[0] + + +@pytest.fixture +def control_plane_ingress_ep(k8s_client, control_plane_ingress_ip): + """Return the Control Plane Ingress Endpoint from Kubernetes service""" + ingress_svc = k8s_client.read_namespaced_service( + name="ingress-nginx-control-plane-controller", + namespace="metalk8s-ingress", + ) + ingress_port = ingress_svc.spec.ports[0].port + + return "https://{}:{}".format(control_plane_ingress_ip, ingress_port) + + @pytest.fixture def k8s_apiclient(kubeconfig): """Return an ApiClient to use for interacting with all K8s APIs.""" @@ -245,8 +264,8 @@ def ssh_config(request): @pytest.fixture -def prometheus_api(control_plane_ip): - return utils.PrometheusApi(control_plane_ip, CONTROL_PLANE_INGRESS_PORT) +def prometheus_api(control_plane_ingress_ep): + return utils.PrometheusApi(endpoint=control_plane_ingress_ep) def count_running_pods(request, k8s_client, pods_count, label, namespace, node): @@ -362,24 +381,22 @@ def check_resource_list(host, resource, namespace): ), converters=dict(should_fail=lambda s: s == "not "), ) -def dex_login(username, password, should_fail, control_plane_ip): +def dex_login(username, password, should_fail, 
control_plane_ingress_ep): session = utils.requests_retry_session( # Both Dex and the ingress controller may fail with one of the following codes status_forcelist=(500, 502, 503, 504), retries=10, backoff_factor=2, ) - ingress_url = "https://{}:{}".format(control_plane_ip, CONTROL_PLANE_INGRESS_PORT) - get_auth_start = time.time() try: auth_page = session.post( - ingress_url + "/oidc/auth?", + control_plane_ingress_ep + "/oidc/auth?", data={ "response_type": "id_token", "client_id": "metalk8s-ui", "scope": "openid audience:server:client_id:oidc-auth-client", - "redirect_uri": ingress_url + "/", + "redirect_uri": control_plane_ingress_ep + "/", "nonce": "nonce", }, verify=False, @@ -409,7 +426,7 @@ def dex_login(username, password, should_fail, control_plane_ip): try: auth_response = session.post( - ingress_url + next_path, + control_plane_ingress_ep + next_path, data={"login": username, "password": password}, verify=False, allow_redirects=False, @@ -417,7 +434,7 @@ def dex_login(username, password, should_fail, control_plane_ip): except requests.exceptions.ConnectionError as exc: pytest.fail( "Failed when authenticating to Dex through '{}': {}".format( - ingress_url + next_path, exc + control_plane_ingress_ep + next_path, exc ) ) diff --git a/tests/post/features/authentication.feature b/tests/post/features/authentication.feature index 4665ba64cd..699ca3b4b1 100644 --- a/tests/post/features/authentication.feature +++ b/tests/post/features/authentication.feature @@ -18,7 +18,7 @@ Feature: Authentication is up and running Given the Kubernetes API is available And the control-plane Ingress path '/oidc' is available And pods with label 'app.kubernetes.io/name=dex' are 'Ready' - When we perform a request on '/oidc/' with port '8443' on control-plane IP + When we perform a request on '/oidc/' on control-plane Ingress Then the server returns '404' with message '404 page not found' Scenario: Login to Dex using incorrect email diff --git 
a/tests/post/steps/test_authentication.py b/tests/post/steps/test_authentication.py index b2547f57e7..670d71afef 100644 --- a/tests/post/steps/test_authentication.py +++ b/tests/post/steps/test_authentication.py @@ -13,11 +13,6 @@ from tests import utils -# Constants {{{ - -INGRESS_PORT = 8443 - -# }}} # Scenarios {{{ @@ -102,18 +97,12 @@ def _wait_for_ingress_pod_and_container(): # When {{{ -@when( - parsers.parse( - "we perform a request on '{path}' with port '{port}' on control-plane IP" - ) -) -def perform_request(host, context, control_plane_ip, path, port): +@when(parsers.parse("we perform a request on '{path}' on control-plane Ingress")) +def perform_request(host, context, control_plane_ingress_ep, path): session = utils.requests_retry_session() try: context["response"] = session.get( - "https://{ip}:{port}{path}".format( - ip=control_plane_ip, port=port, path=path - ), + "{ingress_ep}{path}".format(ingress_ep=control_plane_ingress_ep, path=path), verify=False, ) except requests.exceptions.ConnectionError as exc: @@ -125,19 +114,18 @@ def perform_request(host, context, control_plane_ip, path, port): @then("we can reach the OIDC openID configuration") -def reach_openid_config(host, control_plane_ip): +def reach_openid_config(host, control_plane_ingress_ep): session = utils.requests_retry_session( # Both Dex and the ingress controller may fail with one of the following codes status_forcelist=(500, 502, 503, 504), retries=10, backoff_factor=2, ) - ingress_url = "https://{}:{}".format(control_plane_ip, INGRESS_PORT) def _get_openID_config(): try: response = session.get( - ingress_url + "/oidc/.well-known/openid-configuration", + control_plane_ingress_ep + "/oidc/.well-known/openid-configuration", verify=False, ) except requests.exceptions.ConnectionError as exc: @@ -148,8 +136,11 @@ def _get_openID_config(): assert response.status_code == 200 response_body = response.json() assert all(key in response_body for key in ["issuer", "authorization_endpoint"]) - assert 
response_body["issuer"] == ingress_url + "/oidc" - assert response_body["authorization_endpoint"] == ingress_url + "/oidc/auth" + assert response_body["issuer"] == control_plane_ingress_ep + "/oidc" + assert ( + response_body["authorization_endpoint"] + == control_plane_ingress_ep + "/oidc/auth" + ) utils.retry(_get_openID_config, times=10, wait=5) diff --git a/tests/post/steps/test_network.py b/tests/post/steps/test_network.py index 8710b69acd..e6c5c49575 100644 --- a/tests/post/steps/test_network.py +++ b/tests/post/steps/test_network.py @@ -31,7 +31,7 @@ def check_ports(host, ssh_config): @then("we have only expected processes listening") -def check_all_listening_process(host, version): +def check_all_listening_process(host, version, control_plane_ingress_ip): # List of knwon listening process ignored_listening_processes = [ 22, # sshd @@ -92,6 +92,8 @@ def check_all_listening_process(host, version): keys.append("{}:{}".format("control_plane_ip", port)) if ip == grain_ips["workload_plane_ip"]: keys.append("{}:{}".format("workload_plane_ip", port)) + if ip == control_plane_ingress_ip: + keys.append("{}:{}".format("ingress_control_plane_ip", port)) # One of the key already part of expected listening processes if any(key in expected_listening_processes for key in keys): diff --git a/tests/post/steps/test_ui.py b/tests/post/steps/test_ui.py index 7fcdb4a3a5..ea3e89ed55 100644 --- a/tests/post/steps/test_ui.py +++ b/tests/post/steps/test_ui.py @@ -1,10 +1,6 @@ -import json - -import pytest from pytest_bdd import scenario, then import requests -from tests import utils # Scenarios @scenario("../features/ui_alive.feature", "Reach the UI") @@ -13,11 +9,9 @@ def test_ui(host): @then("we can reach the UI") -def reach_UI(host): - ip = utils.get_grain(host, "metalk8s:control_plane_ip") - +def reach_UI(host, control_plane_ingress_ep): response = requests.get( - "https://{ip}:8443".format(ip=ip), + control_plane_ingress_ep, verify=False, ) diff --git a/tests/utils.py 
b/tests/utils.py index eb2a6ef6ec..dd2e18aada 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -194,9 +194,12 @@ class PrometheusApiError(Exception): class PrometheusApi: - def __init__(self, host, port=9090): - self.host = host - self.port = port + def __init__(self, host=None, port=9090, endpoint=None): + self.endpoint = endpoint + + if not self.endpoint: + self.endpoint = "https://{}:{}".format(host, port) + self.session = requests_retry_session() def request(self, method, route, **kwargs): @@ -204,9 +207,7 @@ def request(self, method, route, **kwargs): kwargs.setdefault("verify", False) response = self.session.request( method, - "https://{}:{}/api/prometheus/api/v1/{}".format( - self.host, self.port, route - ), + "{}/api/prometheus/api/v1/{}".format(self.endpoint, route), **kwargs ) response.raise_for_status() From c6efd5afae31458f4ca5beeea6a5d41eb26ad322 Mon Sep 17 00:00:00 2001 From: Teddy Andrieux Date: Mon, 7 Jun 2021 11:51:46 +0200 Subject: [PATCH 4/7] docs: Update doc to reach the UI using Ingress CP endpoint Update the command to retrieve the IP using Kubernetes API and Control Plane Ingress Service to get the external IP --- docs/installation/services.rst | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/docs/installation/services.rst b/docs/installation/services.rst index 6e51ca6601..2676a9d23e 100644 --- a/docs/installation/services.rst +++ b/docs/installation/services.rst @@ -12,13 +12,14 @@ and can be used for operating, extending and upgrading a MetalK8s cluster. Gather Required Information ^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Get the control plane IP of the bootstrap node. +Get the ingress control plane IP. -.. code-block:: shell +.. 
code-block:: console - root@bootstrap $ salt-call grains.get metalk8s:control_plane_ip - local: - + root@bootstrap $ kubectl --kubeconfig=/etc/kubernetes/admin.conf \ + get svc -n metalk8s-ingress ingress-nginx-control-plane-controller \ + -o=jsonpath='{.spec.externalIPs[0]}{"\n"}' + Use MetalK8s UI ^^^^^^^^^^^^^^^ From 967099a4e198cb230ac682b8f713edade56dc8a7 Mon Sep 17 00:00:00 2001 From: Teddy Andrieux Date: Mon, 7 Jun 2021 12:57:19 +0200 Subject: [PATCH 5/7] salt,docs: Make Ingress Control Plane IP configurable Make the IP used to reach the UI and other Control Plane components configurable from Bootstrap config file Refs: #2381 --- CHANGELOG.md | 6 ++++++ docs/installation/bootstrap.rst | 12 ++++++++++++ salt/_modules/metalk8s_network.py | 3 +++ salt/metalk8s/defaults.yaml | 6 +++--- .../modules/files/test_metalk8s_network.yaml | 18 +++++++++++++----- .../unit/modules/test_metalk8s_network.py | 9 +++++++-- 6 files changed, 44 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 401887eab3..752fb1072d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -60,6 +60,12 @@ LVMLogicalVolume volume in the UI (PR[#3410](https://github.com/scality/metalk8s/pull/3410)) +- [#2381](https://github.com/scality/metalk8s/issues/2381)) - Allow + configuring the Control Plane Ingress' external IP, to enable high + availability with failover of this (virtual) IP between control plane + nodes. This failover is not managed by MetalK8s. + (PR[#3415](https://github.com/scality/metalk8s/pull/3415)) + ### Breaking changes - [#2199](https://github.com/scality/metalk8s/issues/2199) - Prometheus label diff --git a/docs/installation/bootstrap.rst b/docs/installation/bootstrap.rst index 764ae73249..a2bb7ef335 100644 --- a/docs/installation/bootstrap.rst +++ b/docs/installation/bootstrap.rst @@ -47,6 +47,8 @@ Configuration networks: controlPlane: cidr: + ingress: + ip: workloadPlane: cidr: mtu: @@ -80,6 +82,16 @@ notation for it's various subfields. network. 
This is an :ref:`advanced configuration` which we do not recommend for non-experts. + For ``controlPlane`` entry, an ``ingress`` can also be provided. This + section allows setting the IP that will be used to connect to all the + control plane components, like MetalK8s-UI and the whole monitoring + stack. We suggest using a + `Virtual IP `_ that + will sit on a working master Node. The default value for this + Ingress IP is the control plane IP of the Bootstrap node (which means + that if you lose the Bootstrap node, you no longer have access to any + control plane component). + For ``workloadPlane`` entry an `MTU `_ can also be provided, this MTU value should be the lowest MTU value accross diff --git a/salt/_modules/metalk8s_network.py b/salt/_modules/metalk8s_network.py index f915aa586a..ddf62cd423 100644 --- a/salt/_modules/metalk8s_network.py +++ b/salt/_modules/metalk8s_network.py @@ -229,6 +229,9 @@ def routes(): def get_control_plane_ingress_ip(): + if "ingress" in __pillar__["networks"]["control_plane"]: + return __pillar__["networks"]["control_plane"]["ingress"]["ip"] + # Use Bootstrap Control Plane IP as Ingress Control plane IP bootstrap_id = __salt__["metalk8s.minions_by_role"]("bootstrap")[0] diff --git a/salt/metalk8s/defaults.yaml b/salt/metalk8s/defaults.yaml index 5ce36a2c74..95d37476fc 100644 --- a/salt/metalk8s/defaults.yaml +++ b/salt/metalk8s/defaults.yaml @@ -74,9 +74,6 @@ networks: control_plane_ip:8080: expected: nginx description: MetalK8s repository - ingress_control_plane_ip:8443: - expected: kube-proxy - description: Control plane nginx ingress master: 0.0.0.0:6443: expected: kube-apiserver @@ -87,6 +84,9 @@ 127.0.0.1:7443: expected: nginx description: Apiserver proxy + ingress_control_plane_ip:8443: + expected: kube-proxy + description: Control plane nginx ingress control_plane_ip:10257: expected: kube-controller-manager description: Kubernetes controller manager diff --git 
a/salt/tests/unit/modules/files/test_metalk8s_network.yaml b/salt/tests/unit/modules/files/test_metalk8s_network.yaml index 692f96f90d..7174b7122a 100644 --- a/salt/tests/unit/modules/files/test_metalk8s_network.yaml +++ b/salt/tests/unit/modules/files/test_metalk8s_network.yaml @@ -85,12 +85,20 @@ routes: - *simple_route get_control_plane_ingress_ip: - # 1. Nominal bootstrap IP (from Salt minion) + # 1. Nominal Ingress IP from pillar + - pillar: + networks: + control_plane: + ingress: + ip: 1.1.1.1 + result: 1.1.1.1 + + # 2. Nominal bootstrap IP (from Salt minion) - mine_ret: bootstrap: 1.1.1.2 result: 1.1.1.2 - # 2. Nominal bootstrap IP (from Salt master) + # 3. Nominal bootstrap IP (from Salt master) - mine_runner_ret: bootstrap: 1.1.1.3 mine_ret: @@ -99,19 +107,19 @@ get_control_plane_ingress_ip: __role: master result: 1.1.1.3 - # 3. Nominal bootstrap IP running from bootstrap node + # 4. Nominal bootstrap IP running from bootstrap node - grains: id: bootstrap metalk8s: control_plane_ip: 1.1.1.4 result: 1.1.1.4 - # 4. Error unable to get from mine (from Salt minion) + # 5. Error unable to get from mine (from Salt minion) - mine_ret: {} raises: true result: "Unable to get bootstrap Control Plane IP: {}" - # 5. Error unable to get from mine (from Salt master) + # 6. 
Error unable to get from mine (from Salt master) - mine_runner_ret: {} opts: __role: master diff --git a/salt/tests/unit/modules/test_metalk8s_network.py b/salt/tests/unit/modules/test_metalk8s_network.py index d50c1b990c..6baf2a29a9 100644 --- a/salt/tests/unit/modules/test_metalk8s_network.py +++ b/salt/tests/unit/modules/test_metalk8s_network.py @@ -296,6 +296,7 @@ def test_get_control_plane_ingress_ip( self, result, raises=False, + pillar=None, opts=None, grains=None, mine_ret=None, @@ -304,6 +305,8 @@ def test_get_control_plane_ingress_ip( """ Tests the return of `get_control_plane_ingress_ip` function """ + if pillar is None: + pillar = {"networks": {"control_plane": {}}} if opts is None: opts = {"__role": "minion"} if grains is None: @@ -316,8 +319,10 @@ def test_get_control_plane_ingress_ip( } with patch.dict(metalk8s_network.__salt__, salt_dict), patch.dict( - metalk8s_network.__opts__, opts - ), patch.dict(metalk8s_network.__grains__, grains): + metalk8s_network.__pillar__, pillar + ), patch.dict(metalk8s_network.__opts__, opts), patch.dict( + metalk8s_network.__grains__, grains + ): if raises: self.assertRaisesRegex( CommandExecutionError, From a5b85ddfbbaf015bf6a25d67abbaf072bf6ca72d Mon Sep 17 00:00:00 2001 From: Teddy Andrieux Date: Mon, 7 Jun 2021 17:12:41 +0200 Subject: [PATCH 6/7] tests: Make given running on multi node cluster more public Move `we are on a multi node cluster` from test registry to conftest, so that it can be used in other tests --- tests/post/steps/conftest.py | 8 ++++++++ tests/post/steps/test_registry.py | 14 +------------- 2 files changed, 9 insertions(+), 13 deletions(-) diff --git a/tests/post/steps/conftest.py b/tests/post/steps/conftest.py index 994b7b9cd2..4bfb5ec93c 100644 --- a/tests/post/steps/conftest.py +++ b/tests/post/steps/conftest.py @@ -117,6 +117,14 @@ def test_volume(volume_client, name): volume_client.delete(name, sync=True) +@given("we are on a multi node cluster") +def check_multi_node(k8s_client): + nodes = 
k8s_client.list_node() + + if len(nodes.items) == 1: + pytest.skip("We skip single node cluster for this test") + + # }}} # Then {{{ diff --git a/tests/post/steps/test_registry.py b/tests/post/steps/test_registry.py index 3bfe300617..12dec7ad78 100644 --- a/tests/post/steps/test_registry.py +++ b/tests/post/steps/test_registry.py @@ -3,7 +3,7 @@ import os.path import pytest -from pytest_bdd import given, when, scenario, then, parsers +from pytest_bdd import when, scenario, then, parsers import testinfra from tests import kube_utils, utils @@ -46,18 +46,6 @@ def test_pull_registry_ha(host, teardown): pass -# }}} -# Given {{{ - - -@given("we are on a multi node cluster") -def check_multi_node(k8s_client): - nodes = k8s_client.list_node() - - if len(nodes.items) == 1: - pytest.skip("We skip single node cluster for this test") - - # }}} # When {{{ From 4fe7cac927d937ea3c55c517d9bb1ad1c731c4b5 Mon Sep 17 00:00:00 2001 From: Teddy Andrieux Date: Mon, 7 Jun 2021 15:16:04 +0200 Subject: [PATCH 7/7] salt,docs,tests: Add procedure to change the Control Plane Ingress IP Add a simple orchestrate and small procedure in the documentation to change the Control Plane Ingress IP --- buildchain/buildchain/salt_tree.py | 1 + .../changing_control_plane_ingress_ip.rst | 44 +++++++++++ docs/operation/index.rst | 1 + .../update-control-plane-ingress-ip.sls | 57 ++++++++++++++ tests/post/features/ingress.feature | 10 +++ tests/post/steps/test_ingress.py | 78 +++++++++++++++++++ 6 files changed, 191 insertions(+) create mode 100644 docs/operation/changing_control_plane_ingress_ip.rst create mode 100644 salt/metalk8s/orchestrate/update-control-plane-ingress-ip.sls diff --git a/buildchain/buildchain/salt_tree.py b/buildchain/buildchain/salt_tree.py index a311aad14b..1ae8ab8587 100644 --- a/buildchain/buildchain/salt_tree.py +++ b/buildchain/buildchain/salt_tree.py @@ -545,6 +545,7 @@ def _get_parts(self) -> Iterator[str]: Path("salt/metalk8s/orchestrate/downgrade/precheck.sls"), 
Path("salt/metalk8s/orchestrate/downgrade/pre.sls"), Path("salt/metalk8s/orchestrate/downgrade/post.sls"), + Path("salt/metalk8s/orchestrate/update-control-plane-ingress-ip.sls"), Path("salt/metalk8s/orchestrate/upgrade/init.sls"), Path("salt/metalk8s/orchestrate/upgrade/precheck.sls"), Path("salt/metalk8s/orchestrate/upgrade/pre.sls"), diff --git a/docs/operation/changing_control_plane_ingress_ip.rst b/docs/operation/changing_control_plane_ingress_ip.rst new file mode 100644 index 0000000000..0a586272f8 --- /dev/null +++ b/docs/operation/changing_control_plane_ingress_ip.rst @@ -0,0 +1,44 @@ +Changing the Control Plane Ingress IP +===================================== + +#. On the Bootstrap node, update the ``ip`` field from + ``networks.controlPlane.ingress`` in the Bootstrap configuration file. + (refer to :ref:`Bootstrap Configuration`) + +#. Refresh the pillar. + + .. code-block:: console + + $ salt-call saltutil.refresh_pillar wait=True + +#. Check that the change is taken into account. + + .. code-block:: console + + $ salt-call metalk8s_network.get_control_plane_ingress_ip + local: + + +#. On the Bootstrap node, reconfigure apiServer: + + .. parsed-literal:: + + $ salt-call state.sls \\ + metalk8s.kubernetes.apiserver \\ + saltenv=metalk8s-|version| + +#. Reconfigure Control Plane components: + + .. parsed-literal:: + + $ kubectl exec -n kube-system -c salt-master \\ + --kubeconfig=/etc/kubernetes/admin.conf \\ + $(kubectl --kubeconfig=/etc/kubernetes/admin.conf get pod \\ + -l "app.kubernetes.io/name=salt-master" \\ + --namespace=kube-system -o jsonpath='{.items[0].metadata.name}') \\ + -- salt-run state.orchestrate \\ + metalk8s.orchestrate.update-control-plane-ingress-ip \\ + saltenv=metalk8s-|version| + +#. You can :ref:`access the MetalK8s GUI ` + using this new IP. 
diff --git a/docs/operation/index.rst b/docs/operation/index.rst index a08c430b6e..e056cf3fdd 100644 --- a/docs/operation/index.rst +++ b/docs/operation/index.rst @@ -19,6 +19,7 @@ do not have a working MetalK8s_ setup. disaster_recovery/index solutions changing_node_hostname + changing_control_plane_ingress_ip metalk8s-utils registry_ha listening_processes diff --git a/salt/metalk8s/orchestrate/update-control-plane-ingress-ip.sls b/salt/metalk8s/orchestrate/update-control-plane-ingress-ip.sls new file mode 100644 index 0000000000..8bde7b3253 --- /dev/null +++ b/salt/metalk8s/orchestrate/update-control-plane-ingress-ip.sls @@ -0,0 +1,57 @@ +{%- set bootstrap_node = salt.metalk8s.minions_by_role('bootstrap') | first %} +Check pillar content on {{ bootstrap_node }}: + salt.function: + - name: metalk8s.check_pillar_keys + - tgt: {{ bootstrap_node }} + - kwarg: + keys: + - metalk8s.endpoints.repositories + raise_error: False + - retry: + attempts: 5 + +Regenerate Control Plane Ingress cert on {{ bootstrap_node }}: + salt.state: + - tgt: {{ bootstrap_node }} + - sls: + - metalk8s.addons.nginx-ingress-control-plane.certs + - saltenv: {{ saltenv }} + - require: + - salt: Check pillar content on {{ bootstrap_node }} + +Reconfigure Control Plane Ingress: + salt.runner: + - name: state.orchestrate + - mods: + - metalk8s.addons.nginx-ingress-control-plane.deployed + - saltenv: {{ saltenv }} + - require: + - salt: Regenerate Control Plane Ingress cert on {{ bootstrap_node }} + +Reconfigure Control Plane components: + salt.runner: + - name: state.orchestrate + - mods: + - metalk8s.addons.dex.deployed + - metalk8s.addons.prometheus-operator.deployed + - metalk8s.addons.ui.deployed + - saltenv: {{ saltenv }} + - require: + - salt: Reconfigure Control Plane Ingress + +{%- set master_nodes = salt.metalk8s.minions_by_role('master') %} +{%- for node in master_nodes | sort %} + +Reconfigure apiserver on {{ node }}: + salt.state: + - tgt: {{ node }} + - sls: + - 
metalk8s.kubernetes.apiserver + - saltenv: {{ saltenv }} + - require: + - salt: Reconfigure Control Plane components + {%- if loop.previtem is defined %} + - salt: Reconfigure apiserver on {{ loop.previtem }} + {%- endif %} + +{%- endfor %} diff --git a/tests/post/features/ingress.feature b/tests/post/features/ingress.feature index 3a06c69424..2c834903e0 100644 --- a/tests/post/features/ingress.feature +++ b/tests/post/features/ingress.feature @@ -18,3 +18,13 @@ Feature: Ingress And the node control-plane IP is not equal to its workload-plane IP When we perform an HTTP request on port 80 on a control-plane IP Then the server should not respond + + Scenario: Change Control Plane Ingress IP to node-1 IP + Given the Kubernetes API is available + And we are on a multi node cluster + And pods with label 'app.kubernetes.io/name=ingress-nginx' are 'Ready' + When we update control plane ingress IP to node 'node-1' IP + And we wait for the rollout of 'daemonset/ingress-nginx-control-plane-controller' in namespace 'metalk8s-ingress' to complete + And we wait for the rollout of 'deploy/dex' in namespace 'metalk8s-auth' to complete + Then the control plane ingress IP is equal to node 'node-1' IP + And we are able to login to Dex as 'admin@metalk8s.invalid' using password 'password' diff --git a/tests/post/steps/test_ingress.py b/tests/post/steps/test_ingress.py index 4cf2dc3cc5..b930cd04ee 100644 --- a/tests/post/steps/test_ingress.py +++ b/tests/post/steps/test_ingress.py @@ -1,8 +1,11 @@ +import json +import os import requests import requests.exceptions import pytest from pytest_bdd import given, parsers, scenario, then, when +import testinfra from tests import utils @@ -22,11 +25,30 @@ def test_access_http_services_on_control_plane_ip(host): pass +@scenario("../features/ingress.feature", "Change Control Plane Ingress IP to node-1 IP") +def test_change_cp_ingress_ip(host, teardown): + pass + + @pytest.fixture(scope="function") def context(): return {} +@pytest.fixture +def 
teardown(context, host, ssh_config, version): + yield + if "bootstrap_to_restore" in context: + with host.sudo(): + host.check_output( + "cp {} /etc/metalk8s/bootstrap.yaml".format( + context["bootstrap_to_restore"] + ) + ) + + re_configure_cp_ingress(host, version, ssh_config) + + @given("the node control-plane IP is not equal to its workload-plane IP") def node_control_plane_ip_is_not_equal_to_its_workload_plane_ip(host): data = utils.get_grain(host, "metalk8s") @@ -67,6 +89,17 @@ def perform_request(host, context, protocol, port, plane): context["exception"] = exc +@when(parsers.parse("we update control plane ingress IP to node '{node_name}' IP")) +def update_cp_ingress_ip(host, context, ssh_config, version, node_name): + node = testinfra.get_host(node_name, ssh_config=ssh_config) + ip = utils.get_grain(node, "metalk8s:control_plane_ip") + + bootstrap_patch = {"networks": {"controlPlane": {"ingress": {"ip": ip}}}} + + patch_bootstrap_config(context, host, bootstrap_patch) + re_configure_cp_ingress(host, version, ssh_config) + + @then( parsers.re(r"the server returns (?P\d+) '(?P.+)'"), converters=dict(status_code=int), @@ -82,3 +115,48 @@ def server_returns(host, context, status_code, reason): def server_does_not_respond(host, context): assert "exception" in context assert isinstance(context["exception"], requests.exceptions.ConnectionError) + + +@then(parsers.parse("the control plane ingress IP is equal to node '{node_name}' IP")) +def check_cp_ingress_node_ip(control_plane_ingress_ip, node_name, ssh_config): + node = testinfra.get_host(node_name, ssh_config=ssh_config) + ip = utils.get_grain(node, "metalk8s:control_plane_ip") + + assert control_plane_ingress_ip == ip + + +def patch_bootstrap_config(context, host, patch): + with host.sudo(): + cmd_ret = host.check_output("salt-call --out json --local temp.dir") + + tmp_dir = json.loads(cmd_ret)["local"] + + with host.sudo(): + host.check_output("cp /etc/metalk8s/bootstrap.yaml {}".format(tmp_dir)) + + 
context["bootstrap_to_restore"] = os.path.join(tmp_dir, "bootstrap.yaml") + + with host.sudo(): + host.check_output( + "salt-call --local --retcode-passthrough state.single " + "file.serialize /etc/metalk8s/bootstrap.yaml " + "dataset='{}' " + "merge_if_exists=True".format(json.dumps(patch)) + ) + + +def re_configure_cp_ingress(host, version, ssh_config): + with host.sudo(): + host.check_output( + "salt-call --retcode-passthrough state.sls " + "metalk8s.kubernetes.apiserver saltenv=metalk8s-{}".format(version) + ) + + command = [ + "salt-run", + "state.orchestrate", + "metalk8s.orchestrate.update-control-plane-ingress-ip", + "saltenv=metalk8s-{}".format(version), + ] + + utils.run_salt_command(host, command, ssh_config)