diff --git a/lib/charms/data_platform_libs/v0/data_interfaces.py b/lib/charms/data_platform_libs/v0/data_interfaces.py index 59a97226a..a2162aa0b 100644 --- a/lib/charms/data_platform_libs/v0/data_interfaces.py +++ b/lib/charms/data_platform_libs/v0/data_interfaces.py @@ -331,7 +331,7 @@ def _on_topic_requested(self, event: TopicRequestedEvent): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 37 +LIBPATCH = 38 PYDEPS = ["ops>=2.0.0"] @@ -2606,6 +2606,14 @@ def set_version(self, relation_id: int, version: str) -> None: """ self.update_relation_data(relation_id, {"version": version}) + def set_subordinated(self, relation_id: int) -> None: + """Raises the subordinated flag in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + """ + self.update_relation_data(relation_id, {"subordinated": "true"}) + class DatabaseProviderEventHandlers(EventHandlers): """Provider-side of the database relation handlers.""" @@ -2842,6 +2850,21 @@ def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: """Event emitted when the database relation has changed.""" + is_subordinate = False + remote_unit_data = None + for key in event.relation.data.keys(): + if isinstance(key, Unit) and not key.name.startswith(self.charm.app.name): + remote_unit_data = event.relation.data[key] + elif isinstance(key, Application) and key.name != self.charm.app.name: + is_subordinate = event.relation.data[key].get("subordinated") == "true" + + if is_subordinate: + if not remote_unit_data: + return + + if remote_unit_data.get("state") != "ready": + return + # Check which data has changed to emit customs events. 
diff = self._diff(event) diff --git a/lib/charms/mysql/v0/async_replication.py b/lib/charms/mysql/v0/async_replication.py index f9e245220..3e5a31f18 100644 --- a/lib/charms/mysql/v0/async_replication.py +++ b/lib/charms/mysql/v0/async_replication.py @@ -54,7 +54,7 @@ # The unique Charmhub library identifier, never change it LIBID = "4de21f1a022c4e2c87ac8e672ec16f6a" LIBAPI = 0 -LIBPATCH = 4 +LIBPATCH = 5 RELATION_OFFER = "replication-offer" RELATION_CONSUMER = "replication" @@ -248,8 +248,6 @@ def on_async_relation_broken(self, event: RelationBrokenEvent): # noqa: C901 "\tThe cluster can be recreated with the `recreate-cluster` action.\n" "\tAlternatively the cluster can be rejoined to the cluster set." ) - # reset the cluster node count flag - del self._charm.app_peer_data["units-added-to-cluster"] # set flag to persist removed from cluster-set state self._charm.app_peer_data["removed-from-cluster-set"] = "true" @@ -834,8 +832,6 @@ def _on_consumer_changed(self, event): # noqa: C901 self._charm.unit.status = MaintenanceStatus("Dissolving replica cluster") logger.info("Dissolving replica cluster") self._charm._mysql.dissolve_cluster() - # reset the cluster node count flag - del self._charm.app_peer_data["units-added-to-cluster"] # reset force rejoin-secondaries flag del self._charm.app_peer_data["rejoin-secondaries"] @@ -869,11 +865,6 @@ def _on_consumer_changed(self, event): # noqa: C901 if cluster_set_domain_name := self._charm._mysql.get_cluster_set_name(): self._charm.app_peer_data["cluster-set-domain-name"] = cluster_set_domain_name - # set the number of units added to the cluster for a single unit replica cluster - # needed here since it will skip the `RECOVERING` state - if self._charm.app.planned_units() == 1: - self._charm.app_peer_data["units-added-to-cluster"] = "1" - self._charm._on_update_status(None) elif state == States.RECOVERING: # recovering cluster (copying data and/or joining units) @@ -882,10 +873,6 @@ def _on_consumer_changed(self, event): # 
noqa: C901 "Waiting for recovery to complete on other units" ) logger.debug("Awaiting other units to join the cluster") - # reset the number of units added to the cluster - # this will trigger secondaries to join the cluster - node_count = self._charm._mysql.get_cluster_node_count() - self._charm.app_peer_data["units-added-to-cluster"] = str(node_count) # set state flags to allow secondaries to join the cluster self._charm.unit_peer_data["member-state"] = "online" self._charm.unit_peer_data["member-role"] = "primary" diff --git a/lib/charms/mysql/v0/mysql.py b/lib/charms/mysql/v0/mysql.py index eb9797192..97b4ffcfa 100644 --- a/lib/charms/mysql/v0/mysql.py +++ b/lib/charms/mysql/v0/mysql.py @@ -128,7 +128,7 @@ def wait_until_mysql_connection(self) -> None: # Increment this major API version when introducing breaking changes LIBAPI = 0 -LIBPATCH = 62 +LIBPATCH = 64 UNIT_TEARDOWN_LOCKNAME = "unit-teardown" UNIT_ADD_LOCKNAME = "unit-add" @@ -589,7 +589,6 @@ def create_cluster(self) -> None: # rescan cluster for cleanup of unused # recovery users self._mysql.rescan_cluster() - self.app_peer_data["units-added-to-cluster"] = "1" state, role = self._mysql.get_member_state() @@ -1779,6 +1778,27 @@ def _get_host_ip(host: str) -> str: return ",".join(rw_endpoints), ",".join(ro_endpoints), ",".join(no_endpoints) + def execute_remove_instance( + self, connect_instance: Optional[str] = None, force: bool = False + ) -> None: + """Execute the remove_instance() script with mysqlsh. 
+ + Args: + connect_instance: (optional) The instance from where to run the remove_instance() + force: (optional) Whether to force the removal of the instance + """ + remove_instance_options = { + "password": self.cluster_admin_password, + "force": "true" if force else "false", + } + remove_instance_commands = ( + f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{connect_instance or self.instance_address}')", + f"cluster = dba.get_cluster('{self.cluster_name}')", + "cluster.remove_instance(" + f"'{self.cluster_admin_user}@{self.instance_address}', {remove_instance_options})", + ) + self._run_mysqlsh_script("\n".join(remove_instance_commands)) + @retry( retry=retry_if_exception_type(MySQLRemoveInstanceRetryError), stop=stop_after_attempt(15), @@ -1842,17 +1862,7 @@ def remove_instance(self, unit_label: str, lock_instance: Optional[str] = None) ) # Just remove instance - remove_instance_options = { - "password": self.cluster_admin_password, - "force": "true", - } - remove_instance_commands = ( - f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{self.instance_address}')", - f"cluster = dba.get_cluster('{self.cluster_name}')", - "cluster.remove_instance(" - f"'{self.cluster_admin_user}@{self.instance_address}', {remove_instance_options})", - ) - self._run_mysqlsh_script("\n".join(remove_instance_commands)) + self.execute_remove_instance(force=True) except MySQLClientError as e: # In case of an error, raise an error and retry logger.warning( diff --git a/lib/charms/mysql/v0/tls.py b/lib/charms/mysql/v0/tls.py index 2f7d31ad2..9ecb4d14d 100644 --- a/lib/charms/mysql/v0/tls.py +++ b/lib/charms/mysql/v0/tls.py @@ -52,7 +52,7 @@ LIBID = "eb73947deedd4380a3a90d527e0878eb" LIBAPI = 0 -LIBPATCH = 5 +LIBPATCH = 6 SCOPE = "unit" diff --git a/poetry.lock b/poetry.lock index 98fe0c9e4..ade072e67 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2024,7 +2024,6 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = 
"sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, diff --git a/src/charm.py b/src/charm.py index 171bc070f..02bc83b8e 100755 --- a/src/charm.py +++ b/src/charm.py @@ -41,10 +41,17 @@ from charms.rolling_ops.v0.rollingops import RollingOpsManager from charms.tempo_k8s.v1.charm_tracing import trace_charm from charms.tempo_k8s.v2.tracing import TracingEndpointRequirer -from ops import EventBase, RelationBrokenEvent, RelationCreatedEvent, Unit +from ops import EventBase, RelationBrokenEvent, RelationCreatedEvent from ops.charm import RelationChangedEvent, UpdateStatusEvent from ops.main import main -from ops.model import ActiveStatus, BlockedStatus, Container, MaintenanceStatus, WaitingStatus +from ops.model import ( + ActiveStatus, + BlockedStatus, + Container, + MaintenanceStatus, + Unit, + WaitingStatus, +) from ops.pebble import Layer from config import CharmConfig, MySQLConfig @@ -189,7 +196,7 @@ def tracing_endpoint(self) -> Optional[str]: def _mysql(self) -> MySQL: """Returns an instance of the MySQL object from mysql_k8s_helpers.""" return MySQL( - 
self._get_unit_fqdn(), + self.get_unit_address(), self.app_peer_data["cluster-name"], self.app_peer_data["cluster-set-domain-name"], self.get_secret("app", ROOT_PASSWORD_KEY), # pyright: ignore [reportArgumentType] @@ -252,11 +259,7 @@ def restart_peers(self) -> Optional[ops.model.Relation]: @property def unit_address(self) -> str: """Return the address of this unit.""" - return self._get_unit_fqdn() - - def get_unit_address(self, unit: Unit) -> str: - """Return the address of a unit.""" - return self._get_unit_fqdn(unit.name) + return self.get_unit_address() def get_unit_hostname(self, unit_name: Optional[str] = None) -> str: """Get the hostname.localdomain for a unit. @@ -272,17 +275,15 @@ def get_unit_hostname(self, unit_name: Optional[str] = None) -> str: unit_name = unit_name or self.unit.name return f"{unit_name.replace('/', '-')}.{self.app.name}-endpoints" - def _get_unit_fqdn(self, unit_name: Optional[str] = None) -> str: - """Create a fqdn for a unit. + def get_unit_address(self, unit: Optional[Unit] = None) -> str: + """Get fqdn/address for a unit. Translate juju unit name to resolvable hostname. - - Args: - unit_name: unit name - Returns: - A string representing the fqdn of the unit. """ - return getfqdn(self.get_unit_hostname(unit_name)) + if not unit: + unit = self.unit + + return getfqdn(self.get_unit_hostname(unit.name)) def is_unit_busy(self) -> bool: """Returns whether the unit is busy.""" @@ -294,7 +295,7 @@ def _get_primary_from_online_peer(self) -> Optional[str]: if self.peers.data[unit].get("member-state") == "online": try: return self._mysql.get_cluster_primary_address( - connect_instance_address=self._get_unit_fqdn(unit.name), + connect_instance_address=self.get_unit_address(unit), ) except MySQLGetClusterPrimaryAddressError: # try next unit @@ -325,7 +326,7 @@ def join_unit_to_cluster(self) -> None: Try to join the unit from the primary unit. 
""" instance_label = self.unit.name.replace("/", "-") - instance_address = self._get_unit_fqdn(self.unit.name) + instance_address = self.get_unit_address(self.unit) if not self._mysql.is_instance_in_cluster(instance_label): # Add new instance to the cluster @@ -370,6 +371,21 @@ def join_unit_to_cluster(self) -> None: # Stop GR for cases where the instance was previously part of the cluster # harmless otherwise self._mysql.stop_group_replication() + + # If instance already in cluster, before adding instance to cluster, + # remove the instance from the cluster and call rescan_cluster() + # without adding/removing instances to clean up stale users + if ( + instance_label + in self._mysql.get_cluster_status(from_instance=cluster_primary)[ + "defaultreplicaset" + ]["topology"].keys() + ): + self._mysql.execute_remove_instance( + connect_instance=cluster_primary, force=True + ) + self._mysql.rescan_cluster(from_instance=cluster_primary) + self._mysql.add_instance_to_cluster( instance_address=instance_address, instance_unit_label=instance_label, @@ -385,7 +401,6 @@ def join_unit_to_cluster(self) -> None: logger.debug("waiting: failed to acquire lock when adding instance to cluster") return - # Update 'units-added-to-cluster' counter in the peer relation databag self.unit_peer_data["member-state"] = "online" self.unit.status = ActiveStatus(self.active_status_message) logger.debug(f"Instance {instance_label} is cluster member") @@ -669,7 +684,7 @@ def _on_mysql_pebble_ready(self, event) -> None: # First run setup self._configure_instance(container) - if not self.unit.is_leader(): + if not self.unit.is_leader() or self.cluster_initialized: # Non-leader units try to join cluster self.unit.status = WaitingStatus("Waiting for instance to join the cluster") self.unit_peer_data.update({"member-role": "secondary", "member-state": "waiting"}) @@ -793,10 +808,6 @@ def _on_update_status(self, _: Optional[UpdateStatusEvent]) -> None: def _set_app_status(self) -> None: """Set the 
application status based on the cluster state.""" - nodes = self._mysql.get_cluster_node_count() - if nodes > 0: - self.app_peer_data["units-added-to-cluster"] = str(nodes) - try: primary_address = self._mysql.get_cluster_primary_address() except MySQLGetClusterPrimaryAddressError: @@ -838,7 +849,7 @@ def _on_database_storage_detaching(self, _) -> None: logger.info("Switching primary to unit 0") try: self._mysql.set_cluster_primary( - new_primary_address=self._get_unit_fqdn(f"{self.app.name}/0") + new_primary_address=getfqdn(self.get_unit_hostname(f"{self.app.name}/0")) ) except MySQLSetClusterPrimaryError: logger.warning("Failed to switch primary to unit 0") diff --git a/src/upgrade.py b/src/upgrade.py index a7b90c34b..0fd10016a 100644 --- a/src/upgrade.py +++ b/src/upgrade.py @@ -5,6 +5,7 @@ import json import logging +from socket import getfqdn from typing import TYPE_CHECKING from charms.data_platform_libs.v0.upgrade import ( @@ -174,12 +175,12 @@ def _pre_upgrade_prepare(self) -> None: """ if self.charm._mysql.get_primary_label() != f"{self.charm.app.name}-0": # set the primary to the first unit for switchover mitigation - new_primary = self.charm._get_unit_fqdn(f"{self.charm.app.name}/0") + new_primary = getfqdn(self.charm.get_unit_hostname(f"{self.charm.app.name}/0")) self.charm._mysql.set_cluster_primary(new_primary) # set slow shutdown on all instances for unit in self.app_units: - unit_address = self.charm._get_unit_fqdn(unit.name) + unit_address = self.charm.get_unit_address(unit) self.charm._mysql.set_dynamic_variable( variable="innodb_fast_shutdown", value="0", instance_address=unit_address ) @@ -293,9 +294,7 @@ def _complete_upgrade(self): if self.charm.unit_label == f"{self.charm.app.name}/1": # penultimate unit, reset the primary for faster switchover try: - self.charm._mysql.set_cluster_primary( - self.charm._get_unit_fqdn(self.charm.unit.name) - ) + self.charm._mysql.set_cluster_primary(self.charm.get_unit_address(self.charm.unit)) except 
MySQLSetClusterPrimaryError: logger.debug("Failed to set primary") @@ -322,7 +321,7 @@ def _check_server_upgradeability(self) -> None: if len(self.upgrade_stack or []) < self.charm.app.planned_units(): # check is done for 1st upgrading unit return - instance = self.charm._get_unit_fqdn(f"{self.charm.app.name}/0") + instance = getfqdn(self.charm.get_unit_hostname(f"{self.charm.app.name}/0")) self.charm._mysql.verify_server_upgradable(instance=instance) logger.debug("MySQL server is upgradeable") diff --git a/tests/integration/high_availability/high_availability_helpers.py b/tests/integration/high_availability/high_availability_helpers.py index 01b9132ba..b251905f4 100644 --- a/tests/integration/high_availability/high_availability_helpers.py +++ b/tests/integration/high_availability/high_availability_helpers.py @@ -10,11 +10,14 @@ from typing import List, Optional, Tuple import kubernetes +import lightkube import yaml from juju.model import Model from juju.unit import Unit from lightkube import Client +from lightkube.models.meta_v1 import ObjectMeta from lightkube.resources.apps_v1 import StatefulSet +from lightkube.resources.core_v1 import PersistentVolume, PersistentVolumeClaim, Pod from pytest_operator.plugin import OpsTest from tenacity import RetryError, Retrying, retry, stop_after_attempt, stop_after_delay, wait_fixed @@ -291,9 +294,13 @@ async def high_availability_test_setup(ops_test: OpsTest) -> Tuple[str, str]: Args: ops_test: The ops test framework """ + logger.info("Deploying mysql-k8s and scaling to 3 units") mysql_application_name = await deploy_and_scale_mysql(ops_test) + + logger.info("Deploying mysql-test-app") application_name = await deploy_and_scale_application(ops_test) + logger.info("Relating mysql-k8s with mysql-test-app") await relate_mysql_and_application(ops_test, mysql_application_name, application_name) return mysql_application_name, application_name @@ -586,3 +593,90 @@ def get_sts_partition(ops_test: OpsTest, app_name: str) -> int: 
client = Client() # type: ignore statefulset = client.get(res=StatefulSet, namespace=ops_test.model.info.name, name=app_name) return statefulset.spec.updateStrategy.rollingUpdate.partition # type: ignore + + +def get_pod(ops_test: OpsTest, unit_name: str) -> Pod: + """Retrieve the PVs of a pod.""" + client = lightkube.Client() + return client.get( + res=Pod, namespace=ops_test.model.info.name, name=unit_name.replace("/", "-") + ) + + +def get_pod_pvcs(pod: Pod) -> list[PersistentVolumeClaim]: + """Get a pod's PVCs.""" + if pod.spec is None: + return [] + + client = lightkube.Client() + pod_pvcs = [] + + for volume in pod.spec.volumes: + if volume.persistentVolumeClaim is None: + continue + + pvc_name = volume.persistentVolumeClaim.claimName + pod_pvcs.append( + client.get( + res=PersistentVolumeClaim, + name=pvc_name, + namespace=pod.metadata.namespace, + ) + ) + + return pod_pvcs + + +def get_pod_pvs(pod: Pod) -> list[PersistentVolume]: + """Get a pod's PVs.""" + if pod.spec is None: + return [] + + pod_pvs = [] + client = lightkube.Client() + for pv in client.list(res=PersistentVolume, namespace=pod.metadata.namespace): + if pv.spec.claimRef.name.endswith(pod.metadata.name): + pod_pvs.append(pv) + return pod_pvs + + +def evict_pod(pod: Pod) -> None: + """Evict a pod.""" + if pod.metadata is None: + return + + logger.info(f"Evicting pod {pod.metadata.name}") + client = lightkube.Client() + eviction = Pod.Eviction( + metadata=ObjectMeta(name=pod.metadata.name, namespace=pod.metadata.namespace), + ) + client.create(eviction, name=str(pod.metadata.name)) + + +def delete_pvs(pvs: list[PersistentVolume]) -> None: + """Delete the provided PVs.""" + for pv in pvs: + logger.info(f"Deleting PV {pv.spec.claimRef.name}") + client = lightkube.Client() + client.delete( + PersistentVolume, + pv.metadata.name, + namespace=pv.spec.claimRef.namespace, + grace_period=0, + ) + + +def delete_pvcs(pvcs: list[PersistentVolumeClaim]) -> None: + """Delete the provided PVCs.""" + for pvc 
in pvcs: + if pvc.metadata is None: + continue + + logger.info(f"Deleting PVC {pvc.metadata.name}") + client = lightkube.Client() + client.delete( + PersistentVolumeClaim, + pvc.metadata.name, + namespace=pvc.metadata.namespace, + grace_period=0, + ) diff --git a/tests/integration/high_availability/test_node_drain.py b/tests/integration/high_availability/test_node_drain.py new file mode 100644 index 000000000..355b18ebc --- /dev/null +++ b/tests/integration/high_availability/test_node_drain.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python3 +# Copyright 2022 Canonical Ltd. +# See LICENSE file for licensing details. + +import logging + +import pytest +from pytest_operator.plugin import OpsTest + +from ..helpers import get_primary_unit +from .high_availability_helpers import ( + delete_pvcs, + delete_pvs, + ensure_all_units_continuous_writes_incrementing, + ensure_n_online_mysql_members, + evict_pod, + get_pod, + get_pod_pvcs, + get_pod_pvs, + high_availability_test_setup, +) + +logger = logging.getLogger(__name__) + +MYSQL_CONTAINER_NAME = "mysql" +MYSQLD_PROCESS_NAME = "mysqld" +TIMEOUT = 30 * 60 + + +@pytest.mark.group(1) +@pytest.mark.skip_if_deployed +async def test_build_and_deploy(ops_test: OpsTest) -> None: + """Simple test to ensure that the mysql and application charms get deployed.""" + await high_availability_test_setup(ops_test) + + +@pytest.mark.group(1) +@pytest.mark.abort_on_fail +async def test_pod_eviction_and_pvc_deletion(ops_test: OpsTest, continuous_writes) -> None: + """Test behavior when node drains - pod is evicted and pvs are rotated.""" + mysql_application_name, _ = await high_availability_test_setup(ops_test) + + logger.info("Waiting until 3 mysql instances are online") + # ensure all units in the cluster are online + assert await ensure_n_online_mysql_members( + ops_test, 3 + ), "The deployed mysql application is not fully online" + + logger.info("Ensuring all units have continuous writes incrementing") + await 
ensure_all_units_continuous_writes_incrementing(ops_test) + + mysql_unit = ops_test.model.applications[mysql_application_name].units[0] + primary = await get_primary_unit(ops_test, mysql_unit, mysql_application_name) + + logger.info(f"Evicting primary node {primary.name} and deleting its PVCs") + primary_pod = get_pod(ops_test, primary.name) + primary_pod_pvcs = get_pod_pvcs(primary_pod) + primary_pod_pvs = get_pod_pvs(primary_pod) + evict_pod(primary_pod) + delete_pvcs(primary_pod_pvcs) + delete_pvs(primary_pod_pvs) + + async with ops_test.fast_forward(): + logger.info("Waiting for evicted primary pod to be rescheduled") + await ops_test.model.wait_for_idle( + apps=[mysql_application_name], + status="active", + raise_on_blocked=True, + timeout=TIMEOUT, + wait_for_exact_units=3, + ) + + logger.info("Waiting until 3 mysql instances are online") + assert await ensure_n_online_mysql_members( + ops_test, 3 + ), "The deployed mysql application is not fully online after primary pod eviction" + + logger.info("Ensuring all units have continuous writes incrementing") + await ensure_all_units_continuous_writes_incrementing(ops_test) diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index aca3fec41..2e4e63bff 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -41,6 +41,11 @@ def setUp(self) -> None: self.peer_relation_id = self.harness.add_relation("database-peers", "database-peers") self.restart_relation_id = self.harness.add_relation("restart", "restart") self.harness.add_relation_unit(self.peer_relation_id, f"{APP_NAME}/1") + self.harness.update_relation_data( + self.peer_relation_id, + "mysql-k8s", + {"cluster-name": "test_cluster", "cluster-set-domain-name": "test_cluster_set"}, + ) self.charm = self.harness.charm self.maxDiff = None @@ -186,24 +191,31 @@ def test_mysql_pebble_ready( self.layer_dict(with_mysqld_exporter=True)["services"], ) + @patch("charm.MySQLOperatorCharm.unit_initialized", new_callable=PropertyMock) + 
@patch("charm.MySQLOperatorCharm.cluster_initialized", new_callable=PropertyMock) @patch("charm.MySQLOperatorCharm.join_unit_to_cluster") @patch("charm.MySQLOperatorCharm._configure_instance") @patch("charm.MySQLOperatorCharm._write_mysqld_configuration") @patch("upgrade.MySQLK8sUpgrade.idle", return_value=True) @patch("charm.MySQLOperatorCharm._mysql") def test_pebble_ready_set_data( - self, mock_mysql, mock_upgrade_idle, mock_write_conf, mock_conf, mock_join + self, + mock_mysql, + mock_upgrade_idle, + mock_write_conf, + mock_conf, + mock_join, + _cluster_initialized, + _unit_initialized, ): mock_mysql.is_data_dir_initialised.return_value = False mock_mysql.get_member_state.return_value = ("online", "primary") self.harness.set_can_connect("mysql", True) self.harness.set_leader() - # test on non leader - self.harness.set_leader(is_leader=False) - self.harness.container_pebble_ready("mysql") - self.assertEqual(self.charm.unit_peer_data["member-role"], "secondary") - self.assertEqual(self.charm.unit_peer_data["member-state"], "waiting") + mock_mysql.cluster_metadata_exists.return_value = False + _cluster_initialized.return_value = False + _unit_initialized.return_value = False # test on leader self.harness.set_leader(is_leader=True) @@ -211,6 +223,14 @@ def test_pebble_ready_set_data( self.assertEqual(self.charm.unit_peer_data["member-state"], "online") self.assertEqual(self.charm.unit_peer_data["member-role"], "primary") + _cluster_initialized.return_value = True + + # test on non leader + self.harness.set_leader(is_leader=False) + self.harness.container_pebble_ready("mysql") + self.assertEqual(self.charm.unit_peer_data["member-role"], "secondary") + self.assertEqual(self.charm.unit_peer_data["member-state"], "waiting") + @patch("charm.MySQLOperatorCharm._mysql", new_callable=PropertyMock) def test_mysql_pebble_ready_non_leader(self, _mysql_mock): # Test pebble ready when not leader @@ -237,6 +257,11 @@ def test_mysql_pebble_ready_exception(self, _mysql_mock): 
self.assertFalse(isinstance(self.charm.unit.status, ActiveStatus)) def test_on_config_changed(self): + self.harness.update_relation_data( + self.peer_relation_id, + "mysql-k8s", + {"cluster-name": "", "cluster-set-domain-name": "test_cluster_set"}, + ) # Test config changed set of cluster name self.assertEqual(self.charm.peers.data[self.charm.app].get("cluster-name"), None) self.harness.set_leader() diff --git a/tests/unit/test_database.py b/tests/unit/test_database.py index 9b5ffaf85..f4695a779 100644 --- a/tests/unit/test_database.py +++ b/tests/unit/test_database.py @@ -43,6 +43,11 @@ def setUp(self): self.harness.begin() self.peer_relation_id = self.harness.add_relation("database-peers", "database-peers") self.harness.add_relation_unit(self.peer_relation_id, f"{APP_NAME}/1") + self.harness.update_relation_data( + self.peer_relation_id, + "mysql-k8s", + {"cluster-name": "test_cluster", "cluster-set-domain-name": "test_cluster_set"}, + ) self.database_relation_id = self.harness.add_relation(DB_RELATION_NAME, "app") self.harness.add_relation_unit(self.database_relation_id, "app/0") self.charm = self.harness.charm diff --git a/tests/unit/test_upgrade.py b/tests/unit/test_upgrade.py index 65559693a..0f4c140c3 100644 --- a/tests/unit/test_upgrade.py +++ b/tests/unit/test_upgrade.py @@ -31,6 +31,7 @@ } +# @patch("mysql_k8s_helpers.MySQL.cluster_metadata_exists", return_value=True) class TestUpgrade(unittest.TestCase): """Test the upgrade class.""" @@ -174,7 +175,16 @@ def test_pebble_ready( self.harness.update_relation_data( self.upgrade_relation_id, "mysql-k8s/0", {"state": "upgrading"} ) - self.harness.container_pebble_ready("mysql") + with patch( + "charm.MySQLOperatorCharm.unit_initialized", + new_callable=PropertyMock, + return_value=True, + ), patch( + "charm.MySQLOperatorCharm.cluster_initialized", + new_callable=PropertyMock, + return_value=True, + ): + self.harness.container_pebble_ready("mysql") self.assertEqual( 
self.harness.get_relation_data(self.upgrade_relation_id, "mysql-k8s/1")["state"], "idle", # change to `completed` - behavior not yet set in the lib @@ -187,7 +197,16 @@ def test_pebble_ready( # setup for exception mock_is_instance_in_cluster.return_value = False - self.harness.container_pebble_ready("mysql") + with patch( + "charm.MySQLOperatorCharm.unit_initialized", + new_callable=PropertyMock, + return_value=True, + ), patch( + "charm.MySQLOperatorCharm.cluster_initialized", + new_callable=PropertyMock, + return_value=True, + ): + self.harness.container_pebble_ready("mysql") self.assertTrue(isinstance(self.charm.unit.status, BlockedStatus)) @patch(