diff --git a/src/backups.py b/src/backups.py index ac08087136..55ceceb2bf 100644 --- a/src/backups.py +++ b/src/backups.py @@ -59,9 +59,6 @@ def _are_backup_settings_ok(self) -> Tuple[bool, Optional[str]]: if missing_parameters: return False, f"Missing S3 parameters: {missing_parameters}" - if "stanza" not in self.charm._peers.data[self.charm.unit]: - return False, "Stanza was not initialised" - return True, None def _can_unit_perform_backup(self) -> Tuple[bool, Optional[str]]: @@ -69,15 +66,24 @@ def _can_unit_perform_backup(self) -> Tuple[bool, Optional[str]]: if self.charm.is_blocked: return False, "Unit is in a blocking state" - if ( - self.charm.unit.name == self.charm._patroni.get_primary(unit_name_pattern=True) - and self.charm.app.planned_units() > 1 - ): + tls_enabled = "tls" in self.charm.unit_peer_data + + # Only enable backups on primary if there are replicas but TLS is not enabled. + if self.charm.is_primary and self.charm.app.planned_units() > 1 and tls_enabled: return False, "Unit cannot perform backups as it is the cluster primary" + # Can create backups on replicas only if TLS is enabled (it's needed to enable + # pgBackRest to communicate with the primary to request that missing WAL files + # are pushed to the S3 repo before the backup action is triggered). 
+ if not self.charm.is_primary and not tls_enabled: + return False, "Unit cannot perform backups as TLS is not enabled" + if not self.charm._patroni.member_started: return False, "Unit cannot perform backups as it's not in running state" + if "stanza" not in self.charm.app_peer_data: + return False, "Stanza was not initialised" + return self._are_backup_settings_ok() def _construct_endpoint(self, s3_parameters: Dict) -> str: @@ -116,6 +122,11 @@ def _empty_data_files(self) -> None: ) raise + def _change_connectivity_to_database(self, connectivity: bool) -> None: + """Enable or disable the connectivity to the database.""" + self.charm.unit_peer_data.update({"connectivity": "on" if connectivity else "off"}) + self.charm.update_config() + def _execute_command(self, command: List[str]) -> Tuple[str, str]: """Execute a command in the workload container.""" return self.container.exec( @@ -180,6 +191,9 @@ def _initialise_stanza(self) -> None: located, how it will be backed up, archiving options, etc. (more info in https://pgbackrest.org/user-guide.html#quickstart/configure-stanza). """ + if not self.charm.unit.is_leader(): + return + if self.charm.is_blocked: logger.warning("couldn't initialize stanza due to a blocked status") return @@ -199,7 +213,7 @@ def _initialise_stanza(self) -> None: return # Store the stanza name to be used in configurations updates. - self.charm._peers.data[self.charm.unit].update({"stanza": self.charm.cluster_name}) + self.charm.app_peer_data.update({"stanza": self.charm.cluster_name}) # Update the configuration to use pgBackRest as the archiving mechanism. self.charm.update_config() @@ -208,7 +222,8 @@ def _initialise_stanza(self) -> None: # Check that the stanza is correctly configured. 
for attempt in Retrying(stop=stop_after_attempt(5), wait=wait_fixed(3)): with attempt: - self.charm._patroni.reload_patroni_configuration() + if self.charm._patroni.member_started: + self.charm._patroni.reload_patroni_configuration() self._execute_command( ["pgbackrest", f"--stanza={self.charm.cluster_name}", "check"] ) @@ -218,6 +233,32 @@ def _initialise_stanza(self) -> None: self.charm.unit.status = BlockedStatus( f"failed to initialize stanza with error {str(e)}" ) + return + + return + + @property + def _is_primary_pgbackrest_service_running(self) -> bool: + """Returns whether the pgBackRest TLS server is running in the primary unit.""" + try: + primary = self.charm._patroni.get_primary() + except (RetryError, ConnectionError) as e: + logger.error(f"failed to get primary with error {str(e)}") + return False + + primary_endpoint = self.charm._get_hostname_from_unit(primary) + + try: + self._execute_command( + ["pgbackrest", "server-ping", "--io-timeout=10", primary_endpoint] + ) + except ExecError as e: + logger.warning( + f"Failed to contact pgBackRest TLS server on {primary_endpoint} with error {str(e)}" + ) + return False + + return True def _on_s3_credential_changed(self, event: CredentialsChangedEvent): """Call the stanza initialization when the credentials or the connection info change.""" @@ -226,16 +267,14 @@ def _on_s3_credential_changed(self, event: CredentialsChangedEvent): event.defer() return - s3_parameters, missing_parameters = self._retrieve_s3_parameters() - if missing_parameters: - logger.warning( - f"Cannot set pgBackRest configurations due to missing S3 parameters: {missing_parameters}" - ) + if not self._render_pgbackrest_conf_file(): + logger.debug("Cannot set pgBackRest configurations, missing configurations.") return - self._render_pgbackrest_conf_file(s3_parameters) self._initialise_stanza() + self.start_stop_pgbackrest_service() + def _on_create_backup_action(self, event) -> None: """Request that pgBackRest creates a backup.""" 
can_unit_perform_backup, validation_message = self._can_unit_perform_backup() @@ -267,17 +306,26 @@ def _on_create_backup_action(self, event) -> None: event.fail("Failed to upload metadata to provided S3") return + if not self.charm.is_primary: + # Create a rule to mark the cluster as in a creating backup state and update + # the Patroni configuration. + self._change_connectivity_to_database(connectivity=False) + + self.charm.unit.status = MaintenanceStatus("creating backup") + try: - self.charm.unit.status = MaintenanceStatus("creating backup") - stdout, stderr = self._execute_command( - [ - "pgbackrest", - f"--stanza={self.charm.cluster_name}", - "--log-level-console=debug", - "--type=full", - "backup", - ] - ) + command = [ + "pgbackrest", + f"--stanza={self.charm.cluster_name}", + "--log-level-console=debug", + "--type=full", + "backup", + ] + if self.charm.is_primary: + # Force the backup to run in the primary if it's not possible to run it + # on the replicas (that happens when TLS is not enabled). + command.append("--no-backup-standby") + stdout, stderr = self._execute_command(command) backup_id = self._list_backups_ids(show_failed=True)[-1] except ExecError as e: logger.exception(e) @@ -329,6 +377,11 @@ def _on_create_backup_action(self, event) -> None: else: event.set_results({"backup-status": "backup created"}) + if not self.charm.is_primary: + # Remove the rule the marks the cluster as in a creating backup state + # and update the Patroni configuration. 
+ self._change_connectivity_to_database(connectivity=True) + self.charm.unit.status = ActiveStatus() def _on_list_backups_action(self, event) -> None: @@ -451,15 +504,31 @@ def _pre_restore_checks(self, event: ActionEvent) -> bool: event.fail(error_message) return False + logger.info("Checking that this unit was already elected the leader unit") + if not self.charm.unit.is_leader(): + error_message = "Unit cannot restore backup as it was not elected the leader unit yet" + logger.warning(error_message) + event.fail(error_message) + return False + return True - def _render_pgbackrest_conf_file(self, s3_parameters: Dict) -> None: + def _render_pgbackrest_conf_file(self) -> bool: """Render the pgBackRest configuration file.""" + s3_parameters, missing_parameters = self._retrieve_s3_parameters() + if missing_parameters: + logger.warning( + f"Cannot set pgBackRest configurations due to missing S3 parameters: {missing_parameters}" + ) + return False + # Open the template pgbackrest.conf file. with open("templates/pgbackrest.conf.j2", "r") as file: template = Template(file.read()) # Render the template file with the correct values. rendered = template.render( + enable_tls=self.charm.is_tls_enabled and len(self.charm.peer_members_endpoints) > 0, + peer_endpoints=self.charm.peer_members_endpoints, path=s3_parameters["path"], region=s3_parameters.get("region"), endpoint=s3_parameters["endpoint"], @@ -468,6 +537,7 @@ def _render_pgbackrest_conf_file(self, s3_parameters: Dict) -> None: access_key=s3_parameters["access-key"], secret_key=s3_parameters["secret-key"], stanza=self.charm.cluster_name, + storage_path=self.charm._storage_path, user=BACKUP_USER, ) # Delete the original file and render the one with the right info. 
@@ -479,6 +549,8 @@ def _render_pgbackrest_conf_file(self, s3_parameters: Dict) -> None: group=WORKLOAD_OS_GROUP, ) + return True + def _restart_database(self) -> None: """Removes the restoring backup flag and restart the database.""" self.charm.app_peer_data.update({"archive-mode": "", "restoring-backup": ""}) @@ -510,6 +582,34 @@ def _retrieve_s3_parameters(self) -> Tuple[Dict, List[str]]: return s3_parameters, [] + def start_stop_pgbackrest_service(self) -> bool: + """Start or stop the pgBackRest TLS server service. + + Returns: + a boolean indicating whether the operation succeeded. + """ + # Ignore this operation if backups settings aren't ok. + are_backup_settings_ok, _ = self._are_backup_settings_ok() + if not are_backup_settings_ok: + return True + + # Update pgBackRest configuration (to update the TLS settings). + if not self._render_pgbackrest_conf_file(): + return False + + # Stop the service if TLS is not enabled or there are no replicas. + if not self.charm.is_tls_enabled or len(self.charm.peer_members_endpoints) == 0: + self.container.stop(self.charm.pgbackrest_server_service) + return True + + # Don't start the service if the service hasn't started yet in the primary. + if not self.charm.is_primary and not self._is_primary_pgbackrest_service_running: + return False + + # Start the service. 
+        self.container.restart(self.charm.pgbackrest_server_service)
+        return True
+
     def _upload_content_to_s3(
         self: str,
         content: str,
diff --git a/src/charm.py b/src/charm.py
index b83b6f8a1f..91b7045fb8 100755
--- a/src/charm.py
+++ b/src/charm.py
@@ -69,6 +69,7 @@ def __init__(self, *args):
         super().__init__(*args)
 
         self._postgresql_service = "postgresql"
+        self.pgbackrest_server_service = "pgbackrest server"
         self._unit = self.model.unit.name
         self._name = self.model.app.name
         self._namespace = self.model.name
@@ -247,6 +248,18 @@ def _on_peer_relation_changed(self, event: RelationChangedEvent) -> None:
 
         self.postgresql_client_relation.update_read_only_endpoint()
 
+        # Start or stop the pgBackRest TLS server service when TLS certificate change.
+        if not self.backup.start_stop_pgbackrest_service():
+            # Ping primary to start its TLS server.
+            self.unit_peer_data.update({"start-tls-server": "True"})
+            logger.debug(
+                "Deferring on_peer_relation_changed: awaiting for TLS server service to start on primary"
+            )
+            event.defer()
+            return
+        else:
+            self.unit_peer_data.pop("start-tls-server", None)
+
         self.unit.status = ActiveStatus()
 
     def _on_install(self, _) -> None:
@@ -724,9 +737,19 @@ def _patroni(self):
             self.get_secret("app", USER_PASSWORD_KEY),
             self.get_secret("app", REPLICATION_PASSWORD_KEY),
             self.get_secret("app", REWIND_PASSWORD_KEY),
-            self.postgresql.is_tls_enabled(check_current_host=True),
+            bool(self.unit_peer_data.get("tls")),
         )
 
+    @property
+    def is_primary(self) -> bool:
+        """Return whether this unit is the primary instance."""
+        return self._unit == self._patroni.get_primary(unit_name_pattern=True)
+
+    @property
+    def is_tls_enabled(self) -> bool:
+        """Return whether TLS is enabled."""
+        return all(self.tls.get_tls_files())
+
     @property
     def _endpoint(self) -> str:
         """Current unit hostname."""
@@ -741,6 +764,22 @@ def _endpoints(self) -> List[str]:
         # If the peer relations was not created yet, return only the current member hostname.
return [self._endpoint] + @property + def peer_members_endpoints(self) -> List[str]: + """Fetch current list of peer members endpoints. + + Returns: + A list of peer members addresses (strings). + """ + # Get all members endpoints and remove the current unit endpoint from the list. + endpoints = self._endpoints + current_unit_endpoint = self._get_hostname_from_unit( + self._unit_name_to_pod_name(self._unit) + ) + if current_unit_endpoint in endpoints: + endpoints.remove(current_unit_endpoint) + return endpoints + def _add_to_endpoints(self, endpoint) -> None: """Add one endpoint to the members list.""" self._update_endpoints(endpoint_to_add=endpoint) @@ -777,8 +816,8 @@ def _postgresql_layer(self) -> Layer: "summary": "entrypoint of the postgresql + patroni image", "command": f"/usr/bin/python3 /usr/local/bin/patroni {self._storage_path}/patroni.yml", "startup": "enabled", - "user": "postgres", - "group": "postgres", + "user": WORKLOAD_OS_USER, + "group": WORKLOAD_OS_GROUP, "environment": { "PATRONI_KUBERNETES_LABELS": f"{{application: patroni, cluster-name: {self.cluster_name}}}", "PATRONI_KUBERNETES_NAMESPACE": self._namespace, @@ -788,7 +827,15 @@ def _postgresql_layer(self) -> Layer: "PATRONI_REPLICATION_USERNAME": REPLICATION_USER, "PATRONI_SUPERUSER_USERNAME": USER, }, - } + }, + self.pgbackrest_server_service: { + "override": "replace", + "summary": "pgBackRest server", + "command": self.pgbackrest_server_service, + "startup": "disabled", + "user": WORKLOAD_OS_USER, + "group": WORKLOAD_OS_GROUP, + }, }, } return Layer(layer_config) @@ -809,6 +856,7 @@ def push_tls_files_to_workload(self, container: Container = None) -> None: container = self.unit.get_container("postgresql") key, ca, cert = self.tls.get_tls_files() + if key is not None: container.push( f"{self._storage_path}/{TLS_KEY_FILE}", @@ -843,9 +891,14 @@ def _restart(self, _) -> None: """Restart PostgreSQL.""" try: self._patroni.restart_postgresql() - except RetryError as e: - logger.error("failed to 
restart PostgreSQL") - self.unit.status = BlockedStatus(f"failed to restart PostgreSQL with error {e}") + except RetryError: + error_message = "failed to restart PostgreSQL" + logger.exception(error_message) + self.unit.status = BlockedStatus(error_message) + return + + # Start or stop the pgBackRest TLS server service when TLS certificate change. + self.backup.start_stop_pgbackrest_service() def update_config(self) -> None: """Updates Patroni config file based on the existence of the TLS files.""" @@ -854,16 +907,24 @@ def update_config(self) -> None: # Update and reload configuration based on TLS files availability. self._patroni.render_patroni_yml_file( archive_mode=self.app_peer_data.get("archive-mode", "on"), + connectivity=self.unit_peer_data.get("connectivity", "on") == "on", enable_tls=enable_tls, backup_id=self.app_peer_data.get("restoring-backup"), - stanza=self.unit_peer_data.get("stanza"), + stanza=self.app_peer_data.get("stanza"), ) self._patroni.render_postgresql_conf_file() if not self._patroni.member_started: + # If Patroni/PostgreSQL has not started yet and TLS relations was initialised, + # then mark TLS as enabled. This commonly happens when the charm is deployed + # in a bundle together with the TLS certificates operator. This flag is used to + # know when to call the Patroni API using HTTP or HTTPS. + self.unit_peer_data.update({"tls": "enabled" if enable_tls else ""}) + logger.debug("Early exit update_config: Patroni not started yet") return restart_postgresql = enable_tls != self.postgresql.is_tls_enabled() self._patroni.reload_patroni_configuration() + self.unit_peer_data.update({"tls": "enabled" if enable_tls else ""}) # Restart PostgreSQL if TLS configuration has changed # (so the both old and new connections use the configuration). 
diff --git a/src/patroni.py b/src/patroni.py
index 3afddd9fd6..e4961a5558 100644
--- a/src/patroni.py
+++ b/src/patroni.py
@@ -197,6 +197,7 @@ def _render_file(self, path: str, content: str, mode: int) -> None:
     def render_patroni_yml_file(
         self,
         archive_mode: str,
+        connectivity: bool = False,
         enable_tls: bool = False,
         stanza: str = None,
         backup_id: Optional[str] = None,
@@ -205,6 +206,7 @@ def render_patroni_yml_file(
 
         Args:
             archive_mode: PostgreSQL archive mode.
+            connectivity: whether to allow external connections to the database.
             enable_tls: whether to enable TLS.
             stanza: name of the stanza created by pgBackRest.
             backup_id: id of the backup that is being restored.
@@ -215,6 +217,7 @@ def render_patroni_yml_file(
         # Render the template file with the correct values.
         rendered = template.render(
             archive_mode=archive_mode,
+            connectivity=connectivity,
             enable_tls=enable_tls,
             endpoint=self._endpoint,
             endpoints=self._endpoints,
diff --git a/templates/patroni.yml.j2 b/templates/patroni.yml.j2
index a13fc2f141..52b4526a83 100644
--- a/templates/patroni.yml.j2
+++ b/templates/patroni.yml.j2
@@ -68,7 +68,12 @@ postgresql:
     pgpass: /tmp/pgpass
     pg_hba:
       - local all backup peer map=operator
+      {%- if not connectivity %}
+      - {{ 'hostssl' if enable_tls else 'host' }} all all {{ endpoint }}.{{ namespace }}.svc.cluster.local md5
+      - {{ 'hostssl' if enable_tls else 'host' }} all all 0.0.0.0/0 reject
+      {% else %}
       - {{ 'hostssl' if enable_tls else 'host' }} all all 0.0.0.0/0 md5
+      {%- endif %}
       - {{ 'hostssl' if enable_tls else 'host' }} replication replication 127.0.0.1/32 md5
 {%- for endpoint in endpoints %}
       - {{ 'hostssl' if enable_tls else 'host' }} replication replication {{ endpoint }}.{{ namespace }}.svc.cluster.local md5
diff --git a/templates/pgbackrest.conf.j2 b/templates/pgbackrest.conf.j2
index 288fbbce64..3c2349bf4f 100644
--- a/templates/pgbackrest.conf.j2
+++ b/templates/pgbackrest.conf.j2
@@ -1,4 +1,5 @@
 [global]
+backup-standby=y
 repo1-retention-full=9999999
 repo1-type=s3
repo1-path={{ path }} @@ -9,7 +10,29 @@ repo1-s3-uri-style={{ s3_uri_style }} repo1-s3-key={{ access_key }} repo1-s3-key-secret={{ secret_key }} start-fast=y +{%- if enable_tls %} +tls-server-address=* +{%- for peer_endpoint in peer_endpoints %} +tls-server-auth={{ peer_endpoint }}={{ stanza }} +{%- endfor %} +tls-server-ca-file={{ storage_path }}/ca.pem +tls-server-cert-file={{ storage_path }}/cert.pem +tls-server-key-file={{ storage_path }}/key.pem +{%- endif %} [{{ stanza }}] -pg1-path=/var/lib/postgresql/data/pgdata +pg1-path={{ storage_path }}/pgdata pg1-user={{ user }} +{%- if enable_tls %} +{% set ns = namespace(count=2) %} +{%- for peer_endpoint in peer_endpoints %} +pg{{ ns.count }}-host-type=tls +pg{{ ns.count }}-host-ca-file={{ storage_path }}/ca.pem +pg{{ ns.count }}-host-cert-file={{ storage_path }}/cert.pem +pg{{ ns.count }}-host-key-file={{ storage_path }}/key.pem +pg{{ ns.count }}-host={{ peer_endpoint }} +pg{{ ns.count }}-path={{ storage_path }}/pgdata +pg{{ ns.count }}-user={{ user }} +{% set ns.count = ns.count + 1 %} +{%- endfor %} +{%- endif %} diff --git a/tests/integration/test_backups.py b/tests/integration/test_backups.py index 6487af85e0..9b533c261d 100644 --- a/tests/integration/test_backups.py +++ b/tests/integration/test_backups.py @@ -6,16 +6,20 @@ import pytest as pytest from pytest_operator.plugin import OpsTest +from tenacity import Retrying, stop_after_attempt, wait_exponential from tests.integration.helpers import ( DATABASE_APP_NAME, build_and_deploy, db_connect, get_password, + get_primary, get_unit_address, + scale_application, ) S3_INTEGRATOR_APP_NAME = "s3-integrator" +TLS_CERTIFICATES_APP_NAME = "tls-certificates-operator" logger = logging.getLogger(__name__) @@ -23,17 +27,21 @@ @pytest.mark.abort_on_fail async def test_backup_and_restore(ops_test: OpsTest, cloud_configs: Tuple[Dict, Dict]) -> None: """Build and deploy one unit of PostgreSQL and then test the backup and restore actions.""" - # Deploy S3 Integrator. 
+ # Deploy S3 Integrator and TLS Certificates Operator. await ops_test.model.deploy(S3_INTEGRATOR_APP_NAME, channel="edge") + config = {"generate-self-signed-certificates": "true", "ca-common-name": "Test CA"} + await ops_test.model.deploy(TLS_CERTIFICATES_APP_NAME, channel="beta", config=config) for cloud, config in cloud_configs[0].items(): # Deploy and relate PostgreSQL to S3 integrator (one database app for each cloud for now - # as archivo_mode is disabled after restoring the backup). + # as archivo_mode is disabled after restoring the backup) and to TLS Certificates Operator + # (to be able to create backups from replicas). database_app_name = f"{DATABASE_APP_NAME}-{cloud.lower()}" await build_and_deploy( - ops_test, 1, database_app_name=database_app_name, wait_for_idle=False + ops_test, 2, database_app_name=database_app_name, wait_for_idle=False ) await ops_test.model.relate(database_app_name, S3_INTEGRATOR_APP_NAME) + await ops_test.model.relate(database_app_name, TLS_CERTIFICATES_APP_NAME) # Configure and set access and secret keys. logger.info(f"configuring S3 integrator for {cloud}") @@ -47,9 +55,15 @@ async def test_backup_and_restore(ops_test: OpsTest, cloud_configs: Tuple[Dict, apps=[database_app_name, S3_INTEGRATOR_APP_NAME], status="active", timeout=1000 ) + primary = await get_primary(ops_test, database_app_name) + for unit in ops_test.model.applications[database_app_name].units: + if unit.name != primary: + replica = unit.name + break + # Write some data. password = await get_password(ops_test, database_app_name=database_app_name) - address = await get_unit_address(ops_test, f"{database_app_name}/0") + address = await get_unit_address(ops_test, primary) logger.info("creating a table in the database") with db_connect(host=address, password=password) as connection: connection.autocommit = True @@ -60,22 +74,18 @@ async def test_backup_and_restore(ops_test: OpsTest, cloud_configs: Tuple[Dict, # Run the "create backup" action. 
logger.info("creating a backup") - action = await ops_test.model.units.get(f"{database_app_name}/0").run_action( - "create-backup" - ) + action = await ops_test.model.units.get(replica).run_action("create-backup") await action.wait() - logger.info(f"backup results: {action.results}") - await ops_test.model.wait_for_idle( - apps=[database_app_name, S3_INTEGRATOR_APP_NAME], status="active", timeout=1000 - ) + backup_status = action.results.get("backup-status") + assert backup_status, "backup hasn't succeeded" + async with ops_test.fast_forward(): + await ops_test.model.wait_for_idle(status="active", timeout=1000) # Run the "list backups" action. logger.info("listing the available backups") - action = await ops_test.model.units.get(f"{database_app_name}/0").run_action( - "list-backups" - ) + action = await ops_test.model.units.get(replica).run_action("list-backups") await action.wait() - backups = action.results["backups"] + backups = action.results.get("backups") assert backups, "backups not outputted" await ops_test.model.wait_for_idle(status="active", timeout=1000) @@ -86,15 +96,23 @@ async def test_backup_and_restore(ops_test: OpsTest, cloud_configs: Tuple[Dict, connection.cursor().execute("CREATE TABLE backup_table_2 (test_collumn INT );") connection.close() + # Scale down to be able to restore. + await scale_application(ops_test, database_app_name, 1) + # Run the "restore backup" action. 
- logger.info("restoring the backup") - most_recent_backup = backups.split("\n")[-1] - backup_id = most_recent_backup.split()[0] - action = await ops_test.model.units.get(f"{database_app_name}/0").run_action( - "restore", **{"backup-id": backup_id} - ) - await action.wait() - logger.info(f"restore results: {action.results}") + for attempt in Retrying( + stop=stop_after_attempt(10), wait=wait_exponential(multiplier=1, min=2, max=30) + ): + with attempt: + logger.info("restoring the backup") + most_recent_backup = backups.split("\n")[-1] + backup_id = most_recent_backup.split()[0] + action = await ops_test.model.units.get(f"{database_app_name}/0").run_action( + "restore", **{"backup-id": backup_id} + ) + await action.wait() + restore_status = action.results.get("restore-status") + assert restore_status, "restore hasn't succeeded" # Wait for the backup to complete. async with ops_test.fast_forward(): @@ -102,6 +120,8 @@ async def test_backup_and_restore(ops_test: OpsTest, cloud_configs: Tuple[Dict, # Check that the backup was correctly restored by having only the first created table. logger.info("checking that the backup was correctly restored") + primary = await get_primary(ops_test, database_app_name) + address = await get_unit_address(ops_test, primary) with db_connect( host=address, password=password ) as connection, connection.cursor() as cursor: diff --git a/tests/integration/test_tls.py b/tests/integration/test_tls.py index b36e4b44e8..f40debebc2 100644 --- a/tests/integration/test_tls.py +++ b/tests/integration/test_tls.py @@ -34,7 +34,6 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None: await build_and_deploy(ops_test, DATABASE_UNITS, wait_for_idle=False) -@pytest.mark.unstable async def test_mattermost_db(ops_test: OpsTest) -> None: """Deploy Mattermost to test the 'db' relation. 
diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index 1a96af563f..39b94ae6f6 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -43,6 +43,7 @@ def setUp(self): self._peer_relation = PEER self._postgresql_container = "postgresql" self._postgresql_service = "postgresql" + self.pgbackrest_server_service = "pgbackrest server" self.harness = Harness(PostgresqlOperatorCharm) self.addCleanup(self.harness.cleanup) @@ -435,7 +436,15 @@ def test_postgresql_layer(self, _, __, ___, ____): "PATRONI_REPLICATION_USERNAME": "replication", "PATRONI_SUPERUSER_USERNAME": "operator", }, - } + }, + self.pgbackrest_server_service: { + "override": "replace", + "summary": "pgBackRest server", + "command": self.pgbackrest_server_service, + "startup": "disabled", + "user": "postgres", + "group": "postgres", + }, }, } self.assertDictEqual(plan, expected)