diff --git a/concierge.yaml b/concierge.yaml
index 935ff237ae..0d2bbc8c7a 100644
--- a/concierge.yaml
+++ b/concierge.yaml
@@ -8,6 +8,7 @@ providers:
     addons:
       - dns
       - hostpath-storage
+      - rbac
 host:
   snaps:
     jhack:
diff --git a/lib/charms/grafana_k8s/v0/grafana_dashboard.py b/lib/charms/grafana_k8s/v0/grafana_dashboard.py
index cc22f9f7fa..d618c79917 100644
--- a/lib/charms/grafana_k8s/v0/grafana_dashboard.py
+++ b/lib/charms/grafana_k8s/v0/grafana_dashboard.py
@@ -219,7 +219,7 @@ def __init__(self, *args):

 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 39
+LIBPATCH = 42

 PYDEPS = ["cosl >= 0.0.50"]

@@ -417,8 +417,7 @@ def __init__(
         self.expected_relation_interface = expected_relation_interface
         self.actual_relation_interface = actual_relation_interface
         self.message = (
-            "The '{}' relation has '{}' as "
-            "interface rather than the expected '{}'".format(
+            "The '{}' relation has '{}' as " "interface rather than the expected '{}'".format(
                 relation_name, actual_relation_interface, expected_relation_interface
             )
         )
@@ -634,7 +633,10 @@ def _replace_template_fields(  # noqa: C901
     deletions = []
     for tmpl in dict_content["templating"]["list"]:
         if tmpl["name"] and tmpl["name"] in used_replacements:
-            deletions.append(tmpl)
+            # it might happen that existing template var name is the same as the one we insert (i.e prometheusds or lokids)
+            # in that case, we want to pop the existing one only.
+            if tmpl not in DATASOURCE_TEMPLATE_DROPDOWNS:
+                deletions.append(tmpl)

     for d in deletions:
         dict_content["templating"]["list"].remove(d)
@@ -962,6 +964,13 @@ def _replace_uid(
             "Processed dashboard '%s': kept original uid '%s'", dashboard_path, original_uid
         )

+    @classmethod
+    def _add_tags(cls, dashboard_dict: dict, charm_name: str):
+        tags: List[str] = dashboard_dict.get("tags", [])
+        if not any(tag.startswith("charm: ") for tag in tags):
+            tags.append(f"charm: {charm_name}")
+        dashboard_dict["tags"] = tags
+
     @classmethod
     def load_dashboards_from_dir(
         cls,
@@ -1004,6 +1013,8 @@ def _is_dashboard(p: Path) -> bool:
                 charm_name=charm_name,
             )

+            cls._add_tags(dashboard_dict=dashboard_dict, charm_name=charm_name)
+
             id = "file:{}".format(path.stem)
             dashboard_templates[id] = cls._content_to_dashboard_object(
                 charm_name=charm_name,
@@ -1601,7 +1612,7 @@ def _render_dashboards_and_signal_changed(self, relation: Relation) -> bool:  #
         if not coerced_data == stored_data:
             stored_dashboards = self.get_peer_data("dashboards")
-            stored_dashboards[relation.id] = stored_data
+            stored_dashboards[str(relation.id)] = stored_data
             self.set_peer_data("dashboards", stored_dashboards)
             return True
         return None  # type: ignore
diff --git a/lib/charms/prometheus_k8s/v0/prometheus_scrape.py b/lib/charms/prometheus_k8s/v0/prometheus_scrape.py
index ca554fb2bc..d1169ef3dc 100644
--- a/lib/charms/prometheus_k8s/v0/prometheus_scrape.py
+++ b/lib/charms/prometheus_k8s/v0/prometheus_scrape.py
@@ -340,8 +340,8 @@ def _on_scrape_targets_changed(self, event):
 import yaml
 from cosl import JujuTopology
-from cosl.rules import AlertRules
-from ops.charm import CharmBase, RelationRole
+from cosl.rules import AlertRules, generic_alert_groups
+from ops.charm import CharmBase, RelationJoinedEvent, RelationRole
 from ops.framework import (
     BoundEvent,
     EventBase,
@@ -362,7 +362,7 @@ def _on_scrape_targets_changed(self, event):

 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 48
+LIBPATCH = 50

 PYDEPS = ["cosl"]

@@ -1309,6 +1309,8 @@ def __init__(
         refresh_event: Optional[Union[BoundEvent, List[BoundEvent]]] = None,
         external_url: str = "",
         lookaside_jobs_callable: Optional[Callable] = None,
+        *,
+        forward_alert_rules: bool = True,
     ):
         """Construct a metrics provider for a Prometheus charm.
@@ -1411,6 +1413,7 @@ def __init__(
                 files. Defaults to "./prometheus_alert_rules",
                 resolved relative to the directory hosting the charm entry file.
                 The alert rules are automatically updated on charm upgrade.
+            forward_alert_rules: a boolean flag to toggle forwarding of charmed alert rules.
             refresh_event: an optional bound event or list of bound events which
                 will be observed to re-set scrape job data (IP address and others)
             external_url: an optional argument that represents an external url that
@@ -1449,6 +1452,7 @@ def __init__(

         self._charm = charm
         self._alert_rules_path = alert_rules_path
+        self._forward_alert_rules = forward_alert_rules
         self._relation_name = relation_name
         # sanitize job configurations to the supported subset of parameters
         jobs = [] if jobs is None else jobs
@@ -1530,7 +1534,11 @@ def set_scrape_job_spec(self, _=None):
             return

         alert_rules = AlertRules(query_type="promql", topology=self.topology)
-        alert_rules.add_path(self._alert_rules_path, recursive=True)
+        if self._forward_alert_rules:
+            alert_rules.add_path(self._alert_rules_path, recursive=True)
+            alert_rules.add(
+                generic_alert_groups.application_rules, group_name_prefix=self.topology.identifier
+            )
         alert_rules_as_dict = alert_rules.as_dict()

         for relation in self._charm.model.relations[self._relation_name]:
@@ -1776,6 +1784,9 @@ def __init__(
         relation_names: Optional[dict] = None,
         relabel_instance=True,
         resolve_addresses=False,
+        path_to_own_alert_rules: Optional[str] = None,
+        *,
+        forward_alert_rules: bool = True,
     ):
         """Construct a `MetricsEndpointAggregator`.
@@ -1795,6 +1806,8 @@ def __init__(
             resolve_addresses: A boolean flag indiccating if the aggregator
                 should attempt to perform DNS lookups of targets and append
                 a `dns_name` label
+            path_to_own_alert_rules: Optionally supply a path for alert rule files
+            forward_alert_rules: a boolean flag to toggle forwarding of charmed alert rules
         """
         self._charm = charm
@@ -1807,15 +1820,21 @@ def __init__(
         self._alert_rules_relation = relation_names.get("alert_rules", "prometheus-rules")

         super().__init__(charm, self._prometheus_relation)
+        self.topology = JujuTopology.from_charm(charm)
+
         self._stored.set_default(jobs=[], alert_rules=[])

         self._relabel_instance = relabel_instance
         self._resolve_addresses = resolve_addresses

+        self._forward_alert_rules = forward_alert_rules
+
         # manage Prometheus charm relation events
         prometheus_events = self._charm.on[self._prometheus_relation]
         self.framework.observe(prometheus_events.relation_joined, self._set_prometheus_data)

+        self.path_to_own_alert_rules = path_to_own_alert_rules
+
         # manage list of Prometheus scrape jobs from related scrape targets
         target_events = self._charm.on[self._target_relation]
         self.framework.observe(target_events.relation_changed, self._on_prometheus_targets_changed)
@@ -1828,7 +1847,7 @@ def __init__(
         self.framework.observe(alert_rule_events.relation_changed, self._on_alert_rules_changed)
         self.framework.observe(alert_rule_events.relation_departed, self._on_alert_rules_departed)

-    def _set_prometheus_data(self, event):
+    def _set_prometheus_data(self, event: Optional[RelationJoinedEvent] = None):
         """Ensure every new Prometheus instances is updated.

         Any time a new Prometheus unit joins the relation with
@@ -1838,6 +1857,7 @@ def _set_prometheus_data(self, event):
         if not self._charm.unit.is_leader():
             return

+        # Gather the scrape jobs
         jobs = [] + _type_convert_stored(
             self._stored.jobs  # pyright: ignore
         )  # list of scrape jobs, one per relation
@@ -1846,6 +1866,7 @@ def _set_prometheus_data(self, event):
             if targets and relation.app:
                 jobs.append(self._static_scrape_job(targets, relation.app.name))

+        # Gather the alert rules
         groups = [] + _type_convert_stored(
             self._stored.alert_rules  # pyright: ignore
         )  # list of alert rule groups
@@ -1856,9 +1877,23 @@ def _set_prometheus_data(self, event):
                 rules = self._label_alert_rules(unit_rules, appname)
                 group = {"name": self.group_name(appname), "rules": rules}
                 groups.append(group)
-
-        event.relation.data[self._charm.app]["scrape_jobs"] = json.dumps(jobs)
-        event.relation.data[self._charm.app]["alert_rules"] = json.dumps({"groups": groups})
+        alert_rules = AlertRules(query_type="promql", topology=self.topology)
+        # Add alert rules from file
+        if self.path_to_own_alert_rules:
+            alert_rules.add_path(self.path_to_own_alert_rules, recursive=True)
+        # Add generic alert rules
+        alert_rules.add(
+            generic_alert_groups.application_rules, group_name_prefix=self.topology.identifier
+        )
+        groups.extend(alert_rules.as_dict()["groups"])
+
+        # Set scrape jobs and alert rules in relation data
+        relations = [event.relation] if event else self.model.relations[self._prometheus_relation]
+        for rel in relations:
+            rel.data[self._charm.app]["scrape_jobs"] = json.dumps(jobs)  # type: ignore
+            rel.data[self._charm.app]["alert_rules"] = json.dumps(  # type: ignore
+                {"groups": groups if self._forward_alert_rules else []}
+            )

     def _on_prometheus_targets_changed(self, event):
         """Update scrape jobs in response to scrape target changes.
@@ -2129,7 +2164,9 @@ def set_alert_rule_data(self, name: str, unit_rules: dict, label_rules: bool = T
             if updated_group["name"] not in [g["name"] for g in groups]:
                 groups.append(updated_group)

-            relation.data[self._charm.app]["alert_rules"] = json.dumps({"groups": groups})
+            relation.data[self._charm.app]["alert_rules"] = json.dumps(
+                {"groups": groups if self._forward_alert_rules else []}
+            )

             if not _type_convert_stored(self._stored.alert_rules) == groups:  # pyright: ignore
                 self._stored.alert_rules = groups
@@ -2177,8 +2214,8 @@ def remove_alert_rules(self, group_name: str, unit_name: str) -> None:
             changed_group["rules"] = rules_kept  # type: ignore
             groups.append(changed_group)

-            relation.data[self._charm.app]["alert_rules"] = (
-                json.dumps({"groups": groups}) if groups else "{}"
+            relation.data[self._charm.app]["alert_rules"] = json.dumps(
+                {"groups": groups if self._forward_alert_rules else []}
             )

             if not _type_convert_stored(self._stored.alert_rules) == groups:  # pyright: ignore
diff --git a/lib/charms/rolling_ops/v0/rollingops.py b/lib/charms/rolling_ops/v0/rollingops.py
index 57aa9bf352..13b51a3051 100644
--- a/lib/charms/rolling_ops/v0/rollingops.py
+++ b/lib/charms/rolling_ops/v0/rollingops.py
@@ -63,13 +63,14 @@ def _on_trigger_restart(self, event):

 juju run-action some-charm/0 some-charm/1 <... some-charm/n> restart
 ```
-Note that all units that plan to restart must receive the action and emit the aquire
+Note that all units that plan to restart must receive the action and emit the acquire
 event. Any units that do not run their acquire handler will be left out of the rolling
 restart.
 (An operator might take advantage of this fact to recover from a failed rolling operation
 without restarting workloads that were able to successfully restart -- simply omit the
 successful units from a subsequent run-action call.)
 """
+
 import logging
 from enum import Enum
 from typing import AnyStr, Callable, Optional
@@ -88,7 +89,7 @@ def _on_trigger_restart(self, event):

 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 7
+LIBPATCH = 8


 class LockNoRelationError(Exception):
@@ -149,7 +150,6 @@ class Lock:
     """

     def __init__(self, manager, unit=None):
-
         self.relation = manager.model.relations[manager.name][0]
         if not self.relation:
             # TODO: defer caller in this case (probably just fired too soon).
@@ -246,7 +246,7 @@ def __init__(self, manager):

         # Gather all the units.
         relation = manager.model.relations[manager.name][0]
-        units = [unit for unit in relation.units]
+        units = list(relation.units)

         # Plus our unit ...
         units.append(manager.model.unit)
diff --git a/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py b/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py
index a9b6deeb64..ebf80ede2e 100644
--- a/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py
+++ b/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py
@@ -19,15 +19,19 @@

 ```python
 # import the necessary charm libs
-from charms.tempo_coordinator_k8s.v0.tracing import TracingEndpointRequirer, charm_tracing_config
+from charms.tempo_coordinator_k8s.v0.tracing import (
+    TracingEndpointRequirer,
+    charm_tracing_config,
+)
 from charms.tempo_coordinator_k8s.v0.charm_tracing import charm_tracing

+
 # decorate your charm class with charm_tracing:
 @charm_tracing(
     # forward-declare the instance attributes that the instrumentor will look up to obtain the
     # tempo endpoint and server certificate
     tracing_endpoint="tracing_endpoint",
-    server_cert="server_cert"
+    server_cert="server_cert",
 )
 class MyCharm(CharmBase):
     _path_to_cert = "/path/to/cert.crt"
@@ -37,10 +41,12 @@ class MyCharm(CharmBase):
     # If you do support TLS, you'll need to make sure that the server cert is copied to this location
     # and kept up to date so the instrumentor can use it.

-    def __init__(self, ...):
-        ...
-        self.tracing = TracingEndpointRequirer(self, ...)
-        self.tracing_endpoint, self.server_cert = charm_tracing_config(self.tracing, self._path_to_cert)
+    def __init__(self, framework):
+        # ...
+        self.tracing = TracingEndpointRequirer(self)
+        self.tracing_endpoint, self.server_cert = charm_tracing_config(
+            self.tracing, self._path_to_cert
+        )
 ```

 # Detailed usage
@@ -226,12 +232,6 @@ def my_tracing_endpoint(self) -> Optional[str]:
 3) If you were passing a certificate (str) using `server_cert`, you need to change it to
 provide an *absolute* path to the certificate file instead.
 """
-import typing
-
-from opentelemetry.exporter.otlp.proto.common._internal.trace_encoder import (
-    encode_spans,
-)
-from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter


 def _remove_stale_otel_sdk_packages():
@@ -286,12 +286,15 @@ def _remove_stale_otel_sdk_packages():
 # apply hacky patch to remove stale opentelemetry sdk packages on upgrade-charm.
 # it could be trouble if someone ever decides to implement their own tracer parallel to
 # ours and before the charm has inited. We assume they won't.
+# !!IMPORTANT!! keep all otlp imports UNDER this call.
 _remove_stale_otel_sdk_packages()

 import functools
 import inspect
 import logging
 import os
+import typing
+from collections import deque
 from contextlib import contextmanager
 from contextvars import Context, ContextVar, copy_context
 from pathlib import Path
@@ -310,6 +313,9 @@ def _remove_stale_otel_sdk_packages():

 import opentelemetry
 import ops
+from opentelemetry.exporter.otlp.proto.common._internal.trace_encoder import (
+    encode_spans,
+)
 from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
 from opentelemetry.sdk.resources import Resource
 from opentelemetry.sdk.trace import ReadableSpan, Span, TracerProvider
@@ -318,6 +324,7 @@ def _remove_stale_otel_sdk_packages():
     SpanExporter,
     SpanExportResult,
 )
+from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
 from opentelemetry.trace import INVALID_SPAN, Tracer
 from opentelemetry.trace import get_current_span as otlp_get_current_span
 from opentelemetry.trace import (
@@ -338,7 +345,7 @@ def _remove_stale_otel_sdk_packages():

 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 5
+LIBPATCH = 6

 PYDEPS = ["opentelemetry-exporter-otlp-proto-http==1.21.0"]

@@ -366,7 +373,9 @@ def _remove_stale_otel_sdk_packages():
 BUFFER_DEFAULT_MAX_EVENT_HISTORY_LENGTH = 100
 _MiB_TO_B = 2**20  # megabyte to byte conversion rate
 _OTLP_SPAN_EXPORTER_TIMEOUT = 1
-"""Timeout in seconds that the OTLP span exporter has to push traces to the backend."""
+
+
+# Timeout in seconds that the OTLP span exporter has to push traces to the backend.


 class _Buffer:
@@ -398,45 +407,75 @@ def save(self, spans: typing.Sequence[ReadableSpan]):
         if self._max_event_history_length < 1:
             dev_logger.debug("buffer disabled: max history length < 1")
             return
-
-        current_history_length = len(self.load())
-        new_history_length = current_history_length + len(spans)
-        if (diff := self._max_event_history_length - new_history_length) < 0:
-            self.drop(diff)
         self._save(spans)

     def _serialize(self, spans: Sequence[ReadableSpan]) -> bytes:
         # encode because otherwise we can't json-dump them
         return encode_spans(spans).SerializeToString()

+    def _prune(self, queue: Sequence[bytes]) -> Sequence[bytes]:
+        """Prune the queue until it fits in our constraints."""
+        n_dropped_spans = 0
+        # drop older events if we are past the max history length
+        overflow = len(queue) - self._max_event_history_length
+        if overflow > 0:
+            n_dropped_spans += overflow
+            logger.warning(
+                f"charm tracing buffer exceeds max history length ({self._max_event_history_length} events)"
+            )
+
+        new_spans = deque(queue[-self._max_event_history_length :])
+
+        # drop older events if the buffer is too big; all units are bytes
+        logged_drop = False
+        target_size = self._max_buffer_size_mib * _MiB_TO_B
+        current_size = sum(len(span) for span in new_spans)
+        while current_size > target_size:
+            current_size -= len(new_spans.popleft())
+            n_dropped_spans += 1
+
+            # only do this once
+            if not logged_drop:
+                logger.warning(
+                    f"charm tracing buffer exceeds size limit ({self._max_buffer_size_mib}MiB)."
+                )
+                logged_drop = True
+
+        if n_dropped_spans > 0:
+            dev_logger.debug(
+                f"charm tracing buffer overflow: dropped {n_dropped_spans} older spans. "
+                f"Please increase the buffer limits, or ensure the spans can be flushed."
+            )
+        return new_spans
+
     def _save(self, spans: Sequence[ReadableSpan], replace: bool = False):
         dev_logger.debug(f"saving {len(spans)} new spans to buffer")
         old = [] if replace else self.load()
-        new = self._serialize(spans)
+        queue = old + [self._serialize(spans)]
+        new_buffer = self._prune(queue)

-        try:
-            # if the buffer exceeds the size limit, we start dropping old spans until it does
-
-            while len((new + self._SPANSEP.join(old))) > (self._max_buffer_size_mib * _MiB_TO_B):
-                if not old:
-                    # if we've already dropped all spans and still we can't get under the
-                    # size limit, we can't save this span
-                    logger.error(
-                        f"span exceeds total buffer size limit ({self._max_buffer_size_mib}MiB); "
-                        f"buffering FAILED"
-                    )
-                    return
-
-                old = old[1:]
-                logger.warning(
-                    f"buffer size exceeds {self._max_buffer_size_mib}MiB; dropping older spans... "
-                    f"Please increase the buffer size, disable buffering, or ensure the spans can be flushed."
-                )
+        if queue and not new_buffer:
+            # this means that, given our constraints, we are pruning so much that there are no events left.
+            logger.error(
+                "No charm events could be buffered into charm traces buffer. Please increase the memory or history size limits."
+            )
+            return

-            self._db_file.write_bytes(new + self._SPANSEP.join(old))
+        try:
+            self._write(new_buffer)
         except Exception:
             logger.exception("error buffering spans")

+    def _write(self, spans: Sequence[bytes]):
+        """Write the spans to the db file."""
+        # ensure the destination folder exists
+        db_file_dir = self._db_file.parent
+        if not db_file_dir.exists():
+            dev_logger.info(f"creating buffer dir: {db_file_dir}")
+            db_file_dir.mkdir(parents=True)
+
+        self._db_file.write_bytes(self._SPANSEP.join(spans))
+
     def load(self) -> List[bytes]:
         """Load currently buffered spans from the cache file.
@@ -461,8 +500,10 @@ def drop(self, n_spans: Optional[int] = None):
         else:
             dev_logger.debug("emptying buffer")
             new = []
-
-        self._db_file.write_bytes(self._SPANSEP.join(new))
+        try:
+            self._write(new)
+        except Exception:
+            logger.exception("error writing charm traces buffer")

     def flush(self) -> Optional[bool]:
         """Export all buffered spans to the given exporter, then clear the buffer.
diff --git a/poetry.lock b/poetry.lock
index e2de87b193..b623168287 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 2.0.0 and should not be changed by hand.
+# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand.

 [[package]]
 name = "allure-pytest"
@@ -451,14 +451,14 @@ files = [

 [[package]]
 name = "cosl"
-version = "0.0.51"
+version = "0.0.54"
 description = "Utils for COS Lite charms"
 optional = false
 python-versions = ">=3.8"
 groups = ["charm-libs"]
 files = [
-    {file = "cosl-0.0.51-py3-none-any.whl", hash = "sha256:2ef43a94f0ca130fb4f2af924b75329f3c5e74b5c40ad4036af16713ad7d47d4"},
-    {file = "cosl-0.0.51.tar.gz", hash = "sha256:32af380475bba32df7334d53ff16fb93466a169c7433e79a9fef8dbbecfdd43c"},
+    {file = "cosl-0.0.54-py3-none-any.whl", hash = "sha256:b16520d73c72ac83cb42f0abe997d36510732d4f8499f70e9068cfa05f0d02fa"},
+    {file = "cosl-0.0.54.tar.gz", hash = "sha256:6baa889cc4468b0c0f746cc6319892a30ea8fbe38cbf5c49c6885f6fdf89d6a9"},
 ]

 [package.dependencies]
@@ -2547,4 +2547,4 @@ type = ["pytest-mypy"]
 [metadata]
 lock-version = "2.1"
 python-versions = "^3.10"
-content-hash = "4cd539e69dcbd0004b1793f96bf2ac1337fc149288f512770be44b50d6b20751"
+content-hash = "e8da5a5d4be98f29bd2bfa1f86804ddf675fbd4f1264cd039ff07d912c348d2d"
diff --git a/pyproject.toml b/pyproject.toml
index 539e0f5f57..d42005a165 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -27,7 +27,7 @@ poetry-core = "*"
 # tempo_coordinator_k8s/v0/tracing.py requires pydantic
 pydantic = "^1.10"
 # loki_k8s/v1/loki_push_api.py and prometheus_k8s/v0/prometheus_scrape.py
-cosl = "*"
+cosl = ">=0.0.50"
 # tempo_coordinator_k8s/v0/charm_tracing.py
 opentelemetry-exporter-otlp-proto-http = "1.21.0"
 # tls_certificates_interface/v2/tls_certificates.py
diff --git a/tests/integration/relations/test_relations.py b/tests/integration/relations/test_relations.py
index d58f319ee6..d345895b52 100644
--- a/tests/integration/relations/test_relations.py
+++ b/tests/integration/relations/test_relations.py
@@ -51,6 +51,7 @@ async def test_deploy_charms(ops_test: OpsTest, charm):
                 "plugin_unaccent_enable": "True",
                 "plugin_pg_trgm_enable": "True",
             },
+            trust=True,
         ),
     )

diff --git a/tests/integration/test_trust.py b/tests/integration/test_trust.py
index ab0b0cfe99..fe41f85c06 100644
--- a/tests/integration/test_trust.py
+++ b/tests/integration/test_trust.py
@@ -2,16 +2,13 @@
 # Copyright 2024 Canonical Ltd.
 # See LICENSE file for licensing details.

-import asyncio
 import logging
-import time

 import pytest
 from pytest_operator.plugin import OpsTest

 from .helpers import (
     CHARM_BASE,
-    KUBECTL,
     METADATA,
     get_leader_unit,
 )
@@ -19,67 +16,9 @@
 logger = logging.getLogger(__name__)

 APP_NAME = "untrusted-postgresql-k8s"
-MAX_RETRIES = 20
 UNTRUST_ERROR_MESSAGE = f"Insufficient permissions, try: `juju trust {APP_NAME} --scope=cluster`"


-async def test_enable_rbac(ops_test: OpsTest):
-    """Enables RBAC from inside test runner's environment.
-
-    Assert on permission enforcement being active.
-    """
-    enable_rbac_call = await asyncio.create_subprocess_exec(
-        "sudo",
-        "microk8s",
-        "enable",
-        "rbac",
-        stdout=asyncio.subprocess.PIPE,
-        stderr=asyncio.subprocess.PIPE,
-    )
-    await enable_rbac_call.communicate()
-
-    is_default_auth = None
-    retries = 0
-    while is_default_auth != "no" and retries < MAX_RETRIES:
-        rbac_check = await asyncio.create_subprocess_exec(
-            *KUBECTL.split(),
-            "auth",
-            "can-i",
-            "get",
-            "cm",
-            "-A",
-            "--as=system:serviceaccount:default:no-permissions",
-            stdout=asyncio.subprocess.PIPE,
-            stderr=asyncio.subprocess.PIPE,
-        )
-        stdout, _ = await rbac_check.communicate()
-        if stdout:
-            is_default_auth = stdout.decode().split()[0]
-            logger.info(f"Response from rbac check ('no' means enabled): {is_default_auth}")
-        retries += 1
-
-    assert is_default_auth == "no"
-
-
-async def test_model_connectivity(ops_test: OpsTest):
-    """Tries to regain connectivity to model after microK8s restart."""
-    retries = 0
-    while retries < MAX_RETRIES:
-        try:
-            await ops_test.model.connect_current()
-            status = await ops_test.model.get_status()
-            logger.info(f"Connection established: {status}")
-            return
-        except Exception as e:
-            logger.info(f"Connection attempt failed: {e}")
-            retries += 1
-            logger.info(f"Retrying ({retries}/{MAX_RETRIES})...")
-            time.sleep(3)
-
-    logger.error(f"Max retries number of {MAX_RETRIES} reached. Unable to connect.")
-    assert False
-
-
 @pytest.mark.abort_on_fail
 async def test_deploy_without_trust(ops_test: OpsTest, charm):
     """Build and deploy the charm with trust set to false.