"""
Monitor staging and production Elasticsearch cluster health endpoint.

Requests the cluster health and alerts under the following conditions:

- Red cluster health
- Unexpected number of nodes
- Unresponsive cluster

Additionally, the DAG will notify (rather than alert) when the cluster health is yellow.
Yellow cluster health may or may not be an issue, depending on whether it is expected,
and occurs whenever shards and replicas are being relocated (e.g., during reindexes).
It is worthwhile to notify in these cases, as an assurance, but we could choose to add
logic that ignores yellow cluster health during data refresh or other similar operations.
"""

import json
import logging
from datetime import datetime
from textwrap import dedent, indent

from airflow.decorators import dag, task
from airflow.exceptions import AirflowSkipException
from airflow.providers.elasticsearch.hooks.elasticsearch import ElasticsearchPythonHook
from elasticsearch import Elasticsearch

from common.constants import ENVIRONMENTS, PRODUCTION, Environment
from common.sensors.utils import is_concurrent_with_any
from common.slack import send_alert, send_message
from data_refresh.data_refresh_types import DATA_REFRESH_CONFIGS
from elasticsearch_cluster.shared import get_es_host


logger = logging.getLogger(__name__)


_DAG_ID = "{env}_elasticsearch_cluster_healthcheck"

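# Expected cluster topology: 3 master nodes and 3 data nodes, 6 in total.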
EXPECTED_NODE_COUNT = 6
EXPECTED_DATA_NODE_COUNT = 3
EXPECTED_MASTER_NODE_COUNT = 3


def _format_response_body(response_body: dict) -> str:
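    """Render the healthcheck response body as a code block for Slack messages."""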
    body_str = indent(json.dumps(response_body, indent=4), prefix=" " * 4)
    # `body_str` is indented manually rather than by indenting the `{body_str}`
    # placeholder in the f-string below. An indented placeholder would prepend the
    # f-string's indentation only to the first line of the interpolated JSON (the
    # opening curly brace), leaving the remaining lines without it, so the message
    # would not share a uniform prefix and could not be dedented correctly later.
    # Indenting every line with `indent` to match the f-string keeps the prefix
    # uniform, so `dedent` can strip it cleanly.
    return f"""
    Full healthcheck response body:
    ```
{body_str}
    ```
    """


def _compose_red_status(env: Environment, response_body: dict) -> str:
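    """Build the alert message for red cluster status."""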
    message = f"""
    Elasticsearch {env} cluster status is **red**.

    This is a critical status change, **investigate ASAP**.

    {_format_response_body(response_body)}
    """
    return message


def _compose_unexpected_node_count(env: Environment, response_body: dict) -> str:
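    """Build the alert message for an unexpected total node count."""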
    node_count = response_body["number_of_nodes"]
    data_node_count = response_body["number_of_data_nodes"]
    master_node_count = node_count - data_node_count

    message = f"""
    Elasticsearch {env} cluster node count is **{node_count}**.
    Expected {EXPECTED_NODE_COUNT} total nodes.

    Master nodes: **{master_node_count}** of expected {EXPECTED_MASTER_NODE_COUNT}
    Data nodes: **{data_node_count}** of expected {EXPECTED_DATA_NODE_COUNT}

    This is a critical status change, **investigate ASAP**.
    If this is expected (e.g., during controlled node or cluster changes), acknowledge immediately with an explanation.

    {_format_response_body(response_body)}
    """
    logger.error(f"Unexpected node count; {json.dumps(response_body)}")
    return message


def _compose_yellow_cluster_health(env: Environment, response_body: dict) -> str:
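    """Build the notification message for yellow cluster status."""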
    message = f"""
    Elasticsearch {env} cluster health is **yellow**.

    This does not necessarily mean something is wrong, but if there is no expected cause (e.g., a running data refresh), investigate cluster health now.

    {_format_response_body(response_body)}
    """
    logger.info(f"Cluster health was yellow; {json.dumps(response_body)}")
    return message


@task
def ping_healthcheck(env: str, es_host: str):
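    """Request the cluster health endpoint and return the raw response body."""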
    es_conn: Elasticsearch = ElasticsearchPythonHook(hosts=[es_host]).get_conn

    response = es_conn.cluster.health()

    return response.body


@task
def compose_notification(
    env: Environment, response_body: dict, is_data_refresh_running: bool
):
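    """
    Determine whether the healthcheck response warrants a Slack message.

    Returns a tuple of message type ("alert" or "notification") and message text:
    - Red status or an unexpected node count produces an alert.
    - Yellow status produces a notification, unless a data refresh is running
      against the production cluster, in which case the task is skipped.
    - Green status does not produce a message.
    """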
    status = response_body["status"]

    if status == "red":
        return "alert", _compose_red_status(env, response_body)

    if response_body["number_of_nodes"] != EXPECTED_NODE_COUNT:
        return "alert", _compose_unexpected_node_count(env, response_body)

    if status == "yellow":
        if is_data_refresh_running and env == PRODUCTION:
            raise AirflowSkipException(
                "Production cluster health status is yellow during data refresh. "
                "This is an expected state, so no alert is sent."
            )

        return "notification", _compose_yellow_cluster_health(env, response_body)

    logger.info(f"Cluster health was green; {json.dumps(response_body)}")
    raise AirflowSkipException(
        "Cluster health is green and the node count is as expected; "
        "no alert or notification is needed."
    )


@task
def notify(env: str, message_type_and_string: tuple[str, str]):
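    """Send the composed message to Slack as either an alert or a notification."""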
    message_type, message = message_type_and_string

    if message_type == "alert":
        send_alert(dedent(message), dag_id=_DAG_ID.format(env=env))
    elif message_type == "notification":
        send_message(dedent(message), dag_id=_DAG_ID.format(env=env))
    else:
        raise ValueError(
            "Invalid message_type. Expected 'alert' or 'notification', "
            f"received {message_type}"
        )


_SHARED_DAG_ARGS = {
    # Every 15 minutes
    "schedule": "*/15 * * * *",
    "start_date": datetime(2024, 2, 4),
    "catchup": False,
    "max_active_runs": 1,
    "doc_md": __doc__,
    "tags": ["elasticsearch", "monitoring"],
}


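# DAG IDs for the data refresh and filtered index creation DAGs. Yellow cluster
# health is expected while any of these runs, so the production yellow notification
# is skipped in that case.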
_DATA_REFRESH_DAG_IDS = []
for config in DATA_REFRESH_CONFIGS.values():
    _DATA_REFRESH_DAG_IDS += [config.dag_id, config.filtered_index_dag_id]


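# Generate a healthcheck DAG for each environment (staging and production).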
for env in ENVIRONMENTS:

    @dag(dag_id=_DAG_ID.format(env=env), **_SHARED_DAG_ARGS)
    def cluster_healthcheck_dag():
        is_data_refresh_running = is_concurrent_with_any(_DATA_REFRESH_DAG_IDS)

        es_host = get_es_host(env)
        healthcheck_response = ping_healthcheck(env, es_host)
        notification = compose_notification(
            env, healthcheck_response, is_data_refresh_running
        )
        es_host >> healthcheck_response >> notification >> notify(env, notification)

    cluster_healthcheck_dag()