diff --git a/.github/workflows/check_formatting.yml b/.github/workflows/check_formatting.yml index b19a31fd..b6f8ec27 100644 --- a/.github/workflows/check_formatting.yml +++ b/.github/workflows/check_formatting.yml @@ -19,4 +19,5 @@ jobs: shell: bash -l {0} run: mamba install --quiet --yes --file requirements.txt black && - black tobac --check --diff + black --version && + black tobac --check --diff diff --git a/doc/tobac.rst b/doc/tobac.rst index 87cd45ab..5e651222 100644 --- a/doc/tobac.rst +++ b/doc/tobac.rst @@ -7,7 +7,26 @@ Submodules tobac.analysis module --------------------- -.. automodule:: tobac.analysis +tobac.analysis.cell_analysis module +--------------------- + +.. automodule:: tobac.analysis.cell_analysis + :members: + :undoc-members: + :show-inheritance: + +tobac.analysis.feature_analysis module +--------------------- + +.. automodule:: tobac.analysis.feature_analysis + :members: + :undoc-members: + :show-inheritance: + +tobac.analysis.spatial module +--------------------- + +.. automodule:: tobac.analysis.spatial :members: :undoc-members: :show-inheritance: @@ -71,18 +90,26 @@ tobac.tracking module tobac.utils modules ------------------ -tobac.utils.general module +tobac.utils.bulk_statistics module ------------------ -.. automodule:: tobac.utils.general +.. automodule:: tobac.utils.bulk_statistics :members: :undoc-members: :show-inheritance: -tobac.utils.bulk_statistics module +tobac.utils.decorators module ------------------ -.. automodule:: tobac.utils.bulk_statistics +.. automodule:: tobac.utils.decorators + :members: + :undoc-members: + :show-inheritance: + +tobac.utils.general module +------------------ + +.. automodule:: tobac.utils.general :members: :undoc-members: :show-inheritance: @@ -95,6 +122,14 @@ tobac.utils.mask module :undoc-members: :show-inheritance: +tobac.utils.periodic_boundaries module +------------------ + +.. 
automodule:: tobac.utils.periodic_boundaries + :members: + :undoc-members: + :show-inheritance: + tobac.wrapper module -------------------- diff --git a/examples/Basics/Idealized-Case-1_Tracking-of-a-Test-Blob-in-2D.ipynb b/examples/Basics/Idealized-Case-1_Tracking-of-a-Test-Blob-in-2D.ipynb index 00702937..35c026ab 100644 --- a/examples/Basics/Idealized-Case-1_Tracking-of-a-Test-Blob-in-2D.ipynb +++ b/examples/Basics/Idealized-Case-1_Tracking-of-a-Test-Blob-in-2D.ipynb @@ -36,17 +36,19 @@ "cell_type": "code", "execution_count": 1, "id": "46abd7ad", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:46:46.604370Z", - "iopub.status.busy": "2024-02-17T12:46:46.603803Z", - "iopub.status.idle": "2024-02-17T12:47:02.061962Z", - "shell.execute_reply": "2024-02-17T12:47:02.059167Z" + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "using tobac version 1.5.2\n" + ] } - }, - "outputs": [], + ], "source": [ "import tobac\n", + "print('using tobac version', str(tobac.__version__))\n", "\n", "# we add testing here to create test dataset (typically not needed in standard applications)\n", "import tobac.testing" @@ -64,14 +66,7 @@ "cell_type": "code", "execution_count": 2, "id": "a28f3ba2", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:02.069480Z", - "iopub.status.busy": "2024-02-17T12:47:02.068740Z", - "iopub.status.idle": "2024-02-17T12:47:03.098546Z", - "shell.execute_reply": "2024-02-17T12:47:03.096911Z" - } - }, + "metadata": {}, "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", @@ -93,14 +88,7 @@ "cell_type": "code", "execution_count": 3, "id": "fd75ee85", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:03.105232Z", - "iopub.status.busy": "2024-02-17T12:47:03.104769Z", - "iopub.status.idle": "2024-02-17T12:47:03.716692Z", - "shell.execute_reply": "2024-02-17T12:47:03.715384Z" - } - }, + "metadata": {}, "outputs": [], "source": [ "import seaborn as sns\n", @@ -137,14 +125,7 @@ "cell_type": "code", "execution_count": 4, "id": "1ecce2f7", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:03.720546Z", - "iopub.status.busy": "2024-02-17T12:47:03.720174Z", - "iopub.status.idle": "2024-02-17T12:47:03.806021Z", - "shell.execute_reply": "2024-02-17T12:47:03.805262Z" - } - }, + "metadata": {}, "outputs": [ { "data": { @@ -521,7 +502,7 @@ " latitude (y, x) float64 ...\n", " longitude (y, x) float64 ...\n", "Attributes:\n", - " units: m s-1
  • " ], "text/plain": [ "\n", @@ -694,14 +675,7 @@ "cell_type": "code", "execution_count": 5, "id": "cd819867", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:03.810077Z", - "iopub.status.busy": "2024-02-17T12:47:03.809799Z", - "iopub.status.idle": "2024-02-17T12:47:03.818233Z", - "shell.execute_reply": "2024-02-17T12:47:03.817307Z" - } - }, + "metadata": {}, "outputs": [ { "data": { @@ -744,14 +718,7 @@ "cell_type": "code", "execution_count": 6, "id": "b7c04d5a", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:03.822185Z", - "iopub.status.busy": "2024-02-17T12:47:03.821801Z", - "iopub.status.idle": "2024-02-17T12:47:05.221198Z", - "shell.execute_reply": "2024-02-17T12:47:05.218804Z" - } - }, + "metadata": {}, "outputs": [ { "data": { @@ -810,14 +777,7 @@ "cell_type": "code", "execution_count": 7, "id": "454d687c", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:05.235262Z", - "iopub.status.busy": "2024-02-17T12:47:05.232533Z", - "iopub.status.idle": "2024-02-17T12:47:05.463213Z", - "shell.execute_reply": "2024-02-17T12:47:05.461185Z" - } - }, + "metadata": {}, "outputs": [], "source": [ "dxy, dt = tobac.get_spacings(test_data)" @@ -835,14 +795,7 @@ "cell_type": "code", "execution_count": 8, "id": "24b828de", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:05.470570Z", - "iopub.status.busy": "2024-02-17T12:47:05.470170Z", - "iopub.status.idle": "2024-02-17T12:47:05.487186Z", - "shell.execute_reply": "2024-02-17T12:47:05.484994Z" - } - }, + "metadata": {}, "outputs": [ { "data": { @@ -1211,7 +1164,7 @@ " fill: currentColor;\n", "}\n", "
&lt;xarray.DataArray 'w' ()&gt;\n",
-       "array(10.)
    " + "array(10.)" ], "text/plain": [ "\n", @@ -1239,14 +1192,7 @@ "cell_type": "code", "execution_count": 9, "id": "93b5659d", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:05.492533Z", - "iopub.status.busy": "2024-02-17T12:47:05.491566Z", - "iopub.status.idle": "2024-02-17T12:47:05.503504Z", - "shell.execute_reply": "2024-02-17T12:47:05.502486Z" - } - }, + "metadata": {}, "outputs": [], "source": [ "threshold = 9" @@ -1264,14 +1210,7 @@ "cell_type": "code", "execution_count": 10, "id": "9c322da7", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:05.508050Z", - "iopub.status.busy": "2024-02-17T12:47:05.507772Z", - "iopub.status.idle": "2024-02-17T12:47:06.072214Z", - "shell.execute_reply": "2024-02-17T12:47:06.071461Z" - } - }, + "metadata": {}, "outputs": [], "source": [ "%%capture\n", @@ -1290,14 +1229,7 @@ "cell_type": "code", "execution_count": 11, "id": "6e8d3cd3", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:06.080011Z", - "iopub.status.busy": "2024-02-17T12:47:06.079289Z", - "iopub.status.idle": "2024-02-17T12:47:06.129931Z", - "shell.execute_reply": "2024-02-17T12:47:06.122788Z" - } - }, + "metadata": {}, "outputs": [ { "data": { @@ -2346,14 +2278,7 @@ "cell_type": "code", "execution_count": 12, "id": "c61c8715", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:06.137629Z", - "iopub.status.busy": "2024-02-17T12:47:06.137341Z", - "iopub.status.idle": "2024-02-17T12:47:07.859565Z", - "shell.execute_reply": "2024-02-17T12:47:07.858711Z" - } - }, + "metadata": {}, "outputs": [ { "data": { @@ -2410,14 +2335,7 @@ "cell_type": "code", "execution_count": 13, "id": "a1c72cca", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:07.867736Z", - "iopub.status.busy": "2024-02-17T12:47:07.867020Z", - "iopub.status.idle": "2024-02-17T12:47:08.071551Z", - "shell.execute_reply": "2024-02-17T12:47:08.069618Z" - } - }, + "metadata": {}, "outputs": [ { "name": "stdout", @@ -2451,14 +2369,7 @@ "cell_type": "code", "execution_count": 14, "id": "26b7a9b2", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:08.080735Z", - "iopub.status.busy": "2024-02-17T12:47:08.079561Z", - "iopub.status.idle": "2024-02-17T12:47:08.126057Z", - "shell.execute_reply": "2024-02-17T12:47:08.123918Z" - } - }, + "metadata": {}, "outputs": [ { "data": { @@ -3603,14 +3514,7 @@ "cell_type": "code", "execution_count": 15, "id": "ae3f63dd", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:08.136022Z", - "iopub.status.busy": "2024-02-17T12:47:08.135416Z", - "iopub.status.idle": "2024-02-17T12:47:08.140782Z", - "shell.execute_reply": "2024-02-17T12:47:08.139145Z" - } - }, + "metadata": {}, "outputs": [], "source": [ "track_mask = trajectories[\"cell\"] == 1.0" @@ -3629,14 +3533,7 @@ "cell_type": "code", "execution_count": 16, "id": "25e4c7ae", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:08.150265Z", - "iopub.status.busy": "2024-02-17T12:47:08.149159Z", - "iopub.status.idle": "2024-02-17T12:47:08.162712Z", - "shell.execute_reply": "2024-02-17T12:47:08.159743Z" - } - }, + "metadata": {}, "outputs": [], "source": [ "track = trajectories.where(track_mask).dropna()" @@ -3655,12 +3552,6 @@ "execution_count": 17, "id": "9e6f8c23", "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:08.181028Z", - "iopub.status.busy": "2024-02-17T12:47:08.172748Z", - "iopub.status.idle": 
"2024-02-17T12:47:09.791381Z", - "shell.execute_reply": "2024-02-17T12:47:09.790072Z" - }, "tags": [ "nbsphinx-thumbnail" ] @@ -3729,7 +3620,7 @@ "\n", "*On further extensions:*\n", "\n", - "- Also, one could actually use the output of the segmentation (`features_test` Dataset in the example below) as input for the tracking with the advantage that information on the area (ncells) is added in the dataframe. \n", + "- Also, one could actually use the output of the segmentation (`segments` Dataset in the example below) as input for the tracking with the advantage that information on the area (ncells) is added in the dataframe. \n", "\n", "- One could also use the output of the tracking in the segmentation (`trajectories` Dataset from above) with the advantage that mask will contain only the features that are also linked to trajectories. \n", "\n", @@ -3740,18 +3631,11 @@ "cell_type": "code", "execution_count": 18, "id": "f73bdf63", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:09.797031Z", - "iopub.status.busy": "2024-02-17T12:47:09.796476Z", - "iopub.status.idle": "2024-02-17T12:47:11.737307Z", - "shell.execute_reply": "2024-02-17T12:47:11.736184Z" - } - }, + "metadata": {}, "outputs": [], "source": [ "%%capture\n", - "mask, features_test = tobac.segmentation_2D(features, test_data, dxy, threshold=9)" + "segment_labels, segments = tobac.segmentation_2D(features, test_data, dxy, threshold=9)" ] }, { @@ -3759,21 +3643,14 @@ "id": "8b9f3d3b", "metadata": {}, "source": [ - "As the name implies, the first object returned is a Boolean mask that is true for all segments belonging to features. The second output is again the features of the field." + "As the name implies, the first object returned is an array in which the segmented areas belonging to each feature have the same label value as that feature. The second output is a dataframe of the detected features updated with information about their segmented regions (currently only the number of pixels segmented)" ] }, { "cell_type": "code", "execution_count": 19, "id": "7d2926de", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:11.745231Z", - "iopub.status.busy": "2024-02-17T12:47:11.744182Z", - "iopub.status.idle": "2024-02-17T12:47:11.776460Z", - "shell.execute_reply": "2024-02-17T12:47:11.775352Z" - } - }, + "metadata": {}, "outputs": [ { "data": { @@ -4150,7 +4027,7 @@ " latitude (y, x) float64 ...\n", " longitude (y, x) float64 ...\n", "Attributes:\n", - " long_name: segmentation_mask
  • " ], "text/plain": [ "\n", @@ -4307,7 +4184,7 @@ } ], "source": [ - "mask" + "segment_labels" ] }, { @@ -4322,14 +4199,7 @@ "cell_type": "code", "execution_count": 20, "id": "25d26ebc", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:11.782511Z", - "iopub.status.busy": "2024-02-17T12:47:11.781845Z", - "iopub.status.idle": "2024-02-17T12:47:13.720840Z", - "shell.execute_reply": "2024-02-17T12:47:13.719745Z" - } - }, + "metadata": {}, "outputs": [ { "data": { @@ -4352,7 +4222,7 @@ " test_data.isel(time=itime).plot(ax=axs[i])\n", "\n", " # plot the mask outline\n", - " mask.isel(time=itime).plot.contour(levels=[0.5], ax=axs[i], colors=\"k\")\n", + " segment_labels.isel(time=itime).plot.contour(levels=[0.5], ax=axs[i], colors=\"k\")\n", "\n", " # plot the detected feature as black cross\n", " f = features.loc[[itime]]\n", @@ -4404,17 +4274,10 @@ "cell_type": "code", "execution_count": 21, "id": "4c516c26", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:13.727021Z", - "iopub.status.busy": "2024-02-17T12:47:13.726230Z", - "iopub.status.idle": "2024-02-17T12:47:13.758905Z", - "shell.execute_reply": "2024-02-17T12:47:13.757558Z" - } - }, + "metadata": {}, "outputs": [], "source": [ - "vel = tobac.analysis.calculate_velocity(track)" + "vel = tobac.calculate_velocity(track)" ] }, { @@ -4429,14 +4292,7 @@ "cell_type": "code", "execution_count": 22, "id": "887d4338", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:13.766366Z", - "iopub.status.busy": "2024-02-17T12:47:13.765703Z", - "iopub.status.idle": "2024-02-17T12:47:13.771825Z", - "shell.execute_reply": "2024-02-17T12:47:13.770580Z" - } - }, + "metadata": {}, "outputs": [], "source": [ "expected_velocity = np.sqrt(30**2 + 14**2)" @@ -4454,14 +4310,7 @@ "cell_type": "code", "execution_count": 23, "id": "6151a205", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:13.775501Z", - "iopub.status.busy": "2024-02-17T12:47:13.775155Z", - "iopub.status.idle": "2024-02-17T12:47:14.037489Z", - "shell.execute_reply": "2024-02-17T12:47:14.036830Z" - } - }, + "metadata": {}, "outputs": [ { "data": { @@ -4499,17 +4348,10 @@ "cell_type": "code", "execution_count": 24, "id": "fdac72d9", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:14.041186Z", - "iopub.status.busy": "2024-02-17T12:47:14.040814Z", - "iopub.status.idle": "2024-02-17T12:47:14.046777Z", - "shell.execute_reply": "2024-02-17T12:47:14.044844Z" - } - }, + "metadata": {}, "outputs": [], "source": [ - "hist, edges = tobac.analysis.velocity_histogram(\n", + "hist, edges = tobac.velocity_histogram(\n", " track,\n", " bin_edges=np.arange(14, 43, 3),\n", ")" @@ -4519,14 +4361,7 @@ "cell_type": "code", "execution_count": 25, "id": "d18089f9", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:14.053363Z", - "iopub.status.busy": "2024-02-17T12:47:14.052460Z", - "iopub.status.idle": "2024-02-17T12:47:14.364261Z", - "shell.execute_reply": "2024-02-17T12:47:14.359902Z" - } - }, + "metadata": {}, "outputs": [ { "data": { @@ -4565,178 +4400,55 @@ { "cell_type": "code", "execution_count": 26, - "id": "20866a28", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:14.371374Z", - "iopub.status.busy": "2024-02-17T12:47:14.370983Z", - "iopub.status.idle": "2024-02-17T12:47:14.423818Z", - "shell.execute_reply": "2024-02-17T12:47:14.422823Z" - } - }, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - "\n", - " \n", - 
"\n", - "\n", - "\n", - "\n", - "\n", - " \n", - "\n", - "\n", - "\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - "\n", - "
    Segmentation Mask (unknown)timeprojection_y_coordinateprojection_x_coordinate
    Shape10050100
    Dimension coordinates
    \ttimex--
    \tprojection_y_coordinate-x-
    \tprojection_x_coordinate--x
    Auxiliary coordinates
    \tlatitude-xx
    \tlongitude-xx
    \n", - " " - ], - "text/plain": [ - "" - ] - }, - "execution_count": 26, - "metadata": {}, - "output_type": "execute_result" - } - ], + "id": "33a10697", + "metadata": {}, + "outputs": [], "source": [ - "import xarray as xr\n", - "\n", - "imask = xr.DataArray.to_iris(mask)\n", - "imask" + "area = tobac.calculate_area(features, segment_labels)" ] }, { "cell_type": "code", "execution_count": 27, - "id": "33a10697", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:14.437699Z", - "iopub.status.busy": "2024-02-17T12:47:14.437388Z", - "iopub.status.idle": "2024-02-17T12:47:14.448235Z", - "shell.execute_reply": "2024-02-17T12:47:14.442773Z" - } - }, + "id": "571cb182", + "metadata": {}, "outputs": [], "source": [ - "# area = tobac.analysis.calculate_area(features, imask)" + "blob_magitude = 10 # also hard-code in the test\n", + "blob_sigma = 10e3\n", + "\n", + "normalized_circle_radius = np.sqrt(np.log(blob_magitude / threshold))\n", + "absolute_circle_radius = np.sqrt(2) * blob_sigma * normalized_circle_radius\n", + "expected_area = np.pi * absolute_circle_radius**2" ] }, { "cell_type": "code", "execution_count": 28, "id": "86be74d4", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:14.459910Z", - "iopub.status.busy": "2024-02-17T12:47:14.459329Z", - "iopub.status.idle": "2024-02-17T12:47:14.465913Z", - "shell.execute_reply": "2024-02-17T12:47:14.463934Z" + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA18AAAHxCAYAAACSzwbeAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/H5lhTAAAACXBIWXMAAA9hAAAPYQGoP6dpAABsXElEQVR4nO3deXxU9b3/8ffJnpCFhJCEJEAW9p2EHQVEBREUwWotVkXbitaFWlu1ixfrbXtva0ur+KtCeytaFBV3lBZFFkVElrAbCPsWSAKBbGSbzPn9ETMkkpBMMpkzk7yej0cejzlnzpzzIfkwc95zzvkewzRNUwAAAACAVuVjdQEAAAAA0B4QvgAAAADADQhfAAAAAOAGhC8AAAAAcAPCFwAAAAC4AeELAAAAANyA8AUAAAAAbkD4AgAAAAA3IHwBAAAAgBsQvgAAAADADQhfLnD69GktWbJEc+fO1dixYxUSEiLDMDRs2DCXb2vx4sUyDKNJPy+//LLLtw8AAACgefysLqAteP311/XII4+4ZVuxsbEaO3Zsg8/n5OTowIEDkqQxY8a4pSYAAAAAjSN8uUB4eLiuueYaDRs2TMOGDVNWVpZ++ctftsq2pkyZoilTpjT4/N13360DBw5o7Nix6tmzZ6vUAAAAAMB5hC8XuOeee3TPPfc4phcvXmxJHSUlJXrrrbckSbNnz7akBgAAAAD145ovi509e1a/+tWvNGjQIIWGhqpDhw4aMmSI/ud//kcXLlxwal1vvfWWiouLFRwcrFtvvbWVKgYAAADQHBz5stCWLVs0depU5ebmKiAgQMnJybLb7dq1a5d27NihZcuW6dNPP1VkZGST1ldzxO3mm29WeHh4K1YOAAAAwFkc+bJIXl6ebrjhBuXm5uqRRx5RXl6e9u7dq6ysLB04cECjRo3Stm3b9NBDDzVpfUeOHNG6deskccohAAAA4IkIXxb585//rNOnT+v222/X/Pnz6xypSk5O1rJly9ShQwctXbpUJ06caHR9ixcvlmma6tatmyZOnNiapQMAAABoBsKXRWoGxrjvvvvqfT4xMVHDhw+X3W7XZ599dtl1maapV155RZJ01113yTAM1xYLAAAAoMW45ssCJSUlOnjwoCTpZz/7mfz86v8zZGVlSVKjR77WrVunw4cPS6oOXwAAAAA8D+HLAufPn3c8/uqrrxpdvrFRD2sG2rjyyiuVmpraktIAAAAAtBLClwVCQ0Mdj/Py8hQdHd3sdZWUlOjtt9+WVH2DZQAAAACeiWu+LBAREaHExERJ0q5du1q0rmXLlqm4uFgdOnTQLbfc4oryAAAAALQCwpdFaoLSX/7ylxatp/a9vWofUQMAAADgWQhfFnn88ccVFxen5cuX684771R2dnad5ysqKvTxxx/r1ltvVVVVVb3rOHz4sGMkRE45BAAAADybYZqmaXUR3u748eMaOnSoY7q8vFzFxcXy8/NTRESEY/5jjz2mxx57zDG9bds23XjjjTpx4oR8fHzUs2dPRUZGqqCgQAcPHlRFRYUkqbKyst4REZ966in95je/UVJSkg4dOsQQ8wAAAIAHY8ANF6iqqtLZs2cvmW+z2erM//aohUOHDtXu3bv1wgsv6P3339fevXt1+PBhxcbGavjw4Zo4caJuuummeoMX9/YCAAAAvAtHvgAAAADADbjmCwAAAADcgPAFAAAAAG5A+Gqm22+/Xenp6br99tutLgUAAACAF2DAjWbau3evMjIyrC5DNptNH330kSRp6tSp9Q7OAdRGz8BZ9AycRc/AGfQLnOXNPcORLwAAAABwA8IXAAAAALgB4QsAAAAA3MDrw9eRI0dkGEaTfu6++26rywUAAADQTnnP1WkNCAoK0tixYxt8vqysTFu3bpUkjRkzxl1lAQAAAEAdXh++4uLitH79+gaff/nllzV79mwFBwfru9/9rhsrAwAAAICLvP60w8YsXrxYkjRz5kyFh4dbWwwAAACAdqtNh68jR45o3bp1kqTZs2dbWwwAAACAdq1Nh6+XX35ZpmmqW7dumjhxotXlAAAAAGjHvP6a
r4aYpqlXXnlFknTnnXfKx6d1cqZpmrLZbK2y7qaovW0r64D3oGfgLHoGzqJn4Az6Bc7y1J7x82s8WhmmaZpuqMXt1q5dq6uuukqSdODAAaWmpjb6moULF2rRokVNWn9mZqZKS0uVkpKi+fPnt6hWAAAAAN5t+vTpjS7TZo981Qy0ceWVVzYpeEnSqVOnlJGR0YpVAQAAAGiv2mT4Kikp0dtvvy3JuYE2unTporS0tCYtW3PkKyIiQlOnTm1OmS5hs9m0cuVKSdLkyZObdLgT7Rs9A2fRM3AWPQNn0C9wljf3jPdU6oRly5apuLhYISEhuuWWW5r8ujlz5mjOnDlNWjY9PV0ZGRkyDMNj/uB+fn4eUwu8Az0DZ9EzcBY9A2fQL3CWt/VMmxztsOaUw+985zsKCwuzthg32JJnqKLK6ioAAAAAXE6bC1+HDx/WZ599Jql93Nvrve3Z+tcBXz27x1fH8i9YXQ4AtBsZx87pxufX69r567RmX67V5QAAvECbC1819/ZKSkrShAkTrC6nVR3ILdav398jSTpRYuimv32p1XtzLK4KANo20zT1ypdH9N2FX2rniQLtzy3WPYs36y+fZMlub5MDCAMAXKRNha/a9/a66667ZBiGxRW1rm5RIfrusETHdGGZTfcs3qL5H+9TFTsAAOByFypseuSN7fqv9/eosuri+6xpSs9+ul93L96scyUVFlYIAPBkbSp8rVu3TocPH5ZhGLrrrrusLqfVBfj56MmpfXVXzyoF+FzcCXhu9QHNfmmT8tkBAACXOZRXrBn/b4Pe257tmBcTFqikTiGO6XVZeZq2YL12nSiwokQAgIdrU+GrZqCNcePGKTk52dpi3Cgt2tRPB1YpJbqDY97n+89o2nOfa/vx89YVBgBtxH92n9b057/Qvpwix7yRyVH68OEr9MFDV2hSv1jH/JPnS3XzCxv0+qZjVpQKAPBgbS58maaptWvXWl2K23UJkd6+b5SmDIhzzMsuKNOtL36pJRuPyjQ5DREAnGWrsut//p2p+5ZsVVG5zTH/3nEpevWHIxUTFqTwIH8tvCNdT0zpI59vznavqLLriXd26bG3dqiskuFoAQDV2lT4au/Cgvz0t9vT9Kvr+8r3mz2Aiiq7fv3ebj26bIdKGY8eAJosr6hc3/+/r7Rw3SHHvNBAP71we5p+eX1f+fle/Ag1DEP3jU/Vkh+OVHRogGP+m1tO6OYXNujYWUajBQAQvtocwzD0o3Epeu2HI9U5LNAx/52Mk5rxty905EyJhdUBgHfYciRfU5/7XBsP5Tvm9YwJ1fsPjtWUgV0afN2Y1Gh9+NCVSuvW0TFvT3ahpi34XJ9mMhotALR3hK82amRKJ3300BUanhTpmLf3dJFuWLBeH+85bWFlAOC5TNPUP9cf1m2LNiq3qNwx/8bB8XrvgbFK7Rza6DriIoL0+r2jNXtMkmNeYZlNP3h5i/7MaLQA0K4RvtqwmPAgvfajUfrBFRcHHykqt+nef23VH/6zV7Yqu4XVAYBnKSm36aGl2/T0h1/L9k1A8vMx9Jsb++vZ24aoQ6Bfk9cV4Oejp27sr+e+N1QhAb6O+QsYjRYA2rWmf5LAK/n7+ujJaf2U1i1Sj721QyXfXPf1wtqD2nH8vJ773lBFhwY2shbvVDPIiDfc763cVtWsb8N9DENB/r6NL4hWV1pRJVPO/w39fX3k7+v534OZpuQF/5Wa7UBuse5bslUHcosd8+LCg/T/bk9TevfIy7zy8m4cHK8+cWG6b8lWHcqrPu27ZjTav30/XUO6dmxp6R7LW3rGVmVXRTO/jAwJYDfKE/AZCm/Cu0Y7MXVQF/WOC9V9SzIcOxcbDp7VtOfWt3jnwhPtzynST9/coaycIl3dN0a3DuuqK3t2dgxE4imKyio17/09Wr4zu84NW5vKx5Am9onR/8wcVOcaP7jP/pwiPfb2Tm07dr5Zrw8J8NWdo5P06KReHhfCyiqr9PHXOXr9q6PaeNhXfSJMjRxfrriObeujY8WuU/r5sotfTknS6JROWjDLNV9O9YoN0/sPjNVjb+3Uv3dXn/ZdMxrtf93QT7eP7OYVXxI1VVFZpX717i59uNNX3TpIJbEnNH1oolNHDt3BNE0t/OyQXlh7UAWllc1aR+/YMP1+5sA29xnqLVr6GWoY0k1DEvTnWwbLx8P2D9B2GSZjkDdLenq6MjIylJaWpq1bt1pWh81m00cffSRJmjp1qvz8Lv/hVlJu0+Nv79SHO0855vn7Gvr11H66c3T3NrEDsHxHth5/e6cufGt0x/iIIH1nWFfdkp6orlEhDbzaffadLtJ9S7bqsAsGQYkJC9Tfbk/TsKSoRpd1tmfQsIZ6rTlGJEXp+VlDFRMe5ILKWibzVKHe2Hxc7247eclOqTO95ukqq+z6w7/36h/rD9eZf/+EVD16ba86oxm6gmma+sfnh/W//9lb51v6mWkJ+t1NAxUc4P3fwDf0vhYS4KsbBsXr1uFdldato+WfNYVllXr0zR365OuWD4LS1j5DrdCczyVXfoa+9sORGtMjusXrgft4876MZ33NilbXIdBPC743VP81rZ/8vvmWp7LK1LwP9ugnb2zXhQpbI2vwXJVVdv1m+R49tHRbvTvD2QVleu7T/Rr3zBrd8X9fafmObJXbrBl+//3tJ3XT//vCJR8akpRbVK7bFm3UP9cf5p5ubtBYrzXHpiP5mrpgvTYdzm984VZQWFapJRuP6sbn12vKs59r8YYj9R4NaCu9lltYptv//lWd4BUW6KdFd6Tr8ev6uDx4SW1/NNrLva9dqKjSG1uO6+YXNmjSXz7TPz4/pLPF5fWspfVlnirUjQvWuyR4SW3nM9SbuPoz9O2Mky5ZD9AUHPlqppojX11Su+j+v95vWR12u1379u2TJPXu3Vs+Pk3fYTh5rlQf7sxWca0bh0aHBuqGwfGK6hBwmVd6nuJymz7cka2T50sd8wL9fDQ6tZNOnCvVobwS2etp9WB/X/XtEq4BCRFuOW2vym5q3b5cbTt+vs780SmdNKTW0NRNte90kdbuy6vzb+sTF65r+8UqwK/+XmhJz6DhXps8IE4JHYOdWpfdlD7PytPXpwod83wMQ+N6RSutW2Srf4tumqZOnivVruwCZeUUyVbPaTv+vj7qFROqygsF2l9oqPZ/o8Z6zVOd+Oa9r6TWe1/nb977It303ldSbtOHO0/pxLmL9/8K9PPRdQO6qEdM4yMqepKG3tdSwkxVmdLpMl+V2y69psrXx1Bq51ANSIhQ904h8nHDUaOvswv0SWZOnV5Pie6giX1j5e/r3PbPFFfo37tOtYnPUKs19XPJlZ+hh/JKtPKb0Z/9fX10/4RUjzv1Gw3z1H2ZJ6c92egyhK9mqh2+7p1/r2V12O12ZWVlSZJ69erldPOVlNu0YtepOjuSAb4+mtQ/Vj1iwlxaa2s5nn9B/959qs4RiOjQQE0b1EUdQ6o/AEvKbfr6VKH2ZBfq/IX6RxmLDQ/SgPgI9YoLVaCf60//KSqr1Ipdp3SqoMwxL9DPV9cNiFNydIdmrzf
7fKlWfGsHIKpDgKYNqn8HoKU9057V12udQwM1bVC8IkL8m7VO0zS182SB1n0rRPeMCWu1YFPz/+Hr7EKda8L/B38fQ1lZWTpfIe0qCKgTWi7Xa57GNE1tO3Zenx84U+eoXd+4cE3sG+P2Ha8qu6kvDpxRxrFzdeYPT4rS6JROXnENSkPva5P7xagiv/poQkpqDx06c0G7swt04lxpvesJC/RTv/gI9YsPV0Rw8/4vXY7NbtdnWXnaeaKgzvzRKZ00Ijmq2V90tIXPUE/QlM8lV3+G2qrsWvT5IVV888XA5P5x6tslvJn/Aribp+7LPHXjU40uQ/hqprYSvqrXYeqLg2e09WjdHYD07pEamxrtkh2A4nKbDuQUyW5KKZ07OEJRS5imqa1Hz+mLg2fr7Ej16xKuiX1i6j1tyDRNZZ8v0+7sAu3PKXIMJ12bn4+PesZWfxsbHxHkkqMPx/MvaMWuUyqtvLjTHhMWqKmD4l2yo3GhwqYVu07X+Rbd39dHk/rFqmds3R2A5vZMbmGZjuVfUJC/r3rEhHrsCFHFZTYdyLW+15x1uqBMH+3MVlGtYBMZEqAbBndRVIeWH5W1200dOVui3dmFOnympN5TBoP8fdUnLkwDEiLqDDRRu2cSk1L0nz25Teq15mqNXquw2fXJ16e1v9Zohj6GofG9O2tQQoSl1+pk5RTpk69zVFlrxL2ukSGaMjDOJaPpVVbZdSC3WMXlNsV3DG7V97XOYdVfRoQF+tb7PnP+QoX2ZFcH/5IGTtHrFhWi/vERSo3pID8X7FAVllXqo52nlFN4cac9yL96pz2pU/O/+KrhrZ+hraG5vdbY51JrfYauyszR7pPVgbxbVIhmpiU2e11wL8JXO9SWwleNA7lF+nhPTp0hdxMjgzVlQJdmjVJVZTd1+EyJ9mQX6MiZkjqDcCdGhmhAQrh6dA5t1o5rua1Kn+zJ0YG8iztSvoahCX1iNCA+vElv9uW2Ku07XaQ92YV1PpRriwwJUP/4cPXtEt6s34Fpmtpy9Jw2HDhT598/ID5CE3p3dul1JXa7qQ2HzmrLkbrXDKV1i9TYHtGOkR6d6Zmyyurf0e7sAuXVuuGsr4+hnjFhGpAQroSOwZZfZO7pvdZUpRVV+vfuUzqWXzfYXNsvVr2aGWyaurM7ICFCKZ3r39n9ds9IRpN6zRmt2Wtni8v14c5TdY7yhQX6aeqgeMVFWD/AiSTll1Tow53Zde7/FRrop6mDuqhLhHOns0rV7z05heXak12gfaeL6ryvt9b7Wv/4CF31zftaY+8zNV8G7Mku1CEnvwxwxtGzJfr37tMqq7XTHhsepKkDuyjcxUfY3P4ZGh+uHjHNe19zJVf0WkP90tqfoSfPl2rZluOO6R9ckaywINcfeYXrEb7aobYYviTpXEmFPtx5SmdLLu78dAio3gGIb+L1LOdKKrQ7u0CZp4oavfg40M9HfeLC1T8hXDFhTdsJOvPNjlTt0wfDgvw1bVAXxTZzpLi8ouoPjsxTRfUOwuFjGEqO7qABCeHqHtWhSd9kltuqtHJPjg7V3mn3MTSxd4z6J0Q0q86mOJhbrJV7Ttf5AEzoGKzrB1bvADTWM6Zp6sS5Uu3JLtD+3OJG753SMdjfcbpQqJuHks4vqXD83byl1xpjN01tPHT2koE3hnTt2OTbJdiq7NqfW6w9LjrNq6GeaazXGtOcXuufEKF+ToSGfaeLtCqz7lGlblEhmjKgi8eNLlhhs2tVZo6ycooc86qvAeyswYlNOzpXVlmlzG9OsT7TyIAWrnxfu6p3jAbUel9z5rOppNzmqLkpp8E25bRw0zS1+Ui+Nhw8W2f+wIQIje/d2SVH1Opz7psQfbZWiHbLZ2h8uNtHSi2tqNLe067ptfr6xR2foaZp1hlcaGyPaA1vAyO5tgeEr3aorYYvqfq0gVWZOdp3uu4OwJU9ozWka/1DBFdW2bU/p3pnr/a577WFBfnLz8do8MM1JixQ/RMi1Ds2rMHTjPaeLtSqr3Nls1/ckereqYOu6x/nkh0pW5VdB/Oqv2msffShtg6BfurfJVz94yMavM4nr6hcH+3M1vlao8WFf7PT7o4PyPMXqkN07Q/EkAA/TR3YRV0iAuvtmeJym77OLtSe7IIG73kTGx6kgtLKOt8i1zAMQ8mdQtQ/IULJnZq2I9ccNb22O7tA2V7ca405lFcdbGoPVBAfEazrB3VpMOTmFpZpd3ah9p4udFzHUJuPYSi1cwf1j49QNycGOLjc+8zlei0hsv6dTXf0WpXd1Of787T9Wxfmj0iO0qiUTm4Z3KE5TNPU9uPn9fn+M98aSCdMV/eNrfe6NNM0dTy/VLuzC3Qwr/4QG+Dro06hAXWul6nN1e9rzflsaupp4b1iQ9X/MqeFl1VWaeWe03VGwvP1MTSxT4z6x7feF181LP0MjY9Q77iG39daqk6v5Rarqp5dyOb02rf75WxJpds+QzceOquNh6pDelSHAN0xilsGeAPCVzvUFkY7vJyaHYB1WXl1Pshrj25Wc6rBrpMF2ne6sMHRrKpPGYpQt6jqHbFTBWXfvKaozrfRNfx8DPWKrX5NYmT1aUb1jXBkGNKo5E4aldo6O1IFFyq1J7tAu7MLVVRW/w5izelaPWud+vH1qQJ98vWlo2lNGdjFrddJVVbZterrnEtG0ruyRyd1KMuVJPXo2UtH8i9oz8lCHTpTrPreDUICfNXvmxEhO4UGyma362BuiXZnF+jo2ZJ6X9Mh0E/9uoRrYEKES0aPq91rDQULb+61hpy/UKnlO7KVW3RxJ6bmW/Sae9WVVVZ/+7zrRGGd5Wrr1CFQAxOrT/1pzjVEjb3PNNRrtUdtrLKbOnSmuFV6rX98uAbEX+y1hkakvH5gF6V09o6RBJsyGm1RWaX2nCzU7suE2ITIYA1MiFCv2DD5+/q47X2tpZ9N5bYq7T1dpN0nC3S6gZ34qA4BGvDN0duaI6G5hWVavuOUzpdeDCgdg6uvnXTnkSFP+wxtqcLSSn2d3Xq9lhodooMH9kuS7OFdtGpvrts+Q89fqNT/rT/kmP7+yO6K9ZDTkdEwRjtsh7z1JsvO2nr0nB54NUOna10T1SMmVLekJ+rdbSe1t9Y3e7X1iQvTd4d31U1DEhrc+a7ZQXpjy3FtO3a+3mWSOoXolmFd9WlmjjJqLRMR7K+/3jZEV/WOafa/ramq7KY+25+nNzcfr975qOfb2PAgP80YmqCKKlNLNx1zzDcM6ZFreunBq3pYMnKZaZp69atjenr513VODRsSZVenIGlHYZDOFF/6LaphSON6dtZtw7vq6r4Nj7hXc778si0nGvy2dkRylL47rKtGNyO41HyD/Mbm4+2i1+pTVlmlJ9/brWVbTzjm+foYun98qk6cu6B/7z5d705bzU1tvzuiq4Y28G17UzXlfaahXrt+YJy6RoXo7a0n6z01yZW9NjI5Stf0jdXCzw7V2Va/LuF68fvp6tbJ+purOyOvqFwPLc3QxkMXT0ENDfTTA1f10FeHz+qzrDzVd6ZmdGiAbk5P1K3Duiq1gbBZ+32t+r
RM176vufKz6XI3/Zaqw8bVfWM0MCFCC1YfqPP/4Zq+MfrzrUNaZQTFpvCUz9DrB3ZRsJPBxZSp7cfO640tx7UuK6/eLz9c2WuDO1aoyi5tyL34HuCuz9BbXtygzUeqB0yZPSZJT93Yv9W2Bdfw5pssE76aqb2EL6n6upeHl2675Nz5bwsN9NONQ+L13WFdNaiJ1yfUyMop0hubj+udjBM6d6H+b8hqDEyI0N9uT3N88+9OZ4rL9W7GSb2x5bgO1Bo1rT6RIf569rahGters5uqa9j24+f14yVbld3AN8g1EjoG67vDu+o76YlNvj5Bujhk9htbjuuTb11w3hraQ6/VZpqmXt98XPPe39Po7zatW0fdNrybpg5q3kX+9XHmfcbTeu2W9ET9900DPHZ0zsbYqux65uN9Wrju0GWX8zGkq3rH6NbhXTWxj3PD5rfG+1prfDaVVVbp469z9Obm41p/4Mxll/UxpEcn9db941MtH7Lf0z5DW8rTes0Vlm46pl+8s0tS9RHVjb+42uvuX9jeEL7aofYUvqTqHYA/f5KlF9YevOS5EUlRunV4V13vgmGRy21VWvV1rt7Yclyf77/0m7bbhnfVUzf2t3xHyjRNZRw7pzc2H9eHO+ve90mSBiVW77QnRnrON+35JRWa+/o2fb6/7k5LgG/1DYK/O6yrxqS2/N5C+SUVenfbSb25+bj25dT/rW5ztcdeq23nifO6f0nGJUd+ojoE6Oa0BN06rKvLhnuvzdn3GU/otQBfH/1men/dNrxrm7h+4z+7T+tny3bUOQ1Rkrp3CtGtw7rq5rTEFo/c6Mr3tdb+bDqef6H6SOjWE5dcWxTVIUDP3TZUV/SMduk2W8JTPkNbwlN7zRUKyyo17LerHKe0//3OYbq2X6xbto3mIXy1Q+0tfNX4eM9pzftgjyTpxiHxlz3VoKVqTjN6d9tJFZfZ9Ph1fXTr8K6tsq2WqH3qx77TRbp1WFf94vo+rXKj5paqspv66yf79PfPDqhToHTPVX01M62rS67L+jbTNLXjRIHe2HxMH+48paKyy4/a1ZCYsEDN+CZYtPdek6pHQvvlu7u0Zl+uRqV00neHXf50PVdozvtMld3Uc5/u1z/XH1ZiVIhuHZZ42VOoWqJ2r/1792nFhAXqT7cM1qDEji7flpUO5RXrZ8t2KCunWNf0rT7yMCq5dW7G3NL3NXd9NtUMrPLG5upT44Z266hnvjPYqaOp7mTFZ+jbGSd0PL/+U3UbE+zvq8n9Y1u9197fdkJ/X7Vb2Rek20Z016+m9XP7Z+iDr2Xow52nJElTBsTphe+nu3X7cA7hqx1qr+FLqt7Rcfc3yVZsszm8oU6bzaYPP/xIhuHenqnvWrmm8PMx3Po79Ya/oeTeOlvyPsP7heu5+9/XnO21l8+m5rCizvoG5mgKX8Nwy2mbNf1imtK0adbsSK/Zm6u7F2+WVH3kfNOvrvbYm1nDu8OX91QKj2HFh5s3fKBK3lSnu7dnyN/XW3431OlKvF+4nrv/fd7y+6TOhjlzXZaVrPwTXtkzWtGhATpTXKGKKruW7zylO0Z1t64gtFne8b8RAAAAaCV+vj6aPiTBMf1OxonLLA00H+ELAAAA7d7NaYmOx9uOndehvMuPyAg0B+ELAAAA7V6/+HD1ibs4Yuw7GSctrAZtFeELAAAAUN2jX+9uOyl7MweLAhpC+AIAAAAkTR8Sr5oBHk+eL9VXh/OtLQhtDuELAAAAkBQTHqQre3Z2TDPwBlyN8AUAAAB84+b0i6certh1ShcqbBZWg7aG8AUAAAB8Y1K/WIUFVt8Kt6SiSh/vybG4IrQlhC8AAADgG0H+vrp+YBfH9NuceggXInwBAAAAtcxMu3jD5S8OnNHpgjILq0FbQvgCAAAAahmeFKXEyGBJkt2U3t/OPb/gGoQvAAAAoBYfH0Mza93z6+2MEzJN7vmFliN8AQAAAN8yc+jFUw+zcoq1J7vQwmrQVhC+AAAAgG9Jiu6g9O6RjmkG3oArEL4AAACAetQeeOOD7dmqrLJbWA3aAsIXAAAAUI9pA+MV4Fe9u3y2pELr9uVZXBG8XZsLXytWrNDMmTMVHx+vwMBAxcbGauzYsfr1r38tm407lAMAAKBpIkL8dW3fWMf0O9s49RAt02bCl81m0x133KGpU6fq3Xffla+vrwYPHqzQ0FBt2bJFv/vd71RWxj0aAAAA0HS1Tz1c9XWuCi5UWlgNvF2bCV/333+/lixZosGDB2vTpk06fvy4Nm3apIMHD+rcuXN6//33FRgYaHWZAAAA8CLjenVWpw4BkqSKKrs+3JVtcUXwZm0ifK1Zs0b/+Mc/FB8fr9WrV2v48OF1ng8JCdGNN94of39/iyoEAACAN/L39dGNQ+Id029v5dRDNF+bCF/z58+XJP385z9XVFSUxdUAAACgLbm51g2XM46d1+EzJRZWA2/m9eGrrKxMK1eulCRNnz5dmzdv1o9//GNde+21uvHGG/X000/rxAm+oQAAAEDz9I8PV+/YMMf0u9zzC83kZ3UBLbVjxw5VVlaqQ4cOeuutt/TEE0/Ibr94D4bly5frf//3f/Xyyy/rlltucfn2TdO0dBTF2ttmNEc0BT0DZ9EzcBY9A2d4S7/cNKSL/rCySJL0TsZJPTghRT4+hsVVtU+e2jN+fo1HK8M0TdMNtbSa9957TzNmzJCfn59sNpuuuOIKPfvssxowYICOHj2qX/3qV1q2bJkCAgK0adMmDR48uMF1LVy4UIsWLWrSdjMzM1VaWqqUlBTHaY8AAABomwoqpHlbfWWqOnD9qE+VBkR69W40XGz69OmNLuP1R76Ki4slVafe6OhorVixQmFh1YeFe/bsqddff1379+/X9u3b9bvf/U5vvvlmg+s6deqUMjIy3FI3AAAAvEdEgNQv0tSec9Xh69/HfdSvY5U4+AVneH34CgoKcjy+9957HcGrho+Pjx555BHdddddWrlypex2u3x86r/UrUuXLkpLS2vSdmuOfEVERGjq1KnN/we0kM1mc1zzNnny5CYd7kT7Rs/AWfQMnEXPwBne1C/dhxTophc2SpJOlBjyT07Xdf3jLK6q/fGmnvk276m0AZGRkY7Hffv2rXeZmvmFhYXKz89XdHR0vcvNmTNHc+bMadJ209PTlZGRIcMwPOYP7ufn5zG1wDvQM3AWPQNn0TNwhqf3y5DunTS5f6xW7smRJD376UFNGZggXw5/WcbTe+bbvH60wz59+jge1z4KVlvt+VVVVa1eEwAAANqmR67tJeObrLU/t1jLd3DTZTSd14evhIQEde/eXZJ08ODBepepmR8YGKhOnTq5rTYAAAC0LX3iwnXDoIs3Xf7rqizZquyXeQVwkdeHL0n67ne/K0l6+eWX6wwzX+Of//ynJGn8+PFedVgSAAAAnucn1/R0DLRx5OwFvZNx0tqC4DXaRPj62c9+poiICGVmZuqRRx5RRUWFpOp7cD377LNavny5DMPQL37xC4srBQAAgLdL6RyqmWmJjulnP92vchuXtqBxbSJ8de7cWW+99ZaCg4P13
HPPKS4uTiNHjlR8fLx+8pOfyDAM/fGPf9SECROsLhUAAABtwNyre8rvm8NfJ8+X6s3Nxy2uCN6gTYQvSbrmmmu0Y8cOzZ49Wx06dNC2bdtks9l04403as2aNfrZz35mdYkAAABoI7pGhei7w7s6phesPqCySo5+4fLa1AVQPXv21EsvvWR1GQAAAGgHHpzYQ8u2nlCFza7conIt2XhUP7wyxeqy4MHazJEvAAAAwJ26RATr+yO7O6b/tvagSsptFlYET0f4AgAAAJrp/gmpCvb3lSTll1Ro8YYj1hYEj0b4AgAAAJqpc1igZo9NckwvXHdQBaWV1hUEj0b4AgAAAFpgzrgUhQVWD6VQWGbT/31+yOKK4KkIXwAAAEALdAwJ0A+uTHZM/9/6w8ovqbCwIngqwhcAAADQQvdckayOIf6SpJKKKi1cd9DiiuCJCF8AAABAC4UH+WvOuFTH9MtfHlFuUZmFFcETEb4AAAAAF7hrTHdFhwZIksoq7frbGo5+oS7CFwAAAOACIQF++vGEHo7p1746puzzpRZWBE9D+AIAAABcZNbIbooLD5IkVVTZtWD1AYsrgichfAEAAAAuEuTvqwcnXjz6tWzLcR09W2JhRfAkhC8AAADAhW4d1lWJkcGSJJvd1LOf7re4IngKwhcAAADgQgF+Ppp7dU/H9HvbTupAbpGFFcFTEL4AAAAAF5sxNEEp0R0kSXZT+ssqjn6B8AUAAAC4nJ+vj35ybS/H9Ec7T+nr7EILK4InIHwBAAAArWDawC7qHRvmmJ7/SZaF1cATEL4AAACAVuDjY+inky4e/VqVmaPdJwssrAhWI3wBAAAArWRSv1gNSAh3TC/bctzCamA1whcAAADQSgzD0PdHdndMf7AjWxU2u4UVwUqELwAAAKAVXT+oiwL9qne7z12o1Np9uRZXBKsQvgAAAIBWFB7kr0n94xzTb2ecsLAaWInwBQAAALSymWkJjser9+bqXEmFhdXAKoQvAAAAoJVd2SNancMCJUmVVaY+3JltcUWwAuELAAAAaGV+vj66aUi8Y/rtjJMWVgOrEL4AAAAAN5iZluh4vP34eR3MK7awGliB8AUAAAC4Qd8u4erb5eI9v95h4I12h/AFAAAAuMnNtQbeeDfjpOx208Jq4G6ELwAAAMBNbhwSL18fQ5KUXVCmjYfPWlwR3InwBQAAALhJTFiQxvWMdky/vZWBN9oTwhcAAADgRrUH3vj37lO6UGGzsBq4E+ELAAAAcKNr+8UqLMhPknShokor95y2uCK4C+ELAAAAcKMgf19NG9TFMf0O9/xqNwhfAAAAgJvVPvVw/YEzOlVQamE1cBfCFwAAAOBmw7pHqmtUsCTJNKX3tmVbXBHcgfAFAAAAuJlhGJo59OLRr3cyTsg0uedXW0f4AgAAACxwc61TD/fnFmv3yUILq4E7EL4AAAAAC3TrFKLhSZGO6bczTlhYDdyhTYSvp556SoZhXPbnxRdftLpMAAAAoI7aA298sCNbFTa7hdWgtflZXYArxcTEqGfPnvU+16VLl3rnAwAAAFa5fmAXzftgjypsduWXVGhdVp6u7RdrdVloJW0qfE2ZMkWLFy+2ugwAAACgSSKC/TWpX6w+3HlKUvXAG4SvtqtNnHYIAAAAeKvaA298mpmr8xcqLKwGrYnwBQAAAFjoyp7Rig4NkCRVVNm1/JujYGh72tRphzt27NCsWbN0+vRphYWFadCgQbrtttvUv3//VtumaZqy2Wyttv7G1N62lXXAe9AzcBY9A2fRM3AG/VLthkFd9NKGo5Kkt7ce1/eGJVhckefy1J7x82s8WhlmG7ib21NPPaXf/OY39T5nGIbmzp2rP/3pT/L19b3sehYuXKhFixY1aZuZmZkqLS1VSkqK5s+f73TNAAAAQI0TJdIzOy/uvP9qiE0xwRYWBKdNnz690WXaxJGvuLg4PfbYY5o5c6ZSU1MVFhamrKws/e1vf9OLL76ov/71rwoICNAf/vCHy67n1KlTysjIcFPVAAAAQLXEDlJ8iKnsC4YkaXOej6Z2Y9j5tqZNHPm6nD/+8Y96/PHH5efnp/379yspKanBZZtz5Gvo0KHatGmTi6p1ns1m08qVKyVJkydPbtLhTrRv9AycRc/AWfQMnEG/XPSP9Uf0v//ZJ0mKjwjS2kfHycfHsLgqz+OpPdOUOjyj0lb06KOP6tlnn1V2draWL1+uhx56qMFl58yZozlz5jRpvenp6crIyJBhGB71B/eUWuAd6Bk4i56Bs+gZOKO998vMtET9ceU+2U0pu6BMW48XanRqJ6vL8mje1jNtfrRDX19fjRw5UpKUlZVlcTUAAABA/WLCg3Rlz86O6XcyTlhYDVpDmw9fkhQQUD10pyeNhgIAAAB8283pF+/5tWLXKV2oYP+1LWkX4Wv37t2SpMTExEaWBAAAAKwzqV+swgKrT6MrqajSx3tyLK4IrtTmw9dHH32kPXv2SJImTZpkcTUAAABAw4L8fXX9wC6O6bc59bBN8frwtWfPHs2ZM0c7duyoM99ut2vp0qWaNWuWJGnq1KkaPny4FSUCAAAATTYz7eINlr84cEanC8osrAau5D1DgzSgsrJSixYt0qJFixQVFaXu3bvLz89PBw4c0Llz5yRJV155pZYsWWJxpQAAAEDjhidFqWtUsI7nl8puSu9tP6n7xqdaXRZcwOuPfCUlJem3v/2tpk6dqo4dO+rAgQPavn27AgICNGXKFP3rX//SmjVr1LFjR6tLBQAAABrl42NoxtCLYxW8vfWE2vitedsNrz/y1bFjR/3qV7+yugwAAADAZWYOTdBzn+6XJO3PLdbuk4UamBhhcVVoKa8/8gUAAAC0NUnRHZTePdIx/Z89pyysBq5C+AIAAAA80HX94xyP1+7Ls7ASuEqzTju85557XF2HJCkiIkJ/+ctfWmXdAAAAgDeZ0LuzfrciU5K0J7tQuYVligkPsrgqtESzwtfixYtlGIZLL/wzDEOxsbGELwAAAEBSj5hQJXQM1snzpZKktVl5unVYV4urQks0e8CNoKAg3XrrrS4r5OWXX3bZugAAAABvZxiGJvTurFe/OiZJWreP8OXtmh2+IiIi9NJLL7msEMIXAAAAUNeE3jGO8PX5/jzZquzy82XYBm/FXw4AAADwUGNSOyngm7BVWGbTtuPnrS0ILdKsI18PP/ywIiJce5+B1lgnAAAA4M06BPppRHKU1h84I0laszdXw5OiLK4KzdWs8PXXv/7VxWW0zjoBAAAAbzehd2dH+Fq7L0+PXdfH4orQXJx2CAAAAHiwCb07Ox5/fapQOYVlFlaDliB8AQAAAB4stXOoEiODHdPruOGy1yJ8AQAAAB6sZsj5Gmuzci2sBi3hVPgqKCjQpk2bdPjw4QaXOXz4sF555ZUWFwYAAACg2oReMY7Hn+8/I1uV3cJq0FxNDl///d//rdjYWI0ePVo9evTQFVdcoT179lyy3IYNG3T33Xe7tEgAAACgPRvT4+KQ80VlNmUcO29tQWiWJoWv1157TfPmzVNSUpIeeeQR3Xrrrdqy
ZYtGjBihDz/8sLVrBAAAANq1kAA/jUy5OMT8mn2ceuiNmhS+FixYoIEDB2rHjh3605/+pKVLlyojI0NJSUm6+eabtWzZstauEwAAAGjXxveqdd0Xg254pSaFr927d+vuu+9WYGCgY16/fv20ceNGjRo1SrNmzeI6LwAAAKAVTeh98bqvzFOFOl3AkPPepknhy8fHR6GhoZfMDwsL08qVK3X11Vfrnnvu0aJFi1xeIAAAAAAptXMHdY2qNeQ8ox56nSaFr+TkZGVkZNT7XFBQkJYvX65p06bp/vvv1z/+8Q+XFggAAADgmyHna416yKmH3qdJ4WvixIl6++23VVFRUe/z/v7+evvtt3XLLbdo3bp1Li0QAAAAQLXa9/tav/+MKhly3qv4NWWhO+64Q6dOnVJGRoZGjRpV7zK+vr5aunSp4uPjtW3bNpcWCQAAAEAandpJAX4+qrDZVVRuU8bRcxqZ0snqstBETQpfQ4cO1dKlSxtdzjAMzZ8/v8VFAQAAALhUSICfRiZH6fP9ZyRJa/blEb68SJNvsgwAAADAerVHPVzL/b68CuELAAAA8CK1r/vae7qIIee9COELAAAA8CIp0R3ULSrEMc3RL+/RpGu+mqqoqEivvPKK9u7dq8rKSnXr1k2DBg1SWlqa4uPjXbkpAAAAoF0yDEMTenfWK18elVQ95PxtI7pZXBWawmXha9euXZo0aZJyc+tP3p07d1ZaWlqdn6SkJFdtHgAAAGg3aoevLw5UDznv78tJbZ7OZeHr5z//uXJyciRJ48aNU0JCgg4fPqxdu3appKREubm5+s9//qOVK1c6XhMZGakzZ864qgQAAACgXRidEl1nyPmtR89pFKMeejyXha8vvvhChmHopZde0p133umYb5qm9u/fr23btmnbtm3avn27tm3bpry8PJ07d85VmwcAAADajeAAX41K6aTPsvIkSWv25RK+vIDLwpefn5+CgoLqBC+p+pzUXr16qVevXvrud7/rmJ+dnc3NmAEAAIBmmtCrsyN8rduXp19M6WtxRWiMy04M7du3rwzDaPLy8fHxmjp1qqs2DwAAALQrV/W5eL+vvaeLdKqg1MJq0BQuC1933XWXSktL9fnnn7tqlQAAAAAakBzdQd071R5yPs/CatAULgtfP/jBD5Senq65c+eqtJTUDQAAALS2Cb0u3nCZ+315PpeFLz8/P7377ruqrKzUyJEjlZmZ6apVAwAAAKjHhN4XTz384sBZVdjsFlaDxrj0ZgA5OTnq3bu3du/erYEDB2rcuHH685//rHXr1qmoqMiVmwIAAADavVEpnRToV71LX/zNkPPwXC4b7XDt2rWaMmWKKioqJEl2u13r16/XF198Ial61MPU1FSlpaUpPT1d6enpSktLU0REhKtKAAAAANqVmiHn130z6uHafbkancqQ857KZeFr3rx5Ki8vV1BQkL7//e8rMTFRR44c0fbt27Vnzx5VVlZq//792r9/v958801J1YHMZrO5qoQ6VqxY4RhNsXv37jpy5EirbAcAAACw0oTenWuFrzz94nqGnPdULgtf27dvl2EYev/993XttdfWea6yslK7du1SRkaGtm3bpoyMDO3cubPVBuYoKirSfffd1yrrBgAAADzJVb1j9JvlX0uS9uUUKft8qeI7BltcFerjsvDl7++vDh06XBK8ap5LS0tTWlqaY57dbtfevXtdtfk6Hn/8cR0/flw33XST3nvvvVbZBgAAAOAJkqI7KKlTiI6cvSCp+ujXrJHdLK4K9XHZgBuDBw9WZWWlqqqqmrZhHx/169fPVZt3WL9+vV588UXNmDFD06dPd/n6AQAAAE9Te9RDhpz3XC69z1d5eblWrFjhqlU6raysTD/84Q8VGhqqBQsWWFYHAAAA4E7je1+839cXB84w5LyHcln4mjVrlqZOnaqHH35YOTk5rlqtU55++mnt27dPv//975WQkGBJDQAAAIC7ja415HxJRZW2HM23uCLUx2XXfN1www3q2bOnvvjiCw0ZMkT/+Mc/HKMNusP27dv1zDPPaMSIEfrxj3/stu2aptlqIzY2Re1tW1kHvAc9A2fRM3AWPQNn0C+u4WdIo1KitC7rjCRpdWaORnTvaG1RrcRTe8bPr/FoZZimabpiYz4+PjIMo868Ll26aNq0aRo+fLjS0tI0cODAJhXlrKqqKo0YMUI7d+7Uli1bNHjwYEnS4sWLdffddzd5qPmFCxdq0aJFTdpmZmamSktLlZKSovnz57ekfAAAAKDFPjtl6O0jvpKkuGBTvxjStLEY4BpNGW/CZUnokUce0Y4dO7R9+3bl51cf5szOztbf//53/f3vf5dUPeph//79HSMfpqWlaeTIkS3e9p/+9CdlZGTosccecwSv5jh16pQyMjJaXA8AAADgbn07XjymcrrU0LlyKTLQwoJwCZeFrz//+c+Ox8eOHdO2bdvq/Jw4cUIVFRXatm2btm/frn/+858uucny/v379dRTTyk5OVnz5s1r0bq6dOlSZzj8y6k58hUREeHW0yu/zWazaeXKlZKkyZMnt8qRRbQt9AycRc/AWfQMnEG/uNaS4587hpz36zpIU4d3tbgi1/PmnmmVSrt166Zu3brVOfSWn59fJ4xlZGRo//79Ld7Wfffdp7KyMr3wwgsKCQlp0brmzJmjOXPmNGnZ9PR0ZWRkyDAMj/mD+/n5eUwt8A70DJxFz8BZ9AycQb+03ITeMVq84Ygkad3+s/r+6GRrC2pl3tYzbqs0KipKV199ta6++mrHvNLS0havd+vWrTIMQ3fdddclz9Ws//jx44qLi5MkvfPOOxozZkyLtwsAAAB4mgm9OzvC14ZvhpwP8HPZAOdoIUtjYnBwsEvWY5rmZYe3t9vtjucrKipcsk0AAADA04xK6aQgfx+VVdqrh5w/kq8xPaKtLgvfaFYM/uCDD/Txxx+7tJDmrvP8+fMyTbPen5deekmS1L17d8e8CRMmuLRuAAAAwFME+ftqdEonx/TyndkWVoNva1b4uummm3T33Xe7tJDWWCcAAADQ3kwdFO94/O62kzp/gTO/PEWzTwB10e3BWn2dAAAAQHsybVAXdeoQIEkqq7Trjc3HLa4INZp9zVdpaaleeeUVV9YCAAAAoIWC/H01a2Q3LVh9QJL0ypdH9YMrkuXny8AbVmt2+CosLPT40wRnz56t2bNnW10GAAAA4Fa3j+yuF9YelM1u6uT5Uq3KzNV1A+KsLqvda1b46tatmwzDcHUtiomJcfk6AQAAgPYmLiJI1w2I04c7T0mSFm84TPjyAM0KX0eOHHFxGQAAAABc6e6xSY7wtfFQvjJPFapvl3CLq2rfOPETAAAAaIPSukVqYEKEY/rlb26+DOsQvgAAAIA2yDAMzR6T5Jh+d9tJnSth2HkrEb4AAACANmra4C6KDq0edr7cZtfrDDtvKcIXAAAA0EYF+vlq1ohujul/fXlEtiq7hRW1b4QvAAAAoA27fVR3+flUj1SeXVCmVZk5FlfUfhG+AAAAgDYsNjxI1w/s4ph+6Ysj1hXTzhG+AAA
AgDZu9tgkx+OvDufr6+xC64ppxwhfAAAAQBs3tGtHDU5k2HmrEb4AAACANs4wjDpHv97bflL5DDvvdoQvAAAAoB24fmAXRYcGSqoZdv6YxRW1P4QvAAAAoB0I9PPV7SNrDzt/lGHn3YzwBQAAALQTt4/s5hh2/lRBmT7+mmHn3cnl4WvHjh2699571a9fP4WHh8vX17fBHz8/P1dvHgAAAEADYsKDNHXQxWHnFzPsvFu5NP08//zz+ulPf6qqqiqZpunKVQMAAABwgdljkvT+9mxJ0qYj+dp9skADEiIaeRVcwWVHvr766ivNnTtXVVVV+vGPf6wVK1ZIkqKiorRq1SotWbJEs2fPVkBAgKKjo/Xaa69p9erVrto8AAAAgCYY2i1Sg7t2dEwz7Lz7uCx8PffcczJNU3PnztWCBQt03XXXSZICAgI0ceJEzZo1S//85z+1ceNGGYahJ598Umlpaa7aPAAAAIAmuntMkuPx+zuydba43Lpi2hGXha8vvvhChmFo7ty5deZ/+/TDIUOGaMGCBTp48KCeeeYZV20eAAAAQBNdP7CLOodVDztfYbPr9c3HLa6ofXBZ+MrJyVFgYKC6d+9+ceU+PiorK7tk2RkzZsjf31/vvPOOqzYPAAAAoIkC/HwuGXa+kmHnW53LwldISIj8/f3rzAsLC1NhYaHKy+sexvT391dISIiOHj3qqs0DAAAAcMKskd3k71s97PzpwjKt3HPa4oraPpeFr4SEBBUXF6uwsNAxLzU1VZK0efPmOstmZ2eroKCAEREBAAAAi8SEBWnaoHjHNMPOtz6Xha9BgwZJkvbt2+eYN2HCBJmmqaefftpx+mFFRYUefvhhSdLAgQNdtXkAAAAATppda+CNLUfPaffJAuuKaQdcFr6mTZsm0zT1xhtvOOY98MADCgwM1KeffqrExESNHTtWCQkJevfdd2UYhh588EFXbR4AAACAkwZ37aih3To6phcz7Hyrcln4uv766zVv3jz17NnTMS85OVmvvfaawsLClJ+fry+//FJnz56VYRh67LHHdPvtt7tq8wAAAACaofbRrw+2Z+sMw863Gj9XrSg8PFzz5s27ZP6MGTM0fvx4rVixQsePH1dERIQmTZqkHj16uGrTAAAAAJppyoAu+l1YpnKLylVRZdfrm47pwYk9G38hnOay8HU5UVFR+v73v++OTQEAAABwQoCfj74/qrvmf5IlSfrXxqOaMz5V/r4uO0kO32i136hpmjpz5oyOHTvWWpsAAAAA4ALfG9FNAd+ErZzCcv1nN8POtwaXh6+MjAzNnDlTERERio2NVUpKSp3nz507pzlz5ui+++5TRUWFqzcPAAAAwEmdwwI1bVAXx/SHO7MtrKbtcmn4+te//qXRo0frvffeU3FxsUzTvOReXpGRkTp8+LD+/ve/65NPPnHl5gEAAAA007TBF8PXxkP5stu5J6+ruSx8ZWZm6kc/+pEqKyv18MMPa8uWLYqOjq532TvvvFOmaer999931eYBAAAAtMDwpCj5+hiSpILSSn19qtDiitoel4Wv+fPnq6KiQg888ID++te/Ki0tTb6+vvUuO3HiREnSl19+6arNAwAAAGiBsCB/DUqMcEx/efCshdW0TS4LX6tXr5ZhGHr88ccbXTY+Pl4hISEMxgEAAAB4kDGpnRyPNxw8Y2ElbZPLwld2drY6dOigxMTEJi0fHBys0tJSV20eAAAAQAuNSb142dCmw/mqrLJbWE3b47LwFRgYqIqKiksG2KhPaWmpzp8/r4iIiEaXBQAAAOAe6d0jHUPOl1RUaeeJAosraltcFr6SkpJUWVmp/fv3N7rsihUrVFVVpX79+rlq8wAAAABaKMjfV2ndOzqmv+TUQ5dyWfi67rrrZJqmnn322csud/bsWT322GMyDENTp0511eYBAAAAuEDtUw83MOiGS7ksfD3yyCMKDQ3Viy++qN/85jcqKiqq83xpaalee+01DRs2TIcPH1anTp103333uWTby5cv1wMPPKBRo0YpMTFRQUFBCg0N1YABA/STn/xER48edcl2AAAAgLau9qAbW4+eU1lllYXVtC0uC1+xsbF67bXX5O/vr6efflqdO3fW2bPVSbl///6KiorSHXfcoaNHjyowMFBLly5VeHi4S7b95z//WX/729+UkZEhX19fDRw4UJ07d1ZmZqaeffZZ9evXTx9//LFLtgUAAAC0ZYMSOyokoPqWUeU2u7YdO29tQW2Iy8KXJE2bNk2fffaZ0tPTVVFRIZvNJtM0lZmZqfLycpmmqaFDh+qzzz7T1Vdf7bLt3nPPPVq1apWKiop09OhRbd68WYcPH1ZWVpbGjRunCxcu6Pbbb1dJSYnLtgkAAAC0RQF+PhqWFOWY5rov1/Fz9QpHjBihTZs2aefOnVq/fr2ys7NVVVWluLg4jR07VsOGDXP1JnXnnXfWOz81NVVvvvmm4uLidObMGX322WeaMmWKy7cPAAAAtCVjUjvps6w8SdXXff3U4nraCpeFr88++0ySNGjQIHXs2FGDBg3SoEGDXLX6ZouNjVVUVJTy8/N14cIFq8sBAAAAPF7t6762Hz+vknKbOgS6/LhNu+Oy0w4nTJigiRMnNuk+X+6UmZmp/Px8+fj4aOjQoVaXAwAAAHi8/vERCguqDls2u6nNR/ItrqhtcFl8jYiIkK+vryIjI121ymYzTVN5eXlav369Hn/8cUnSz372M6WkpLTKtmw2m8vX21S1t21lHfAe9AycRc/AWfQMnEG/eK6RyVFalZkrSfpif56uSI1q5BXu4ak94+fXeLQyTBcdqho+fLh27typwsJCBQYGumKVTluyZInuuOOOOvP69OmjJ598UrNmzWr09QsXLtSiRYuatK3MzEyVlpYqJSVF8+fPb1a9AAAAgKdad8rQO0eqRz3s2sHUzwYx5PzlTJ8+vdFlXHbk67bbbtPWrVv15ptvXhKA3CUmJkZjx46V3W7XiRMndPLkSWVlZenVV1/VuHHjlJiYeNnXnzp1ShkZGW6qFgAAAPBcPSMuHqM5USJdsEkhXPbVIi478mWz2TR+/Hjt3r1bS5cu1fXXX++K1bbIoUOH9Oijj+q9995TQkKC9uzZo4iIiAaXb86Rr6FDh2rTpk2uKtlpNptNK1eulCRNnjy5SYc70b7RM3AWPQNn0TNwBv3iuUzT1Mj/Xav8kgpJ0guzhujafrEWV+W5PdOUOlxW6e9//3uNGzdOu3bt0g033KD+/ftr7NixiomJka+vb4Ov+6//+i9XlXCJlJQUvfXWWxo8eLD27Nmj559/Xr/61a8aXH7OnDmaM2dOk9adnp6ujIwMGYbhUX9wT6kF3oGegbPoGTiLnoEz6BfPMya1kz7ceUqS9NWR85oyKMHiiurytp5xWaVPPfWUDMNwjHa4e/du7dmzp9HXtWb4kiRfX19NmTJFe/bs0ZYtW1p1WwAAAEBbMiY12hG+NnCz5RZzWfgaN26cDMNw1epcqrKyUpJkt9strgQAAADwHrXv95WVU6y8onJ1DrNmcL22wGXha+3ata5alU
tVVFToww8/lCTu8wUAAAA4oXunEMVHBCm7oEyS9OWhs7pxcLzFVXkvl91k2SpbtmzRk08+qf3791/yXFZWlm644QYdPHhQoaGh+tGPfmRBhQAAAIB3MgxDo1OjHdNfcuphi3jP1WkNKC4u1m9/+1v99re/VefOndW1a1f5+/vr1KlTOnbsmCQpKipKy5YtU0KCZ10gCAAAAHi6Mamd9HbGCUnShoNnLa7Gu3l9+Bo8eLCee+45rV27Vrt27dKBAwd04cIFRURE6IorrtB1112nOXPmKDo6uvGVAQAAAKhjdK3rvo6evaAT5y4oMTLEwoq8l8vD144dO/T//t//0/r163XixAmVlJQ0uKxhGLLZbC3aXmRkpB566CE99NBDLVoPAAAAgEvFdwxWcnQHHT5TvV//5cGzumUY4as5XBq+nn/+ef30pz9VVVWVXHTvZgAAAAAWG53a6Vvhq6vFFXknlw248dVXX2nu3LmqqqrSj3/8Y61YsUJS9fVWq1at0pIlSzR79mwFBAQoOjpar732mlavXu2qzQMAAABoJbWHnN9w8CwHWprJZUe+nnvuOZmmqZ/85CeaP3++Y35AQIAmTpwoSZo1a5YefvhhTZ48WU8++aQyMjJctXkAAAAArWRUysXwdbqwTIfPlCilc6iFFXknlx35+uKLL2QYhubOnVtn/rdT8ZAhQ7RgwQIdPHhQzzzzjKs2DwAAAKCVRIcGqk9cmGOaUQ+bx2XhKycnR4GBgerevfvFlfv4qKys7JJlZ8yYIX9/f73zzjuu2jwAAACAVlR71MMvCV/N4rLwFRISIn9//zrzwsLCVFhYqPLy8jrz/f39FRISoqNHj7pq8wAAAABa0ZhaN1veeOis7Hau+3KWy8JXQkKCiouLVVhY6JiXmpoqSdq8eXOdZbOzs1VQUMCFegAAAICXGJEcJR+j+vHZkgpl5RZZW5AXcln4GjRokCRp3759jnkTJkyQaZp6+umnHacfVlRU6OGHH5YkDRw40FWbBwAAANCKIoL9NTAhwjG94QCnHjrLZeFr2rRpMk1Tb7zxhmPeAw88oMDAQH366adKTEzU2LFjlZCQoHfffVeGYejBBx901eYBAAAAtLLRtU49ZNAN57ksfF1//fWaN2+eevbs6ZiXnJys1157TWFhYcrPz9eXX36ps2fPyjAMPfbYY7r99ttdtXkAAAAAraz2/b6+OnRWtiq7hdV4H5fd5ys8PFzz5s27ZP6MGTM0fvx4rVixQsePH1dERIQmTZqkHj16uGrTAAAAANxgWFKk/H0NVVaZKiq3aU92oQZ37Wh1WV7DZeHrcqKiovT973/fHZsCAAAA0EpCAvw0tGukNh3Jl1R96iHhq+lcdtohAAAAgLav9v2+Nhw8Y2El3ofwBQAAAKDJal/3tflIvipsXPfVVIQvAAAAAE02pFtHBflXx4iySru2Hz9vbUFehPAFAAAAoMkC/Xw1PCnKMc2ph01H+AIAAADglLrXfXG/r6YifAEAAABwyuiUi+Fr27FzKq2osrAa70H4AgAAAOCUgQkRCg2svmtVZZWpLUfzLa7IOxC+AAAAADjFz9dHI5NrX/fFqYdNQfgCAAAA4DSu+3Ie4QsAAACA08akRjse7zpxXoVllRZW4x0IXwAAAACc1icuTJEh/pIkuyltOsR1X40hfAEAAABwmo+PwamHTiJ8AQAAAGiW0bVOPeRmy40jfAEAAABoljG1jnztPV2ks8XlFlbj+QhfAAAAAJolJbqDYsMDHdMbue7rsghfAAAAAJrFMIw6ox5y6uHlEb4AAAAANFvtQTe+PMSgG5dD+AIAAADQbLWv+zqUV6LTBWUWVuPZCF8AAAAAmi0xMkTdokIc018e4tTDhhC+AAAAALRI7aNfGw5w6mFDCF8AAAAAWuTbN1s2TdPCajwX4QsAAABAi9QOXyfPl+p4fqmF1XguwhcAAACAFokJC1LPmFDHNEPO14/wBQAAAKDFxnzr1ENcivAFAAAAoMVG17nZMtd91cfrw5dpmtqwYYOeeOIJXXHFFerUqZP8/f3VuXNnTZo0Sa+++ip/eAAAAKCVjUqJkmFUPz5TXK4DucXWFuSB/KwuoKVWr16ta665xjGdkpKi5ORkHT58WJ988ok++eQTLV26VG+//bYCAwMtrBQAAABouzqGBKh/fLh2nyyUVH30q2dsmMVVeZY2ceQrOTlZzz77rHJycnTw4EFt2bJFZ8+e1SuvvKLAwEB99NFHmjdvntWlAgAAAG3amDqnHjLoxrd5ffgaMWKE9u3bp4cfflgxMTF1nrvjjjv0X//1X5Kkv//977Lb7VaUCAAAALQLtYec33goX1V2Lv+pzevDV3h4uPz9/Rt8fsqUKZKk/Px85eXluassAAAAoN0ZnhQlP5/qC78KSiuVearQ4oo8i9eHr8aUlZU5HgcHB1tYCQAAANC2hQb6aXDXjo5pTj2sy+sH3GjM0qVLJUmDBw9WeHi4y9dvmqZsNpvL19tUtbdtZR3wHvQMnEXPwFn0DJxBv7Q9I5MitfXoOUnSFwfO6J4x3V26fk/tGT+/xqOVYbbhcdgzMjI0atQoVVZWaunSpbrtttsuu/zChQu1aNGiJq07MzNTpaWlSklJ0fz5811RLgAAAOD1sgoM/b+vfSVJAT6m/nd4lXzb/Pl20vTp0xtdps0e+crJydGMGTNUWVmpGTNmNBq8JOnUqVPKyMhwQ3UAAABA25QUasrPMGUzDVXYDR0rkZIZcV5SGw1fBQUFmjJlio4dO6b09HQtXry4Sa/r0qWL0tLSmrRszZGviIgITZ06tQXVtozNZtPKlSslSZMnT27S4U60b/QMnEXPwFn0DJxBv7RNb+dt1sbD+ZIkI7aPpl6V6rJ1e3PPeE+lTVRcXKzrrrtO27ZtU//+/bVy5comX+s1Z84czZkzp0nLpqenKyMjQ4ZheMwf3M/Pz2NqgXegZ+AsegbOomfgDPql7RjbI9oRvjYePqe517bO39XbeqZNnX154cIFTZ06VRs3blSvXr20atUqderUqfEXAgAAAHCZMT0u7oNvPXZOZZVVFlbjOdpM+CorK9P06dP12WefKSkpSZ9++qni4uKsLgsAAABodwYldlRIQPWgGxU2uzK+Gf2wvWsT4auyslI333yzVq1apcTERK1evVqJiYlWlwUAAAC0S/6+PhqRHOWY3nDwrIXVeA6vD19VVVW6/fbbtWLFCsXFxWn16tVKTk62uiwAAACgXRuTevHUQ262XM17rk5rwJtvvqlly5ZJkoKCgnT33Xc3uOyCBQs0dOhQd5UGAAAAtFtjUqMdj3ecKFBxuU2hgV4fP1rE6//15eXljsdHjhzRkSNHGly2oKDADRUBAAAA6NslXBHB/ioorVSV3dTmw/m6qk+M1WVZyutPO5w9e7ZM02zSz4QJE6wuFwAAAGgXfH0MjUqpfd0Xpx56ffgCAAAA4Jlqn3rIoBuELwAAAACtpPagG1+fKtS5kgoLq7Ee4QsAAABAq+gRE6ro0EBJkmlKXx1u30e/CF8AAAAAWoVhGN8ac
p7wBQAAAACtgvB1EeELAAAAQKupPejGgdxi5RaWWViNtQhfAAAAAFpN16hgJXQMdkx/eaj9Hv0ifAEAAABoNZdc93WA8AUAAAAArWJMj1rh61D7vdky4QsAAABAqxqdcvG6r+P5pTqef8HCaqxD+AIAAADQquIigpQS3cEx/WU7HfWQ8AUAAACg1Y2uM+R8+zz1kPAFAAAAoNXVHnJ+w8GzMk3TwmqsQfgCAAAA0OpGpUQ5HucWletgXomF1ViD8AUAAACg1XUKDVSfuDDH9Jft8NRDwhcAAAAAt6h96mF7vNky4QsAAACAW9S+2fKXB8/Kbm9f130RvgAAAAC4xYiUKPkY1Y/PXajU3tNF1hbkZoQvAAAAAG4RHuSvgYkdHdPtbch5whcAAAAAt/n2qYftCeELAAAAgNvUDl9fHc6XrcpuYTXuRfgCAAAA4DbDukfJ37f6wq/icpt2nSywuCL3IXwBAAAAcJvgAF8N7RbpmN7Qjk49JHwBAAAAcKv2et0X4QsAAACAW9W+2fLmI/kqt1VZWI37EL4AAAAAuNWQrh0V5F8dRcptdm07dt7agtyE8AUAAADArQL8fDQ8Kcox3V6u+yJ8AQAAAHC72qcefr4/z8JK3IfwBQAAAMDtrux5MXxtO3ZeOYVlFlbjHoQvAAAAAG7XPz5ciZHBjumP95y2sBr3IHwBAAAAcDvDMHRd/zjH9H8IXwAAAADQOqYMvBi+Nh7K17mSCguraX2ELwAAAACWGNo1UjFhgZKkKrupTzJzLK6odRG+AAAAAFjCx8fQ5NqnHu5u26ceEr4AAAAAWOa6ARfD1/r9Z1RUVmlhNa2L8AUAAADAMiOTo9QxxF+SVFFl1+q9uRZX1HraRPg6ffq0lixZorlz52rs2LEKCQmRYRgaNmyY1aUBAAAAuAw/Xx9d2zfWMb2yDY966Gd1Aa7w+uuv65FHHrG6DAAAAADNcN2AOC3bekKStGZvnkorqhQc4GtxVa7XJsJXeHi4rrnmGg0bNkzDhg1TVlaWfvnLX1pdFgAAAIAmGNsjWqGBfiout6m0skqf7c+rMxBHW9Emwtc999yje+65xzG9ePFi64oBAAAA4JQgf19d1SdGy3dkS5JW7j7dJsNXm7jmCwAAAIB3m1Jr1MNPMnNUYbNbWE3rIHwBAAAAsNz4Xp0V6FcdT4rKbPry0FmLK3K9NnHaoZVM05TNZrNs+7W3bWUd8B70DJxFz8BZ9AycQb+gRqCvNK5ntD7JrB5qfsXObI1NibxkOU/tGT+/xqOVYZqm6YZa3Grx4sW6++67lZ6eri1btjT5dQsXLtSiRYuatGxmZqZKS0uVkpKi+fPnN7dUAAAAAN/YnGdoyYHqUQ5D/Uz997Aq+RgWF9VE06dPb3QZjnzVcurUKWVkZFhdBgAAANAu9Y805WOYspuGim2GDhVKPSKsrsp1CF+1dOnSRWlpaU1atubIV0REhKZOndrKlTXMZrNp5cqVkqTJkyc36XAn2jd6Bs6iZ+AsegbOoF/wbSvOb9Hn+6uv9yqMSNHUqX3rPO/NPeM9lbrBnDlzNGfOnCYtm56eroyMDBmG4TF/cD8/P4+pBd6BnoGz6Bk4i56BM+gXSNL1A+Md4evjr3M174YB8mng3ENv6xlGOwQAAADgMa7tF+u4zutUQZl2niywtiAXInwBAAAA8BjRoYEanhTlmP737lMWVuNahC8AAAAAHuW6WjdcXrn7tNrKAO2ELwAAAAAeZXL/i+HryNkL2pdTZGE1rtMmwtfx48cVHR3t+HnooYckSTt27Kgz/49//KPFlQIAAABoTHzHYA3u2tEx/e9dp60rxoXaRPiqqqrS2bNnHT/FxcWSqoehrD3/woULFlcKAAAAoCmuq3X0a+WethG+vGdcxstISkpqM+eBAgAAAKi+7usP/9krSdp7ukiHz5QoObqDxVW1TJs48gUAAACgbUmO7qA+cWGO6f/s9v6jX4QvAAAAAB6p9qiH/2kDQ84TvgAAAAB4pNrha8eJAp08X2phNS1H+AIAAADgkXrHhtW5zmull596SPgCAAAA4JEMw6hzz6//ePmoh4QvAAAAAB5rSq1TDzcfydeZ4nILq2kZwhcAAAAAjzUoMULxEUGSJNOUVmXmWlxR8xG+AAAAAHgswzA0udbRr5Vf51hYTcsQvgAAAAB4tOtqXff15cF8XbBZWEwLEL4AAAAAeLRhSVGKDg2QJNnspvacMyyuqHkIXwAAAAA8mq+PoWv71brn11nCFwAAAAC0ito3XN573lB5lYXFNBPhCwAAAIDHG53SSeFBfpKkStPQ1+e97+gX4QsAAACAxwvw89E1fWMd0zu98NRDwhcAAAAAr1D71MM95wyVV3rXuYeELwAAAABeYVyvzgoJ8JUkldsNfXHwrMUVOYfwBQAAAMArBPn7anyvaMf0x1/nWliN8whfAAAAALzG5H4Xr/talZmryiq7hdU4h/AFAAAAwGtM6N1ZvoYpSYrs4K9T58ssrqjp/KwuAAAAAACaKjTQT7NS7UroYOoH37lC/v7+VpfUZIQvAAAAAF5lWOfqI1+G4V3DzXPaIQAAAAC4AeELAAAAANyA8AUAAAAAbkD4AgAAAAA3IHwBAAAAgBsQvgAAAADADQhfAAAAAOAGhC8AAAAAcAPCFwAAAAC4AeELAAAAANyA8AUAAAAAbkD4AgAAAAA3IHwBAAAAgBsYpmmaVhfhjaKionTu3DkFBwerb9++ltVhmqYKCgokSRERETIMw7Ja4B3oGTiLnoGz6Bk4g36Bszy5Z/r06aNXX321wef93FhLm1JWViZJKi0tVUZGhsXVAAAAAPB0hK9miomJUW5uroKCgpScnGxpLZmZmSotLbX8KBy8Bz0DZ9EzcBY9A2fQL3CWp/ZMnz59Lvs84auZjhw5YnUJDunp6crIyFDfvn21detWq8uBF6Bn4Cx6Bs6iZ+AM+gXO8taeYcANAAAAAHADwhcAAAAAuAHhCwAAAADcgPAFAAAAAG5A+AIAAAAANyB8AQAAAIAbEL4AAAAAwA0IXwAAAADgBoQvAAAAAHADwhcAAAAAuIGf1QWg5e69916dOnVKXbp0sboUeAl6Bs6iZ+AsegbOoF/gLG/tGcM0TdPqIgAAAACgreO0QwAAAABwA8IXAAAAALgB4QsAAAAA3IDwBQAAAABuQPjyYmvWrNG0adPUuXNnBQcHq0+fPnryySdVUlJidWmwwOnTp7VkyRLNnTtXY8eOVUhIiAzD0LBhwxp9bWVlpZ555hkNHjxYHTp0UFRUlCZOnKh33nnHDZXDCqZpasOGDXriiSd0xRVXqFOnTvL391fnzp01adIkvfrqq7rceEz0TPu0fPlyPfDAAxo1apQSExMVFBSk0NBQDRgwQD/5yU909OjRBl9Lz0CSVqxYIcMwZBiGkpKSGlyOfmm/nnrqKUePNPTz4osv1vtar+gbE17pueeeMw3DMCWZiYmJ5tChQ83AwEBTktm3b1/z7NmzVpcIN/vLX/5iSrrkJz09/bKvKy0tNa+44gpTkunr62sOGjTI
TE1Ndbz+8ccfd9O/AO60atWqOn2SkpJipqenm1FRUY55U6dONcvKyi55LT3Tfo0fP96UZPr7+5vdunUzhw0bZiYlJZk+Pj6mJDMkJMRcuXLlJa+jZ2CapllYWGh27drV8Xfv3r17vcvRL+3bvHnzTElmTEyMOXbs2Hp/3nvvvUte5y19Q/jyQlu2bDF9fHxMwzDMhQsXmna73TRN0zx58qSZnp5uSjJnzpxpcZVwt//7v/8zr7nmGvOJJ54w33rrLfP3v/99k8LXww8/bEoyk5OTzb179zrmv//++45A/8EHH7R2+XCzTz75xExOTjafffZZMycnp85zr7zyiuNvX9+HFT3Tfr388svmqlWrLgnlBw4cMMeNG2dKMqOjo83i4uI6z9MzME3TvP/++01J5k033XTZ8EW/tG814euuu+5y6nXe0jeELy80ffp0U5J55513XvJcVlaW4xvIHTt2WFAdPMVLL73UaPg6ffq0GRAQYEoyV69efcnzTz75pCnJTEtLa81SYYGCggKzoqKiwed/97vfmZLMqKgos6qqyjGfnkFDTp8+7fiGecWKFXXm0zP4/PPPTcMwzBkzZjg+n+oLX/QLmhO+vKlvuObLyxQXF+s///mPpOo7e39bz549NXHiREnSsmXL3FobvM8HH3ygiooK9ejRQ1ddddUlz8+ZM0eSlJGRoYMHD7q7PLSi8PBw+fv7N/j8lClTJEn5+fnKy8tzzKdn0JDY2FhFRUVJki5cuOCYT8+grKxMP/zhDxUaGqoFCxZcdln6Bc3hTX1D+PIy27ZtU3l5uQIDAzVixIh6l7nyyislSRs3bnRnafBCNT1S0zPflpCQoOTk5DrLon0oKytzPA4ODnY8pmfQkMzMTOXn58vHx0dDhw51zKdn8PTTT2vfvn36/e9/r4SEhMsuS7+gxo4dOzRr1ixNnDhR06dP15NPPqk9e/bUu6w39Q3hy8tkZWVJkrp169bgt9apqamSpH379rmtLninmn7q0aNHg8vQT+3T0qVLJUmDBw9WeHi4Yz49g9pM01Rubq7eeecd3XjjjZKkn/3sZ0pJSXEsQ8+0b9u3b9czzzyjESNG6Mc//nGjy9MvqLF9+3YtXbpUa9as0QcffKDf/va3GjhwoB555BFVVVXVWdab+obw5WXy8/MlyXFqR31qnjt37pxbaoL3op9Qn4yMDMcwvk888USd5+gZSNKSJUtkGIZ8fHwUGxurm2++WX5+fnr11Vf1hz/8oc6y9Ez7VVVVpR/84AeSpEWLFsnHp/HdTvoFcXFxeuyxx7Rx40bl5eWprKxMO3fu1H333SfTNPXXv/5Vv/zlL+u8xpv6xs/SrcNpNacCBQQENLhMYGCgJKm0tNQtNcF70U/4tpycHM2YMUOVlZWaMWOGbrvttjrP0zOQpJiYGI0dO1Z2u10nTpzQyZMnlZWVpVdffVXjxo1TYmKiY1l6pv3605/+pIyMDD322GMaPHhwk15Dv+C+++67ZN7AgQP1wgsvKDk5WY8//rjmz5+v+++/33GvOG/qG458eZmgoCBJUkVFRYPLlJeXS6p7nQZQH/oJtRUUFGjKlCk6duyY0tPTtXjx4kuWoWcgSZMmTdL69eu1YcMGHTt2TPv379eNN96oFStWaNSoUSooKHAsS8+0T/v379dTTz2l5ORkzZs3r8mvo19wOY8++qji4+Nls9m0fPlyx3xv6hvCl5eJjIyUdPHwan1qnqtZFmgI/YQaxcXFuu6667Rt2zb1799fK1eurHOtVw16BvVJSUnRW2+9pf79++vkyZN6/vnnHc/RM+3Tfffdp7KyMr3wwgsKCQlp8uvoF1yOr6+vRo4cKenidV6Sd/UN4cvL9OrVS5J07NgxVVZW1rtMzRCaNcsCDanpkQMHDjS4DP3U9l24cEFTp07Vxo0b1atXL61atUqdOnWqd1l6Bg3x9fV13KJgy5Ytjvn0TPu0detWGYahu+66S3FxcXV+5s6dK0k6fvy4Y96GDRsk0S9oXM2phTabzTHPm/qG8OVl0tLSFBAQoPLycm3atKneZT7//HNJ0ujRo91ZGrzQqFGjJEnr16+v9/mTJ0/q8OHDdZZF21JWVqbp06frs88+U1JSkj799FPFxcU1uDw9g8up+VLQbrc75tEz7ZdpmsrJybnkp7CwUFJ1n9TMqzldjH5BY3bv3i1Jda4t9aa+IXx5mdDQUE2ePFlS9chB37Z//36tXr1akvSd73zHrbXB+0yfPl3+/v7av3+/1qxZc8nzCxculCQNHTr0ssO3wjtVVlbq5ptv1qpVq5SYmKjVq1fX+TCrDz2DhlRUVOjDDz+UpDr3+aJn2qfz58/LNM16f1566SVJUvfu3R3zJkyYIIl+weV99NFHjnt9TZo0yTHfq/rGhNfZtGmTaRiGaRiGuXDhQtNut5umaZrZ2dlmenq6Kcm86aabLK4SVnvppZdMSWZ6evpll3vwwQdNSWZycrK5d+9ex/wPPvjADAwMNCWZ7733XmuXCzez2WzmLbfcYkoy4+LizKysrCa/lp5pnzZv3mz++te/rrdX9u3bZ06aNMmUZIaGhponTpyo8zw9g9pqPp+6d+9e7/P0S/u1e/du89577zW3b99eZ35VVZX52muvmeHh4aYkc+rUqZe81lv6hvDlpf7yl7+YhmGYksyuXbuaQ4cOdTRW7969zby8PKtLhJsdO3bM7NSpk+MnNDTUlGT6+fnVmf+HP/yhzusuXLhgjh492pRk+vr6moMHDzZTU1NNSaYk89FHH7XoX4TW9Nprrzn+xklJSebYsWMb/MnIyKjzWnqmfVqzZo3jb9y5c2czLS3NHDlypNmtWzfH/KioKPPTTz+95LX0DGprLHzRL+3Xtm3b6ryfDB061Bw+fLgZGRnpmH/llVea586du+S13tI3hC8vtmrVKnPKlClmVFSUGRgYaPbq1cv85S9/aRYVFVldGixw+PBhxxvM5X7mzZt3yWvLy8vNP/zhD+bAgQPN4OBgMyIiwhw/frz51ltvuf8fAreo2flpys+aNWsueT090/7k5+ebzz33nDlz5kyzZ8+eZnh4uOPLnSuuuML87W9/e9kv/ugZ1GgsfJkm/dJenTt3zvztb39rTp061UxJSTHDwsJMf39/MzY21pwyZYr5r3/9y7TZbA2+3hv6xjBN02zxuYsAAAAAgMtiwA0AAAAAcAPCFwAAAAC4AeELAAAAANyA8AUAAAAAbkD4AgAAAAA3IHwBAAAAgBsQvgAAAADADQhfAAAAAOAGhC8AAAAAcAPCFwDALZKSkmQYhhYvXmx1Kc1y7Ngx3X333erWrZsCAgJkGIY6duxodVkAAC/iZ3UBAADvt3jxYh05ckQTJkzQhAkTrC7H5QoKCjR27FidOHFCkhQREaGgoCBFRERYXBkAwJsQvgAALbZ48WKtW7dOkhoMX6mpqV4bWJYuXaoTJ04oMjJSGzZsUJ8+fawuCQDghQhfAAC3+PTTT60uodl27dolSZo4cSLBCwDQbFzzBQBAIy5cuCBJCg0NtbgSAIA3I3wBAJpt8eL
FMgzDccrhb37zGxmGUefnyJEjki4/4EbNsmvXrtXZs2f105/+VKmpqQoODlb37t314IMPKi8vz7H80aNHdf/99ys5OVlBQUHq1q2bHn30URUVFV223oKCAv3ud7/TyJEjFRkZqcDAQHXt2lXf+973tHHjxkuWnzBhQp2aX3755Tr/tpr5a9eudcyTpG3btun2229XYmKi/P3965yKmZubq3/+85+aOXOm+vbtq4iICAUHB6tHjx764Q9/qD179jRY/+zZs2UYhmbPnu34/Y8ePVoRERGKiorSNddco88++8yxvM1m04IFC5Senq7w8HBFRETo+uuvV0ZGxmV/T5L03nvv6aabblJ8fLwCAgIUGRmpcePG6cUXX1RlZWWjrwcA1MMEAKCZXn/9dTM2Ntb09/c3JZkdOnQwY2Nj6/wcO3bMNE3T7N69uynJfOmlly5ZjyRTkvnyyy+biYmJjnUFBAQ4nuvbt6957tw5c9OmTWZ0dLQpyQwPDzf9/Pwcy4wdO9a02Wz11rpx40YzNjbWsayvr68ZFhbmmDYMw/z9739f5zUzZswwY2NjzaCgIFOSGRQUVOff9vrrr5umaZpr1qxxrOett95y/D7Cw8PNoKAgc/z48Y513nXXXY5l6/s3BAYGmm+99Va9/4aa1951112Ox35+fnX+HX5+fuby5cvNsrIyc9KkSaYkMyAgwOzQoYNjmZCQEHPLli31bqOoqMicNm3aJTUahuGYHj16tJmfn99YewAAvoXwBQBosfHjx5uSzHnz5jW4TFPCV8eOHc0hQ4aYGzduNE3TNCsqKsylS5eaISEhpiTzwQcfNLt3725OnDjR3L17t2mapllaWmouWLDA9PX1NSWZf//73y9Z/+HDh82OHTuakszvfOc75tatW83KykrTNE0zJyfHfPLJJx0B6N13373k9bVDT31qh6/Q0FDz+uuvNzMzMx3PZ2VlOR4/9dRT5q9//Wtz27ZtZnFxsWmapllVVWXu3r3bvP322x3B8+TJkw3W0bFjRzM4ONhcuHCheeHCBdM0TXPv3r1menq6KclMSkoyH3zwQTMqKsp88803zYqKCtNut5tbtmwxU1NTHUG1PjfddJMpyezRo4f52muvmYWFhY7f8/vvv2+mpKSYksybbrqp3tcDABpG+AIAtJirwldsbKx55syZS55/8sknHcv079/fLCsru2SZO+64w5RkXn311Zc8953vfMeUZN5xxx0N1jd//nxTkjl48OBLnnMmfI0YMaLBo29NMXXqVFOS+d///d8N1iHJXLJkySXPHzx4sM4Rqs8///ySZT799FPH88ePH6/z3IcffmhKMuPi4swTJ07UW9/x48cdR9G2bdvWvH8kALRTXPMFAPAYP/rRj9SpU6dL5k+ePNnx+Kc//akCAwMbXGbnzp115ufn5+udd96RJD3xxBMNbvvOO++UJO3YsUM5OTnOF/+Nn//85/L19W3266dOnSpJWr9+fYPLdOvWTbNmzbpkfkpKilJTUyVJV155pa644opLlhk/frzj9/ft39U//vEPSdIdd9yhhISEeredmJioq666SpK0cuXKxv45AIBaGGoeAOAxRowYUe/82NhYx+Phw4dfdplz587Vmf/ll1/KbrdLqh4qvimOHj1aZ5vOGDt2bKPL7NixQwsXLtT69et15MgRFRcXyzTNOsvU3NC5PsOGDXMM7vFtsbGxOnDgQIO/J19fX0VHR+vkyZOX/K5qAt+iRYv0yiuvNLj9goICSdW/JwBA0xG+AAAeIywsrN75fn5+TV7GZrPVmZ+dne143NQjWjVDyzdHTEzMZZ9//vnnNXfuXEcgNAxDERERjqNRpaWlKiwsVElJSYPraOh3IF38PTRlmdqjFlZWVurMmTOSqsNVTcC6nJb8ngCgPeK0QwBAm1ZVVSVJCg4Olll9rXOjP7WHhnfW5U45zMzM1E9+8hPZ7Xbdcsst2rRpk8rKynTu3DmdPn1ap0+f1vz58yXpkiNhra3m9yRJr7/+epN+T/XdNgAA0DDCFwCgTYuLi5NUfUTpwIEDltby1ltvqaqqSn379tXrr7+u4cOHKyAgoM4yp0+ftqS2oKAgRURESJJ27dplSQ0A0NYRvgAALebjU/1x4u6jNU0xZswYx/VRr7/+uqW1HD9+XJI0ePBgx+/s21atWuXOkuqouV5t2bJljtMiAQCuQ/gCALRYeHi4JOn8+fPWFlKPmJgYTZ8+XZL0zDPPKCsr67LL5+fnt1ottY8s1RdU//3vf2vt2rWttv3G3HvvvZKkrKwsPfPMM5ddtqSkRBUVFe4oCwDaDMIXAKDFBgwYIElasWKFTp48aXE1l/rzn/+sTp06qbCwUFdccYX++c9/1hlQ4syZM3rnnXc0c+ZMfe9732u1Oq677jpJ0p49e/TAAw84gl5JSYkWLlyo73znO/UOte8u06dP14wZMyRVD8t///331wmrFRUV+uqrr/T444+re/fuys3NtapUAPBKhC8AQIvdddddCgoK0oEDB9StWzfFxcUpKSlJSUlJlx0y3V1SUlL0ySefKCkpSXl5efrBD36gyMhIRUVFKSwsTJ07d9bNN9+sd999t1VPt7v66qt12223SZJeeOEFderUSZGRkYqIiNB9992nvn376qmnnmq17TfFkiVLHDW++OKL6t27t0JDQxUVFaXg4GCNGjVKf/zjH3X27NkGh7sHANSP8AUAaLGePXtqzZo1uvHGG9W5c2edPXtWR48e1dGjRy8Z+t0qQ4cO1ddff63nn39e11xzjaKjo1VUVCS73a6ePXtq1qxZev311x03ZG4tr776qv76179q0KBBCgwMVFVVlQYOHKj/+Z//0RdffKHQ0NBW3X5jQkJCtHTpUq1Zs0Z33HGHUlJSZLfbVVxcrJiYGE2cOFF//OMftX///gZvxAwAqJ9heuLV0QAAAADQxnDkCwAAAADcgPAFAAAAAG5A+AIAAAAANyB8AQAAAIAbEL4AAAAAwA0IXwAAAADgBoQvAAAAAHADwhcAAAAAuAHhCwAAAADcgPAFAAAAAG5A+AIAAAAANyB8AQAAAIAbEL4AAAAAwA0IXwAAAADgBv8fmtrOeynHickAAAAASUVORK5CYII=", + "text/plain": [ + "
    " + ] + }, + "metadata": {}, + "output_type": "display_data" } - }, - "outputs": [], + ], "source": [ - "# plt.figure(figsize=(10, 5))\n", - "# plt.tight_layout()\n", - "# plt.plot(area[\"frame\"], area[\"area\"])\n", - "# plt.xlabel(\"timeframe\")\n", - "# plt.ylabel(r\"area [$m^2$]\")\n", - "# plt.grid()" + "plt.figure(figsize=(10, 5))\n", + "plt.tight_layout()\n", + "plt.plot(area[\"frame\"], area[\"area\"])\n", + "plt.xlabel(\"timeframe\")\n", + "plt.ylabel(r\"area [$m^2$]\")\n", + "plt.grid()\n", + "\n", + "plt.axhline(expected_area, color=\"darkgreen\", lw=5, alpha=0.5)\n", + "sns.despine()" ] }, { @@ -4753,17 +4465,10 @@ "cell_type": "code", "execution_count": 29, "id": "4a7b37f1", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:14.474517Z", - "iopub.status.busy": "2024-02-17T12:47:14.474125Z", - "iopub.status.idle": "2024-02-17T12:47:14.486908Z", - "shell.execute_reply": "2024-02-17T12:47:14.485462Z" - } - }, + "metadata": {}, "outputs": [], "source": [ - "hist, bins, centers = tobac.analysis.lifetime_histogram(\n", + "hist, bins, centers = tobac.lifetime_histogram(\n", " track, bin_edges=np.arange(0, 200, 20)\n", ")" ] @@ -4772,14 +4477,7 @@ "cell_type": "code", "execution_count": 30, "id": "36bb6765", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-17T12:47:14.492981Z", - "iopub.status.busy": "2024-02-17T12:47:14.491513Z", - "iopub.status.idle": "2024-02-17T12:47:14.721691Z", - "shell.execute_reply": "2024-02-17T12:47:14.720229Z" - } - }, + "metadata": {}, "outputs": [ { "data": { @@ -4810,6 +4508,11 @@ } ], "metadata": { + "kernelspec": { + "display_name": "Python [conda env:tobac_RC_v1.5.x]", + "language": "python", + "name": "conda-env-tobac_RC_v1.5.x-py" + }, "language_info": { "codemirror_mode": { "name": "ipython", diff --git a/setup.py b/setup.py index 68b13c4d..2339a6ca 100644 --- a/setup.py +++ b/setup.py @@ -1,10 +1,10 @@ -from setuptools import setup - """ This code is from the python documentation and is designed to read in the version number. 
See: https://packaging.python.org/en/latest/guides/single-sourcing-package-version/ """ + +from setuptools import setup from pathlib import Path @@ -37,6 +37,14 @@ def get_requirements(requirements_filename): return requirements +def get_packages(package_name): + package = Path(package_name) + packages = [ + str(path.parent).replace("/", ".") for path in package.rglob("__init__.py") + ] + return packages + + PACKAGE_NAME = "tobac" # See classifiers list at: https://pypi.org/classifiers/ @@ -86,7 +94,7 @@ def get_requirements(requirements_filename): "peter.marinescu@colostate.edu", ], license="BSD-3-Clause License", - packages=[PACKAGE_NAME, PACKAGE_NAME + ".utils", PACKAGE_NAME + ".utils.internal"], + packages=get_packages(PACKAGE_NAME), install_requires=get_requirements("requirements.txt"), test_requires=["pytest"], zip_safe=False, diff --git a/tobac/__init__.py b/tobac/__init__.py index 652645e4..1b668bd1 100644 --- a/tobac/__init__.py +++ b/tobac/__init__.py @@ -29,47 +29,53 @@ plot_mask_cell_track_follow, plot_mask_cell_track_static, plot_mask_cell_track_static_timeseries, -) -from .plotting import ( plot_lifetime_histogram, plot_lifetime_histogram_bar, plot_histogram_cellwise, plot_histogram_featurewise, -) -from .plotting import plot_mask_cell_track_3Dstatic, plot_mask_cell_track_2D3Dstatic -from .plotting import ( + plot_mask_cell_track_3Dstatic, + plot_mask_cell_track_2D3Dstatic, plot_mask_cell_individual_static, plot_mask_cell_individual_3Dstatic, + animation_mask_field, + make_map, + map_tracks, ) -from .plotting import animation_mask_field -from .plotting import make_map, map_tracks -from .analysis import ( +from tobac.analysis.cell_analysis import ( cell_statistics, cog_cell, lifetime_histogram, - histogram_featurewise, histogram_cellwise, -) -from .analysis import calculate_velocity, calculate_distance, calculate_area -from .analysis import calculate_nearestneighbordistance -from .analysis import ( velocity_histogram, + calculate_overlap, +) +from tobac.analysis.feature_analysis import ( + histogram_featurewise, + calculate_nearestneighbordistance, nearestneighbordistance_histogram, area_histogram, ) -from .analysis import calculate_overlap -from .utils import ( +from tobac.analysis.spatial import ( + calculate_velocity, + calculate_distance, + calculate_area, +) +from .utils.mask import ( mask_cell, mask_cell_surface, mask_cube_cell, mask_cube_untracked, mask_cube, column_mask_from2D, + mask_features, + mask_features_surface, + mask_cube_features, +) +from .utils.general import ( get_bounding_box, + add_coordinates, + get_spacings, ) -from .utils import mask_features, mask_features_surface, mask_cube_features - -from .utils import add_coordinates, get_spacings from .feature_detection import feature_detection_multithreshold from .tracking import linking_trackpy from .wrapper import maketrack diff --git a/tobac/analysis.py b/tobac/analysis.py deleted file mode 100644 index afc11084..00000000 --- a/tobac/analysis.py +++ /dev/null @@ -1,1243 +0,0 @@ -"""Provide tools to analyse and visualize the tracked objects. -This module provides a set of routines that enables performing analyses -and deriving statistics for individual tracks, such as the time series -of integrated properties and vertical profiles. It also provides -routines to calculate summary statistics of the entire population of -tracked features in the field like histograms of areas/volumes -or mass and a detailed cell lifetime analysis. These analysis -routines are all built in a modular manner. 
Thus, users can reuse the -most basic methods for interacting with the data structure of the -package in their own analysis procedures in Python. This includes -functions performing simple tasks like looping over all identified -objects or trajectories and masking arrays for the analysis of -individual features. Plotting routines include both visualizations -for individual convective cells and their properties. [1]_ - -References ----------- -.. Heikenfeld, M., Marinescu, P. J., Christensen, M., - Watson-Parris, D., Senf, F., van den Heever, S. C. - & Stier, P. (2019). tobac 1.2: towards a flexible - framework for tracking and analysis of clouds in - diverse datasets. Geoscientific Model Development, - 12(11), 4551-4570. - -Notes ------ -""" - -import pandas as pd -import numpy as np -import logging -import os -import warnings - -from tobac.centerofgravity import calculate_cog -from .utils import mask_cell, mask_cell_surface, mask_cube_cell, get_bounding_box -from .utils import internal as internal_utils -from .utils import decorators - - -def cell_statistics_all( - input_cubes, - track, - mask, - aggregators, - output_path="./", - cell_selection=None, - output_name="Profiles", - width=10000, - z_coord="model_level_number", - dimensions=["x", "y"], - **kwargs, -): - """ - Parameters - ---------- - input_cubes : iris.cube.Cube - - track : dask.dataframe.DataFrame - - mask : iris.cube.Cube - Cube containing mask (int id for tracked volumes 0 everywhere - else). - - aggregators : list - list of iris.analysis.Aggregator instances - - output_path : str, optional - Default is './'. - - cell_selection : optional - Default is None. - - output_name : str, optional - Default is 'Profiles'. - - width : int, optional - Default is 10000. - - z_coord : str, optional - Name of the vertical coordinate in the cube. Default is - 'model_level_number'. - - dimensions : list of str, optional - Default is ['x', 'y']. - - **kwargs - - Returns - ------- - None - """ - warnings.warn( - "cell_statistics_all is depreciated and will be removed or significantly changed in v2.0.", - DeprecationWarning, - ) - - if cell_selection is None: - cell_selection = np.unique(track["cell"]) - for cell in cell_selection: - cell_statistics( - input_cubes=input_cubes, - track=track, - mask=mask, - dimensions=dimensions, - aggregators=aggregators, - cell=cell, - output_path=output_path, - output_name=output_name, - width=width, - z_coord=z_coord, - **kwargs, - ) - - -def cell_statistics( - input_cubes, - track, - mask, - aggregators, - cell, - output_path="./", - output_name="Profiles", - width=10000, - z_coord="model_level_number", - dimensions=["x", "y"], - **kwargs, -): - """ - Parameters - ---------- - input_cubes : iris.cube.Cube - - track : dask.dataframe.DataFrame - - mask : iris.cube.Cube - Cube containing mask (int id for tracked volumes 0 everywhere - else). - - aggregators list - list of iris.analysis.Aggregator instances - - cell : int - Integer id of cell to create masked cube for output. - - output_path : str, optional - Default is './'. - - output_name : str, optional - Default is 'Profiles'. - - width : int, optional - Default is 10000. - - z_coord : str, optional - Name of the vertical coordinate in the cube. Default is - 'model_level_number'. - - dimensions : list of str, optional - Default is ['x', 'y']. 
- - **kwargs - - Returns - ------- - None - """ - - from iris.cube import Cube, CubeList - from iris.coords import AuxCoord - from iris import Constraint, save - - warnings.warn( - "cell_statistics is depreciated and will be removed or significantly changed in v2.0.", - DeprecationWarning, - ) - - # If input is single cube, turn into cubelist - if type(input_cubes) is Cube: - input_cubes = CubeList([input_cubes]) - - logging.debug("Start calculating profiles for cell " + str(cell)) - track_i = track[track["cell"] == cell] - - cubes_profile = {} - for aggregator in aggregators: - cubes_profile[aggregator.name()] = CubeList() - - for time_i in track_i["time"].values: - constraint_time = Constraint(time=time_i) - - mask_i = mask.extract(constraint_time) - mask_cell_i = mask_cell(mask_i, cell, track_i, masked=False) - mask_cell_surface_i = mask_cell_surface( - mask_i, cell, track_i, masked=False, z_coord=z_coord - ) - - x_dim = mask_cell_surface_i.coord_dims("projection_x_coordinate")[0] - y_dim = mask_cell_surface_i.coord_dims("projection_y_coordinate")[0] - x_coord = mask_cell_surface_i.coord("projection_x_coordinate") - y_coord = mask_cell_surface_i.coord("projection_y_coordinate") - - if (mask_cell_surface_i.core_data() > 0).any(): - box_mask_i = get_bounding_box(mask_cell_surface_i.core_data(), buffer=1) - - box_mask = [ - [ - x_coord.points[box_mask_i[x_dim][0]], - x_coord.points[box_mask_i[x_dim][1]], - ], - [ - y_coord.points[box_mask_i[y_dim][0]], - y_coord.points[box_mask_i[y_dim][1]], - ], - ] - else: - box_mask = [[np.nan, np.nan], [np.nan, np.nan]] - - x = track_i[track_i["time"].values == time_i]["projection_x_coordinate"].values[ - 0 - ] - y = track_i[track_i["time"].values == time_i]["projection_y_coordinate"].values[ - 0 - ] - - box_slice = [[x - width, x + width], [y - width, y + width]] - - x_min = np.nanmin([box_mask[0][0], box_slice[0][0]]) - x_max = np.nanmax([box_mask[0][1], box_slice[0][1]]) - y_min = np.nanmin([box_mask[1][0], box_slice[1][0]]) - y_max = np.nanmax([box_mask[1][1], box_slice[1][1]]) - - constraint_x = Constraint( - projection_x_coordinate=lambda cell: int(x_min) < cell < int(x_max) - ) - constraint_y = Constraint( - projection_y_coordinate=lambda cell: int(y_min) < cell < int(y_max) - ) - - constraint = constraint_time & constraint_x & constraint_y - # Mask_cell_surface_i=mask_cell_surface(Mask_w_i,cell,masked=False,z_coord='model_level_number') - mask_cell_i = mask_cell_i.extract(constraint) - mask_cell_surface_i = mask_cell_surface_i.extract(constraint) - - input_cubes_i = input_cubes.extract(constraint) - for cube in input_cubes_i: - cube_masked = mask_cube_cell(cube, mask_cell_i, cell, track_i) - coords_remove = [] - for coordinate in cube_masked.coords(dim_coords=False): - if coordinate.name() not in dimensions: - for dim in dimensions: - if set(cube_masked.coord_dims(coordinate)).intersection( - set(cube_masked.coord_dims(dim)) - ): - coords_remove.append(coordinate.name()) - for coordinate in set(coords_remove): - cube_masked.remove_coord(coordinate) - - for aggregator in aggregators: - cube_collapsed = cube_masked.collapsed(dimensions, aggregator, **kwargs) - # remove all collapsed coordinates (x and y dim, scalar now) and keep only time as all these coordinates are useless - for coordinate in cube_collapsed.coords(): - if not cube_collapsed.coord_dims(coordinate): - if coordinate.name() != "time": - cube_collapsed.remove_coord(coordinate) - logging.debug(str(cube_collapsed)) - cubes_profile[aggregator.name()].append(cube_collapsed) - - minutes 
= (track_i["time_cell"] / pd.Timedelta(minutes=1)).values - latitude = track_i["latitude"].values - longitude = track_i["longitude"].values - minutes_coord = AuxCoord(minutes, long_name="cell_time", units="min") - latitude_coord = AuxCoord(latitude, long_name="latitude", units="degrees") - longitude_coord = AuxCoord(longitude, long_name="longitude", units="degrees") - - for aggregator in aggregators: - cubes_profile[aggregator.name()] = cubes_profile[aggregator.name()].merge() - for cube in cubes_profile[aggregator.name()]: - cube.add_aux_coord(minutes_coord, data_dims=cube.coord_dims("time")) - cube.add_aux_coord(latitude_coord, data_dims=cube.coord_dims("time")) - cube.add_aux_coord(longitude_coord, data_dims=cube.coord_dims("time")) - os.makedirs( - os.path.join(output_path, output_name, aggregator.name()), exist_ok=True - ) - savefile = os.path.join( - output_path, - output_name, - aggregator.name(), - output_name + "_" + aggregator.name() + "_" + str(int(cell)) + ".nc", - ) - save(cubes_profile[aggregator.name()], savefile) - - -def cog_cell( - cell, - Tracks=None, - M_total=None, - M_liquid=None, - M_frozen=None, - Mask=None, - savedir=None, -): - """ - Parameters - ---------- - cell : int - Integer id of cell to create masked cube for output. - - Tracks : optional - Default is None. - - M_total : subset of cube, optional - Default is None. - - M_liquid : subset of cube, optional - Default is None. - - M_frozen : subset of cube, optional - Default is None. - - savedir : str - Default is None. - - Returns - ------- - None - """ - - warnings.warn( - "cog_cell is depreciated and will be removed or significantly changed in v2.0.", - DeprecationWarning, - ) - - from iris import Constraint - - logging.debug("Start calculating COG for " + str(cell)) - Track = Tracks[Tracks["cell"] == cell] - constraint_time = Constraint( - time=lambda cell: Track.head(1)["time"].values[0] - <= cell - <= Track.tail(1)["time"].values[0] - ) - M_total_i = M_total.extract(constraint_time) - M_liquid_i = M_liquid.extract(constraint_time) - M_frozen_i = M_frozen.extract(constraint_time) - Mask_i = Mask.extract(constraint_time) - - savedir_cell = os.path.join(savedir, "cells", str(int(cell))) - os.makedirs(savedir_cell, exist_ok=True) - savefile_COG_total_i = os.path.join( - savedir_cell, "COG_total" + "_" + str(int(cell)) + ".h5" - ) - savefile_COG_liquid_i = os.path.join( - savedir_cell, "COG_liquid" + "_" + str(int(cell)) + ".h5" - ) - savefile_COG_frozen_i = os.path.join( - savedir_cell, "COG_frozen" + "_" + str(int(cell)) + ".h5" - ) - - Tracks_COG_total_i = calculate_cog(Track, M_total_i, Mask_i) - # Tracks_COG_total_list.append(Tracks_COG_total_i) - logging.debug("COG total loaded for " + str(cell)) - - Tracks_COG_liquid_i = calculate_cog(Track, M_liquid_i, Mask_i) - # Tracks_COG_liquid_list.append(Tracks_COG_liquid_i) - logging.debug("COG liquid loaded for " + str(cell)) - Tracks_COG_frozen_i = calculate_cog(Track, M_frozen_i, Mask_i) - # Tracks_COG_frozen_list.append(Tracks_COG_frozen_i) - logging.debug("COG frozen loaded for " + str(cell)) - - Tracks_COG_total_i.to_hdf(savefile_COG_total_i, "table") - Tracks_COG_liquid_i.to_hdf(savefile_COG_liquid_i, "table") - Tracks_COG_frozen_i.to_hdf(savefile_COG_frozen_i, "table") - logging.debug("individual COG calculated and saved to " + savedir_cell) - - -def lifetime_histogram( - Track, bin_edges=np.arange(0, 200, 20), density=False, return_values=False -): - """Compute the lifetime histogram of linked features. 
- - Parameters - ---------- - Track : pandas.DataFrame - Dataframe of linked features, containing the columns 'cell' - and 'time_cell'. - - bin_edges : int or ndarray, optional - If bin_edges is an int, it defines the number of equal-width - bins in the given range. If bins is a ndarray, it defines a - monotonically increasing array of bin edges, including the - rightmost edge. The unit is minutes. - Default is np.arange(0, 200, 20). - - density : bool, optional - If False, the result will contain the number of samples in - each bin. If True, the result is the value of the probability - density function at the bin, normalized such that the integral - over the range is 1. Default is False. - - return_values : bool, optional - Bool determining wether the lifetimes of the features are - returned from this function. Default is False. - - Returns - ------- - hist : ndarray - The values of the histogram. - - bin_edges : ndarray - The edges of the histogram. - - bin_centers : ndarray - The centers of the histogram intervalls. - - minutes, optional : ndarray - Numpy.array of the lifetime of each feature in minutes. - Returned if return_values is True. - - """ - - Track_cell = Track.groupby("cell") - minutes = (Track_cell["time_cell"].max() / pd.Timedelta(minutes=1)).values - hist, bin_edges = np.histogram(minutes, bin_edges, density=density) - bin_centers = bin_edges[:-1] + 0.5 * np.diff(bin_edges) - if return_values: - return hist, bin_edges, bin_centers, minutes - else: - return hist, bin_edges, bin_centers - - -def haversine(lat1, lon1, lat2, lon2): - """Computes the Haversine distance in kilometers. - - Calculates the Haversine distance between two points - (based on implementation CIS https://github.com/cedadev/cis). - - Parameters - ---------- - lat1, lon1 : array of latitude, longitude - First point or points as array in degrees. - - lat2, lon2 : array of latitude, longitude - Second point or points as array in degrees. - - Returns - ------- - arclen * RADIUS_EARTH : array - Array of Distance(s) between the two points(-arrays) in - kilometers. - - """ - - RADIUS_EARTH = 6378.0 - lat1 = np.radians(lat1) - lat2 = np.radians(lat2) - lon1 = np.radians(lon1) - lon2 = np.radians(lon2) - # print(lat1,lat2,lon1,lon2) - arclen = 2 * np.arcsin( - np.sqrt( - (np.sin((lat2 - lat1) / 2)) ** 2 - + np.cos(lat1) * np.cos(lat2) * (np.sin((lon2 - lon1) / 2)) ** 2 - ) - ) - return arclen * RADIUS_EARTH - - -def calculate_distance(feature_1, feature_2, method_distance=None): - """Compute the distance between two features. It is based on - either lat/lon coordinates or x/y coordinates. - - Parameters - ---------- - feature_1, feature_2 : pandas.DataFrame or pandas.Series - Dataframes containing multiple features or pandas.Series - of one feature. Need to contain either projection_x_coordinate - and projection_y_coordinate or latitude and longitude - coordinates. - - method_distance : {None, 'xy', 'latlon'}, optional - Method of distance calculation. 'xy' uses the length of the - vector between the two features, 'latlon' uses the haversine - distance. None checks wether the required coordinates are - present and starts with 'xy'. Default is None. - - Returns - ------- - distance : float or pandas.Series - Float with the distance between the two features in meters if - the input are two pandas.Series containing one feature, - pandas.Series of the distances if one of the inputs contains - multiple features. 
- - """ - if method_distance is None: - if ( - ("projection_x_coordinate" in feature_1) - and ("projection_y_coordinate" in feature_1) - and ("projection_x_coordinate" in feature_2) - and ("projection_y_coordinate" in feature_2) - ): - method_distance = "xy" - elif ( - ("latitude" in feature_1) - and ("longitude" in feature_1) - and ("latitude" in feature_2) - and ("longitude" in feature_2) - ): - method_distance = "latlon" - else: - raise ValueError( - "either latitude/longitude or projection_x_coordinate/projection_y_coordinate have to be present to calculate distances" - ) - - if method_distance == "xy": - distance = np.sqrt( - ( - feature_1["projection_x_coordinate"] - - feature_2["projection_x_coordinate"] - ) - ** 2 - + ( - feature_1["projection_y_coordinate"] - - feature_2["projection_y_coordinate"] - ) - ** 2 - ) - elif method_distance == "latlon": - distance = 1000 * haversine( - feature_1["latitude"], - feature_1["longitude"], - feature_2["latitude"], - feature_2["longitude"], - ) - else: - raise ValueError("method undefined") - return distance - - -def calculate_velocity_individual(feature_old, feature_new, method_distance=None): - """Calculate the mean velocity of a feature between two timeframes. - - Parameters - ---------- - feature_old : pandas.Series - pandas.Series of a feature at a certain timeframe. Needs to - contain a 'time' column and either projection_x_coordinate - and projection_y_coordinate or latitude and longitude coordinates. - - feature_new : pandas.Series - pandas.Series of the same feature at a later timeframe. Needs - to contain a 'time' column and either projection_x_coordinate - and projection_y_coordinate or latitude and longitude coordinates. - - method_distance : {None, 'xy', 'latlon'}, optional - Method of distance calculation, used to calculate the velocity. - 'xy' uses the length of the vector between the two features, - 'latlon' uses the haversine distance. None checks wether the - required coordinates are present and starts with 'xy'. - Default is None. - - Returns - ------- - velocity : float - Value of the approximate velocity. - - """ - - distance = calculate_distance( - feature_old, feature_new, method_distance=method_distance - ) - diff_time = (feature_new["time"] - feature_old["time"]).total_seconds() - velocity = distance / diff_time - return velocity - - -def calculate_velocity(track, method_distance=None): - """Calculate the velocities of a set of linked features. - - Parameters - ---------- - track : pandas.DataFrame - Dataframe of linked features, containing the columns 'cell', - 'time' and either 'projection_x_coordinate' and - 'projection_y_coordinate' or 'latitude' and 'longitude'. - - method_distance : {None, 'xy', 'latlon'}, optional - Method of distance calculation, used to calculate the - velocity. 'xy' uses the length of the vector between the - two features, 'latlon' uses the haversine distance. None - checks wether the required coordinates are present and - starts with 'xy'. Default is None. 
- - Returns - ------- - track : pandas.DataFrame - DataFrame from the input, with an additional column 'v', - contain the value of the velocity for every feature at - every possible timestep - """ - - for cell_i, track_i in track.groupby("cell"): - index = track_i.index.values - for i, index_i in enumerate(index[:-1]): - velocity = calculate_velocity_individual( - track_i.loc[index[i]], - track_i.loc[index[i + 1]], - method_distance=method_distance, - ) - track.at[index_i, "v"] = velocity - return track - - -def velocity_histogram( - track, - bin_edges=np.arange(0, 30, 1), - density=False, - method_distance=None, - return_values=False, -): - """Create an velocity histogram of the features. If the DataFrame - does not contain a velocity column, the velocities are calculated. - - Parameters - ---------- - track: pandas.DataFrame - DataFrame of the linked features, containing the columns 'cell', - 'time' and either 'projection_x_coordinate' and - 'projection_y_coordinate' or 'latitude' and 'longitude'. - - bin_edges : int or ndarray, optional - If bin_edges is an int, it defines the number of equal-width - bins in the given range. If bins is a ndarray, it defines a - monotonically increasing array of bin edges, including the - rightmost edge. Default is np.arange(0, 30000, 500). - - density : bool, optional - If False, the result will contain the number of samples in - each bin. If True, the result is the value of the probability - density function at the bin, normalized such that the integral - over the range is 1. Default is False. - - methods_distance : {None, 'xy', 'latlon'}, optional - Method of distance calculation, used to calculate the velocity. - 'xy' uses the length of the vector between the two features, - 'latlon' uses the haversine distance. None checks wether the - required coordinates are present and starts with 'xy'. - Default is None. - - return_values : bool, optional - Bool determining wether the velocities of the features are - returned from this function. Default is False. - - Returns - ------- - hist : ndarray - The values of the histogram. - - bin_edges : ndarray - The edges of the histogram. - - velocities , optional : ndarray - Numpy array with the velocities of each feature. - - """ - - if "v" not in track.columns: - logging.info("calculate velocities") - track = calculate_velocity(track) - velocities = track["v"].values - hist, bin_edges = np.histogram( - velocities[~np.isnan(velocities)], bin_edges, density=density - ) - if return_values: - return hist, bin_edges, velocities - else: - return hist, bin_edges - - -def calculate_nearestneighbordistance(features, method_distance=None): - """Calculate the distance between a feature and the nearest other - feature in the same timeframe. - - Parameters - ---------- - features : pandas.DataFrame - DataFrame of the features whose nearest neighbor distance is to - be calculated. Needs to contain either projection_x_coordinate - and projection_y_coordinate or latitude and longitude coordinates. - - method_distance : {None, 'xy', 'latlon'}, optional - Method of distance calculation. 'xy' uses the length of the vector - between the two features, 'latlon' uses the haversine distance. - None checks wether the required coordinates are present and starts - with 'xy'. Default is None. - - Returns - ------- - features : pandas.DataFrame - DataFrame of the features with a new column 'min_distance', - containing the calculated minimal distance to other features. 
- - """ - - from itertools import combinations - - features["min_distance"] = np.nan - for time_i, features_i in features.groupby("time"): - logging.debug(str(time_i)) - indeces = combinations(features_i.index.values, 2) - # Loop over combinations to remove features that are closer together than min_distance and keep larger one (either higher threshold or larger area) - distances = [] - for index_1, index_2 in indeces: - if index_1 is not index_2: - distance = calculate_distance( - features_i.loc[index_1], - features_i.loc[index_2], - method_distance=method_distance, - ) - distances.append( - pd.DataFrame( - {"index_1": index_1, "index_2": index_2, "distance": distance}, - index=[0], - ) - ) - if any([x is not None for x in distances]): - distances = pd.concat(distances, ignore_index=True) - for i in features_i.index: - min_distance = distances.loc[ - (distances["index_1"] == i) | (distances["index_2"] == i), - "distance", - ].min() - features.at[i, "min_distance"] = min_distance - return features - - -def nearestneighbordistance_histogram( - features, - bin_edges=np.arange(0, 30000, 500), - density=False, - method_distance=None, - return_values=False, -): - """Create an nearest neighbor distance histogram of the features. - If the DataFrame does not contain a 'min_distance' column, the - distances are calculated. - - ---------- - features - - bin_edges : int or ndarray, optional - If bin_edges is an int, it defines the number of equal-width - bins in the given range. If bins is a ndarray, it defines a - monotonically increasing array of bin edges, including the - rightmost edge. Default is np.arange(0, 30000, 500). - - density : bool, optional - If False, the result will contain the number of samples in - each bin. If True, the result is the value of the probability - density function at the bin, normalized such that the integral - over the range is 1. Default is False. - - method_distance : {None, 'xy', 'latlon'}, optional - Method of distance calculation. 'xy' uses the length of the - vector between the two features, 'latlon' uses the haversine - distance. None checks wether the required coordinates are - present and starts with 'xy'. Default is None. - - return_values : bool, optional - Bool determining wether the nearest neighbor distance of the - features are returned from this function. Default is False. - - Returns - ------- - hist : ndarray - The values of the histogram. - - bin_edges : ndarray - The edges of the histogram. - - distances, optional : ndarray - A numpy array with the nearest neighbor distances of each - feature. - - """ - - if "min_distance" not in features.columns: - logging.debug("calculate nearest neighbor distances") - features = calculate_nearestneighbordistance( - features, method_distance=method_distance - ) - distances = features["min_distance"].values - hist, bin_edges = np.histogram( - distances[~np.isnan(distances)], bin_edges, density=density - ) - if return_values: - return hist, bin_edges, distances - else: - return hist, bin_edges - - -# Treatment of 2D lat/lon coordinates to be added: -def calculate_areas_2Dlatlon(_2Dlat_coord, _2Dlon_coord): - """Calculate an array of cell areas when given two 2D arrays - of latitude and longitude values - - NOTE: This currently assuems that the lat/lon grid is orthogonal, - which is not strictly true! It's close enough for most cases, but - should be updated in future to use the cross product of the - distances to the neighbouring cells. This will require the use - of a more advanced calculation. 
I would advise using pyproj - at some point in the future to solve this issue and replace - haversine distance. - - Parameters - ---------- - _2Dlat_coord : AuxCoord - Iris auxilliary coordinate containing a 2d grid of latitudes - for each point. - - _2Dlon_coord : AuxCoord - Iris auxilliary coordinate containing a 2d grid of longitudes - for each point. - - Returns - ------- - area : ndarray - A numpy array approximating the area of each cell. - - """ - - hdist1 = ( - haversine( - _2Dlat_coord.points[:-1], - _2Dlon_coord.points[:-1], - _2Dlat_coord.points[1:], - _2Dlon_coord.points[1:], - ) - * 1000 - ) - - dists1 = np.zeros(_2Dlat_coord.points.shape) - dists1[0] = hdist1[0] - dists1[-1] = hdist1[-1] - dists1[1:-1] = (hdist1[0:-1] + hdist1[1:]) * 0.5 - - hdist2 = ( - haversine( - _2Dlat_coord.points[:, :-1], - _2Dlon_coord.points[:, :-1], - _2Dlat_coord.points[:, 1:], - _2Dlon_coord.points[:, 1:], - ) - * 1000 - ) - - dists2 = np.zeros(_2Dlat_coord.points.shape) - dists2[:, 0] = hdist2[:, 0] - dists2[:, -1] = hdist2[:, -1] - dists2[:, 1:-1] = (hdist2[:, 0:-1] + hdist2[:, 1:]) * 0.5 - - area = dists1 * dists2 - - return area - - -@decorators.xarray_to_iris -def calculate_area(features, mask, method_area=None): - """Calculate the area of the segments for each feature. - - Parameters - ---------- - features : pandas.DataFrame - DataFrame of the features whose area is to be calculated. - - mask : iris.cube.Cube - Cube containing mask (int for tracked volumes 0 everywhere - else). Needs to contain either projection_x_coordinate and - projection_y_coordinate or latitude and longitude - coordinates. - - method_area : {None, 'xy', 'latlon'}, optional - Flag determining how the area is calculated. 'xy' uses the - areas of the individual pixels, 'latlon' uses the - area_weights method of iris.analysis.cartography, None - checks wether the required coordinates are present and - starts with 'xy'. Default is None. - - Returns - ------- - features : pandas.DataFrame - DataFrame of the features with a new column 'area', - containing the calculated areas. - - Raises - ------ - ValueError - If neither latitude/longitude nor - projection_x_coordinate/projection_y_coordinate are - present in mask_coords. - - If latitude/longitude coordinates are 2D. - - If latitude/longitude shapes are not supported. - - If method is undefined, i.e. method is neither None, - 'xy' nor 'latlon'. 
- - """ - - from tobac.utils import mask_features_surface, mask_features - from iris import Constraint - from iris.analysis.cartography import area_weights - from scipy.ndimage import labeled_comprehension - - features["area"] = np.nan - - mask_coords = [coord.name() for coord in mask.coords()] - if method_area is None: - if ("projection_x_coordinate" in mask_coords) and ( - "projection_y_coordinate" in mask_coords - ): - method_area = "xy" - elif ("latitude" in mask_coords) and ("longitude" in mask_coords): - method_area = "latlon" - else: - raise ValueError( - "either latitude/longitude or projection_x_coordinate/projection_y_coordinate have to be present to calculate distances" - ) - # logging.debug("calculating area using method " + method_area) - if method_area == "xy": - if not ( - mask.coord("projection_x_coordinate").has_bounds() - and mask.coord("projection_y_coordinate").has_bounds() - ): - mask.coord("projection_x_coordinate").guess_bounds() - mask.coord("projection_y_coordinate").guess_bounds() - area = np.outer( - np.diff(mask.coord("projection_x_coordinate").bounds, axis=1), - np.diff(mask.coord("projection_y_coordinate").bounds, axis=1), - ) - elif method_area == "latlon": - if (mask.coord("latitude").ndim == 1) and (mask.coord("latitude").ndim == 1): - if not ( - mask.coord("latitude").has_bounds() - and mask.coord("longitude").has_bounds() - ): - mask.coord("latitude").guess_bounds() - mask.coord("longitude").guess_bounds() - area = area_weights(mask, normalize=False) - elif mask.coord("latitude").ndim == 2 and mask.coord("longitude").ndim == 2: - area = calculate_areas_2Dlatlon( - mask.coord("latitude"), mask.coord("longitude") - ) - else: - raise ValueError("latitude/longitude coordinate shape not supported") - else: - raise ValueError("method undefined") - - feature_areas = labeled_comprehension( - area, mask.data, features["feature"], np.sum, area.dtype, np.nan - ) - - features["area"] = feature_areas - - return features - - -def area_histogram( - features, - mask, - bin_edges=np.arange(0, 30000, 500), - density=False, - method_area=None, - return_values=False, - representative_area=False, -): - """Create an area histogram of the features. If the DataFrame - does not contain an area column, the areas are calculated. - - Parameters - ---------- - features : pandas.DataFrame - DataFrame of the features. - - mask : iris.cube.Cube - Cube containing mask (int for tracked volumes 0 - everywhere else). Needs to contain either - projection_x_coordinate and projection_y_coordinate or - latitude and longitude coordinates. The output of a - segmentation should be used here. - - bin_edges : int or ndarray, optional - If bin_edges is an int, it defines the number of - equal-width bins in the given range. If bins is a ndarray, - it defines a monotonically increasing array of bin edges, - including the rightmost edge. - Default is np.arange(0, 30000, 500). - - density : bool, optional - If False, the result will contain the number of samples - in each bin. If True, the result is the value of the - probability density function at the bin, normalized such - that the integral over the range is 1. Default is False. - - return_values : bool, optional - Bool determining wether the areas of the features are - returned from this function. Default is False. - - representive_area: bool, optional - If False, no weights will associated to the values. - If True, the weights for each area will be the areas - itself, i.e. 
each bin count will have the value of - the sum of all areas within the edges of the bin. - Default is False. - - Returns - ------- - hist : ndarray - The values of the histogram. - - bin_edges : ndarray - The edges of the histogram. - - bin_centers : ndarray - The centers of the histogram intervalls. - - areas : ndarray, optional - A numpy array approximating the area of each feature. - - """ - - if "area" not in features.columns: - logging.info("calculate area") - features = calculate_area(features, mask, method_area) - areas = features["area"].values - # restrict to non NaN values: - areas = areas[~np.isnan(areas)] - if representative_area: - weights = areas - else: - weights = None - hist, bin_edges = np.histogram(areas, bin_edges, density=density, weights=weights) - bin_centers = bin_edges[:-1] + 0.5 * np.diff(bin_edges) - - if return_values: - return hist, bin_edges, bin_centers, areas - else: - return hist, bin_edges, bin_centers - - -def histogram_cellwise( - Track, variable=None, bin_edges=None, quantity="max", density=False -): - """Create a histogram of the maximum, minimum or mean of - a variable for the cells (series of features linked together - over multiple timesteps) of a track. Essentially a wrapper - of the numpy.histogram() method. - - Parameters - ---------- - Track : pandas.DataFrame - The track containing the variable to create the histogram - from. - - variable : string, optional - Column of the DataFrame with the variable on which the - histogram is to be based on. Default is None. - - bin_edges : int or ndarray, optional - If bin_edges is an int, it defines the number of - equal-width bins in the given range. If bins is a ndarray, - it defines a monotonically increasing array of bin edges, - including the rightmost edge. - - quantity : {'max', 'min', 'mean'}, optional - Flag determining wether to use maximum, minimum or mean - of a variable from all timeframes the cell covers. - Default is 'max'. - - density : bool, optional - If False, the result will contain the number of samples - in each bin. If True, the result is the value of the - probability density function at the bin, normalized such - that the integral over the range is 1. - Default is False. - - Returns - ------- - hist : ndarray - The values of the histogram - - bin_edges : ndarray - The edges of the histogram - - bin_centers : ndarray - The centers of the histogram intervalls - - Raises - ------ - ValueError - If quantity is not 'max', 'min' or 'mean'. - - """ - - Track_cell = Track.groupby("cell") - if quantity == "max": - variable_cell = Track_cell[variable].max().values - elif quantity == "min": - variable_cell = Track_cell[variable].min().values - elif quantity == "mean": - variable_cell = Track_cell[variable].mean().values - else: - raise ValueError("quantity unknown, must be max, min or mean") - hist, bin_edges = np.histogram(variable_cell, bin_edges, density=density) - bin_centers = bin_edges[:-1] + 0.5 * np.diff(bin_edges) - - return hist, bin_edges, bin_centers - - -def histogram_featurewise(Track, variable=None, bin_edges=None, density=False): - """Create a histogram of a variable from the features - (detected objects at a single time step) of a track. - Essentially a wrapper of the numpy.histogram() method. - - Parameters - ---------- - Track : pandas.DataFrame - The track containing the variable to create the - histogram from. - - variable : string, optional - Column of the DataFrame with the variable on which the - histogram is to be based on. Default is None. 
- - bin_edges : int or ndarray, optional - If bin_edges is an int, it defines the number of - equal-width bins in the given range. If bins is - a sequence, it defines a monotonically increasing - array of bin edges, including the rightmost edge. - - density : bool, optional - If False, the result will contain the number of - samples in each bin. If True, the result is the - value of the probability density function at the - bin, normalized such that the integral over the - range is 1. Default is False. - - Returns - ------- - hist : ndarray - The values of the histogram - - bin_edges : ndarray - The edges of the histogram - - bin_centers : ndarray - The centers of the histogram intervalls - - """ - - hist, bin_edges = np.histogram(Track[variable].values, bin_edges, density=density) - bin_centers = bin_edges[:-1] + 0.5 * np.diff(bin_edges) - - return hist, bin_edges, bin_centers - - -def calculate_overlap( - track_1, track_2, min_sum_inv_distance=None, min_mean_inv_distance=None -): - """Count the number of time frames in which the - individual cells of two tracks are present together - and calculate their mean and summed inverse distance. - - Parameters - ---------- - track_1, track_2 : pandas.DataFrame - The tracks conaining the cells to analyze. - - min_sum_inv_distance : float, optional - Minimum of the inverse net distance for two - cells to be counted as overlapping. - Default is None. - - min_mean_inv_distance : float, optional - Minimum of the inverse mean distance for two cells - to be counted as overlapping. Default is None. - - Returns - ------- - overlap : pandas.DataFrame - DataFrame containing the columns cell_1 and cell_2 - with the index of the cells from the tracks, - n_overlap with the number of frames both cells are - present in, mean_inv_distance with the mean inverse - distance and sum_inv_distance with the summed - inverse distance of the cells. 
- - """ - - cells_1 = track_1["cell"].unique() - # n_cells_1_tot=len(cells_1) - cells_2 = track_2["cell"].unique() - overlap = pd.DataFrame() - for i_cell_1, cell_1 in enumerate(cells_1): - for cell_2 in cells_2: - track_1_i = track_1[track_1["cell"] == cell_1] - track_2_i = track_2[track_2["cell"] == cell_2] - track_1_i = track_1_i[track_1_i["time"].isin(track_2_i["time"])] - track_2_i = track_2_i[track_2_i["time"].isin(track_1_i["time"])] - if not track_1_i.empty: - n_overlap = len(track_1_i) - distances = [] - for i in range(len(track_1_i)): - distance = calculate_distance( - track_1_i.iloc[[i]], track_2_i.iloc[[i]], method_distance="xy" - ) - distances.append(distance) - # mean_distance=np.mean(distances) - mean_inv_distance = np.mean(1 / (1 + np.array(distances) / 1000)) - # mean_inv_squaredistance=np.mean(1/(1+(np.array(distances)/1000)**2)) - sum_inv_distance = np.sum(1 / (1 + np.array(distances) / 1000)) - # sum_inv_squaredistance=np.sum(1/(1+(np.array(distances)/1000)**2)) - overlap = overlap.append( - { - "cell_1": cell_1, - "cell_2": cell_2, - "n_overlap": n_overlap, - # 'mean_distance':mean_distance, - "mean_inv_distance": mean_inv_distance, - # 'mean_inv_squaredistance':mean_inv_squaredistance, - "sum_inv_distance": sum_inv_distance, - # 'sum_inv_squaredistance':sum_inv_squaredistance - }, - ignore_index=True, - ) - if min_sum_inv_distance: - overlap = overlap[(overlap["sum_inv_distance"] >= min_sum_inv_distance)] - if min_mean_inv_distance: - overlap = overlap[(overlap["mean_inv_distance"] >= min_mean_inv_distance)] - - return overlap diff --git a/tobac/analysis/__init__.py b/tobac/analysis/__init__.py new file mode 100644 index 00000000..f56f538f --- /dev/null +++ b/tobac/analysis/__init__.py @@ -0,0 +1,31 @@ +"""Provide tools to analyse and visualize the tracked objects. +This module provides a set of routines that enables performing analyses +and deriving statistics for individual tracks, such as the time series +of integrated properties and vertical profiles. It also provides +routines to calculate summary statistics of the entire population of +tracked features in the field like histograms of areas/volumes +or mass and a detailed cell lifetime analysis. These analysis +routines are all built in a modular manner. Thus, users can reuse the +most basic methods for interacting with the data structure of the +package in their own analysis procedures in Python. This includes +functions performing simple tasks like looping over all identified +objects or trajectories and masking arrays for the analysis of +individual features. Plotting routines include both visualizations +for individual convective cells and their properties. [1]_ + +References +---------- +.. Heikenfeld, M., Marinescu, P. J., Christensen, M., + Watson-Parris, D., Senf, F., van den Heever, S. C. + & Stier, P. (2019). tobac 1.2: towards a flexible + framework for tracking and analysis of clouds in + diverse datasets. Geoscientific Model Development, + 12(11), 4551-4570. 
+ +Notes +----- +""" + +from tobac.analysis.cell_analysis import * +from tobac.analysis.feature_analysis import * +from tobac.analysis.spatial import * diff --git a/tobac/analysis/cell_analysis.py b/tobac/analysis/cell_analysis.py new file mode 100644 index 00000000..e8ef39f4 --- /dev/null +++ b/tobac/analysis/cell_analysis.py @@ -0,0 +1,628 @@ +""" +Perform analysis on the properties of tracked cells +""" + +import logging +import os +import warnings + +import numpy as np +import pandas as pd +from iris.cube import Cube, CubeList +from iris.coords import AuxCoord +from iris import Constraint, save + +from tobac.centerofgravity import calculate_cog +from tobac.utils.mask import mask_cell, mask_cell_surface, mask_cube_cell +from tobac.utils.general import get_bounding_box +from tobac.analysis.spatial import ( + calculate_distance, + calculate_velocity, +) + +__all__ = ( + "cell_statistics_all", + "cell_statistics", + "cog_cell", + "lifetime_histogram", + "velocity_histogram", + "histogram_cellwise", + "calculate_overlap", +) + + +def cell_statistics_all( + input_cubes, + track, + mask, + aggregators, + output_path="./", + cell_selection=None, + output_name="Profiles", + width=10000, + z_coord="model_level_number", + dimensions=["x", "y"], + **kwargs, +): + """ + Parameters + ---------- + input_cubes : iris.cube.Cube + + track : dask.dataframe.DataFrame + + mask : iris.cube.Cube + Cube containing mask (int id for tracked volumes 0 everywhere + else). + + aggregators : list + list of iris.analysis.Aggregator instances + + output_path : str, optional + Default is './'. + + cell_selection : optional + Default is None. + + output_name : str, optional + Default is 'Profiles'. + + width : int, optional + Default is 10000. + + z_coord : str, optional + Name of the vertical coordinate in the cube. Default is + 'model_level_number'. + + dimensions : list of str, optional + Default is ['x', 'y']. + + **kwargs + + Returns + ------- + None + """ + warnings.warn( + "cell_statistics_all is depreciated and will be removed or significantly changed in v2.0.", + DeprecationWarning, + ) + + if cell_selection is None: + cell_selection = np.unique(track["cell"]) + for cell in cell_selection: + cell_statistics( + input_cubes=input_cubes, + track=track, + mask=mask, + dimensions=dimensions, + aggregators=aggregators, + cell=cell, + output_path=output_path, + output_name=output_name, + width=width, + z_coord=z_coord, + **kwargs, + ) + + +def cell_statistics( + input_cubes, + track, + mask, + aggregators, + cell, + output_path="./", + output_name="Profiles", + width=10000, + z_coord="model_level_number", + dimensions=["x", "y"], + **kwargs, +): + """ + Parameters + ---------- + input_cubes : iris.cube.Cube + + track : dask.dataframe.DataFrame + + mask : iris.cube.Cube + Cube containing mask (int id for tracked volumes 0 everywhere + else). + + aggregators list + list of iris.analysis.Aggregator instances + + cell : int + Integer id of cell to create masked cube for output. + + output_path : str, optional + Default is './'. + + output_name : str, optional + Default is 'Profiles'. + + width : int, optional + Default is 10000. + + z_coord : str, optional + Name of the vertical coordinate in the cube. Default is + 'model_level_number'. + + dimensions : list of str, optional + Default is ['x', 'y']. 
+ + **kwargs + + Returns + ------- + None + """ + + warnings.warn( + "cell_statistics is depreciated and will be removed or significantly changed in v2.0.", + DeprecationWarning, + ) + + # If input is single cube, turn into cubelist + if type(input_cubes) is Cube: + input_cubes = CubeList([input_cubes]) + + logging.debug("Start calculating profiles for cell " + str(cell)) + track_i = track[track["cell"] == cell] + + cubes_profile = {} + for aggregator in aggregators: + cubes_profile[aggregator.name()] = CubeList() + + for time_i in track_i["time"].values: + constraint_time = Constraint(time=time_i) + + mask_i = mask.extract(constraint_time) + mask_cell_i = mask_cell(mask_i, cell, track_i, masked=False) + mask_cell_surface_i = mask_cell_surface( + mask_i, cell, track_i, masked=False, z_coord=z_coord + ) + + x_dim = mask_cell_surface_i.coord_dims("projection_x_coordinate")[0] + y_dim = mask_cell_surface_i.coord_dims("projection_y_coordinate")[0] + x_coord = mask_cell_surface_i.coord("projection_x_coordinate") + y_coord = mask_cell_surface_i.coord("projection_y_coordinate") + + if (mask_cell_surface_i.core_data() > 0).any(): + box_mask_i = get_bounding_box(mask_cell_surface_i.core_data(), buffer=1) + + box_mask = [ + [ + x_coord.points[box_mask_i[x_dim][0]], + x_coord.points[box_mask_i[x_dim][1]], + ], + [ + y_coord.points[box_mask_i[y_dim][0]], + y_coord.points[box_mask_i[y_dim][1]], + ], + ] + else: + box_mask = [[np.nan, np.nan], [np.nan, np.nan]] + + x = track_i[track_i["time"].values == time_i]["projection_x_coordinate"].values[ + 0 + ] + y = track_i[track_i["time"].values == time_i]["projection_y_coordinate"].values[ + 0 + ] + + box_slice = [[x - width, x + width], [y - width, y + width]] + + x_min = np.nanmin([box_mask[0][0], box_slice[0][0]]) + x_max = np.nanmax([box_mask[0][1], box_slice[0][1]]) + y_min = np.nanmin([box_mask[1][0], box_slice[1][0]]) + y_max = np.nanmax([box_mask[1][1], box_slice[1][1]]) + + constraint_x = Constraint( + projection_x_coordinate=lambda cell: int(x_min) < cell < int(x_max) + ) + constraint_y = Constraint( + projection_y_coordinate=lambda cell: int(y_min) < cell < int(y_max) + ) + + constraint = constraint_time & constraint_x & constraint_y + # Mask_cell_surface_i=mask_cell_surface(Mask_w_i,cell,masked=False,z_coord='model_level_number') + mask_cell_i = mask_cell_i.extract(constraint) + mask_cell_surface_i = mask_cell_surface_i.extract(constraint) + + input_cubes_i = input_cubes.extract(constraint) + for cube in input_cubes_i: + cube_masked = mask_cube_cell(cube, mask_cell_i, cell, track_i) + coords_remove = [] + for coordinate in cube_masked.coords(dim_coords=False): + if coordinate.name() not in dimensions: + for dim in dimensions: + if set(cube_masked.coord_dims(coordinate)).intersection( + set(cube_masked.coord_dims(dim)) + ): + coords_remove.append(coordinate.name()) + for coordinate in set(coords_remove): + cube_masked.remove_coord(coordinate) + + for aggregator in aggregators: + cube_collapsed = cube_masked.collapsed(dimensions, aggregator, **kwargs) + # remove all collapsed coordinates (x and y dim, scalar now) and keep only time as all these coordinates are useless + for coordinate in cube_collapsed.coords(): + if not cube_collapsed.coord_dims(coordinate): + if coordinate.name() != "time": + cube_collapsed.remove_coord(coordinate) + logging.debug(str(cube_collapsed)) + cubes_profile[aggregator.name()].append(cube_collapsed) + + minutes = (track_i["time_cell"] / pd.Timedelta(minutes=1)).values + latitude = track_i["latitude"].values + longitude = 
track_i["longitude"].values + minutes_coord = AuxCoord(minutes, long_name="cell_time", units="min") + latitude_coord = AuxCoord(latitude, long_name="latitude", units="degrees") + longitude_coord = AuxCoord(longitude, long_name="longitude", units="degrees") + + for aggregator in aggregators: + cubes_profile[aggregator.name()] = cubes_profile[aggregator.name()].merge() + for cube in cubes_profile[aggregator.name()]: + cube.add_aux_coord(minutes_coord, data_dims=cube.coord_dims("time")) + cube.add_aux_coord(latitude_coord, data_dims=cube.coord_dims("time")) + cube.add_aux_coord(longitude_coord, data_dims=cube.coord_dims("time")) + os.makedirs( + os.path.join(output_path, output_name, aggregator.name()), exist_ok=True + ) + savefile = os.path.join( + output_path, + output_name, + aggregator.name(), + output_name + "_" + aggregator.name() + "_" + str(int(cell)) + ".nc", + ) + save(cubes_profile[aggregator.name()], savefile) + + +def cog_cell( + cell, + Tracks=None, + M_total=None, + M_liquid=None, + M_frozen=None, + Mask=None, + savedir=None, +): + """ + Parameters + ---------- + cell : int + Integer id of cell to create masked cube for output. + + Tracks : optional + Default is None. + + M_total : subset of cube, optional + Default is None. + + M_liquid : subset of cube, optional + Default is None. + + M_frozen : subset of cube, optional + Default is None. + + savedir : str + Default is None. + + Returns + ------- + None + """ + + warnings.warn( + "cog_cell is depreciated and will be removed or significantly changed in v2.0.", + DeprecationWarning, + ) + + logging.debug("Start calculating COG for " + str(cell)) + Track = Tracks[Tracks["cell"] == cell] + constraint_time = Constraint( + time=lambda cell: Track.head(1)["time"].values[0] + <= cell + <= Track.tail(1)["time"].values[0] + ) + M_total_i = M_total.extract(constraint_time) + M_liquid_i = M_liquid.extract(constraint_time) + M_frozen_i = M_frozen.extract(constraint_time) + Mask_i = Mask.extract(constraint_time) + + savedir_cell = os.path.join(savedir, "cells", str(int(cell))) + os.makedirs(savedir_cell, exist_ok=True) + savefile_COG_total_i = os.path.join( + savedir_cell, "COG_total" + "_" + str(int(cell)) + ".h5" + ) + savefile_COG_liquid_i = os.path.join( + savedir_cell, "COG_liquid" + "_" + str(int(cell)) + ".h5" + ) + savefile_COG_frozen_i = os.path.join( + savedir_cell, "COG_frozen" + "_" + str(int(cell)) + ".h5" + ) + + Tracks_COG_total_i = calculate_cog(Track, M_total_i, Mask_i) + # Tracks_COG_total_list.append(Tracks_COG_total_i) + logging.debug("COG total loaded for " + str(cell)) + + Tracks_COG_liquid_i = calculate_cog(Track, M_liquid_i, Mask_i) + # Tracks_COG_liquid_list.append(Tracks_COG_liquid_i) + logging.debug("COG liquid loaded for " + str(cell)) + Tracks_COG_frozen_i = calculate_cog(Track, M_frozen_i, Mask_i) + # Tracks_COG_frozen_list.append(Tracks_COG_frozen_i) + logging.debug("COG frozen loaded for " + str(cell)) + + Tracks_COG_total_i.to_hdf(savefile_COG_total_i, "table") + Tracks_COG_liquid_i.to_hdf(savefile_COG_liquid_i, "table") + Tracks_COG_frozen_i.to_hdf(savefile_COG_frozen_i, "table") + logging.debug("individual COG calculated and saved to " + savedir_cell) + + +def lifetime_histogram( + Track, bin_edges=np.arange(0, 200, 20), density=False, return_values=False +): + """Compute the lifetime histogram of tracked cells. + + Parameters + ---------- + Track : pandas.DataFrame + Dataframe of linked features, containing the columns 'cell' + and 'time_cell'. 
+ + bin_edges : int or ndarray, optional + If bin_edges is an int, it defines the number of equal-width + bins in the given range. If bins is a ndarray, it defines a + monotonically increasing array of bin edges, including the + rightmost edge. The unit is minutes. + Default is np.arange(0, 200, 20). + + density : bool, optional + If False, the result will contain the number of samples in + each bin. If True, the result is the value of the probability + density function at the bin, normalized such that the integral + over the range is 1. Default is False. + + return_values : bool, optional + Bool determining wether the lifetimes of the features are + returned from this function. Default is False. + + Returns + ------- + hist : ndarray + The values of the histogram. + + bin_edges : ndarray + The edges of the histogram. + + bin_centers : ndarray + The centers of the histogram intervalls. + + minutes, optional : ndarray + Numpy.array of the lifetime of each feature in minutes. + Returned if return_values is True. + + """ + + Track_cell = Track.groupby("cell") + minutes = (Track_cell["time_cell"].max() / pd.Timedelta(minutes=1)).values + hist, bin_edges = np.histogram(minutes, bin_edges, density=density) + bin_centers = bin_edges[:-1] + 0.5 * np.diff(bin_edges) + if return_values: + return hist, bin_edges, bin_centers, minutes + else: + return hist, bin_edges, bin_centers + + +def velocity_histogram( + track, + bin_edges=np.arange(0, 30, 1), + density=False, + method_distance=None, + return_values=False, +): + """Create an velocity histogram of the tracked cells. If the DataFrame + does not contain a velocity column, the velocities are calculated. + + Parameters + ---------- + track: pandas.DataFrame + DataFrame of the linked features, containing the columns 'cell', + 'time' and either 'projection_x_coordinate' and + 'projection_y_coordinate' or 'latitude' and 'longitude'. + + bin_edges : int or ndarray, optional + If bin_edges is an int, it defines the number of equal-width + bins in the given range. If bins is a ndarray, it defines a + monotonically increasing array of bin edges, including the + rightmost edge. Default is np.arange(0, 30000, 500). + + density : bool, optional + If False, the result will contain the number of samples in + each bin. If True, the result is the value of the probability + density function at the bin, normalized such that the integral + over the range is 1. Default is False. + + methods_distance : {None, 'xy', 'latlon'}, optional + Method of distance calculation, used to calculate the velocity. + 'xy' uses the length of the vector between the two features, + 'latlon' uses the haversine distance. None checks wether the + required coordinates are present and starts with 'xy'. + Default is None. + + return_values : bool, optional + Bool determining wether the velocities of the features are + returned from this function. Default is False. + + Returns + ------- + hist : ndarray + The values of the histogram. + + bin_edges : ndarray + The edges of the histogram. + + velocities , optional : ndarray + Numpy array with the velocities of each feature. 
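+
+    Examples
+    --------
+    A minimal sketch for a single hypothetical cell observed twice,
+    moving 3000 m in 5 min (10 m/s), so the single velocity sample
+    lands in the second of the two bins:
+
+    >>> import numpy as np
+    >>> import pandas as pd
+    >>> from tobac.analysis import velocity_histogram
+    >>> track = pd.DataFrame(
+    ...     {
+    ...         "cell": [1, 1],
+    ...         "time": pd.to_datetime(["2000-01-01 00:00", "2000-01-01 00:05"]),
+    ...         "projection_x_coordinate": [0.0, 3000.0],
+    ...         "projection_y_coordinate": [0.0, 0.0],
+    ...     }
+    ... )
+    >>> hist, bin_edges = velocity_histogram(track, bin_edges=np.arange(0, 30, 10))
+    >>> hist
+    array([0, 1])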
+ + """ + + if "v" not in track.columns: + logging.info("calculate velocities") + track = calculate_velocity(track) + velocities = track["v"].values + hist, bin_edges = np.histogram( + velocities[~np.isnan(velocities)], bin_edges, density=density + ) + if return_values: + return hist, bin_edges, velocities + else: + return hist, bin_edges + + +def histogram_cellwise( + Track, variable=None, bin_edges=None, quantity="max", density=False +): + """Create a histogram of the maximum, minimum or mean of + a variable for the cells (series of features linked together + over multiple timesteps) of a track. Essentially a wrapper + of the numpy.histogram() method. + + Parameters + ---------- + Track : pandas.DataFrame + The track containing the variable to create the histogram + from. + + variable : string, optional + Column of the DataFrame with the variable on which the + histogram is to be based on. Default is None. + + bin_edges : int or ndarray, optional + If bin_edges is an int, it defines the number of + equal-width bins in the given range. If bins is a ndarray, + it defines a monotonically increasing array of bin edges, + including the rightmost edge. + + quantity : {'max', 'min', 'mean'}, optional + Flag determining wether to use maximum, minimum or mean + of a variable from all timeframes the cell covers. + Default is 'max'. + + density : bool, optional + If False, the result will contain the number of samples + in each bin. If True, the result is the value of the + probability density function at the bin, normalized such + that the integral over the range is 1. + Default is False. + + Returns + ------- + hist : ndarray + The values of the histogram + + bin_edges : ndarray + The edges of the histogram + + bin_centers : ndarray + The centers of the histogram intervalls + + Raises + ------ + ValueError + If quantity is not 'max', 'min' or 'mean'. + + """ + + Track_cell = Track.groupby("cell") + if quantity == "max": + variable_cell = Track_cell[variable].max().values + elif quantity == "min": + variable_cell = Track_cell[variable].min().values + elif quantity == "mean": + variable_cell = Track_cell[variable].mean().values + else: + raise ValueError("quantity unknown, must be max, min or mean") + hist, bin_edges = np.histogram(variable_cell, bin_edges, density=density) + bin_centers = bin_edges[:-1] + 0.5 * np.diff(bin_edges) + + return hist, bin_edges, bin_centers + + +def calculate_overlap( + track_1, track_2, min_sum_inv_distance=None, min_mean_inv_distance=None +): + """Count the number of time frames in which the + individual cells of two tracks are present together + and calculate their mean and summed inverse distance. + + Parameters + ---------- + track_1, track_2 : pandas.DataFrame + The tracks conaining the cells to analyze. + + min_sum_inv_distance : float, optional + Minimum of the inverse net distance for two + cells to be counted as overlapping. + Default is None. + + min_mean_inv_distance : float, optional + Minimum of the inverse mean distance for two cells + to be counted as overlapping. Default is None. + + Returns + ------- + overlap : pandas.DataFrame + DataFrame containing the columns cell_1 and cell_2 + with the index of the cells from the tracks, + n_overlap with the number of frames both cells are + present in, mean_inv_distance with the mean inverse + distance and sum_inv_distance with the summed + inverse distance of the cells. 
+
+    """
+
+    cells_1 = track_1["cell"].unique()
+    cells_2 = track_2["cell"].unique()
+    overlap_rows = []
+    for cell_1 in cells_1:
+        for cell_2 in cells_2:
+            track_1_i = track_1[track_1["cell"] == cell_1]
+            track_2_i = track_2[track_2["cell"] == cell_2]
+            track_1_i = track_1_i[track_1_i["time"].isin(track_2_i["time"])]
+            track_2_i = track_2_i[track_2_i["time"].isin(track_1_i["time"])]
+            if not track_1_i.empty:
+                n_overlap = len(track_1_i)
+                distances = []
+                for i in range(len(track_1_i)):
+                    distance = calculate_distance(
+                        track_1_i.iloc[[i]], track_2_i.iloc[[i]], method_distance="xy"
+                    )
+                    distances.append(distance)
+                mean_inv_distance = np.mean(1 / (1 + np.array(distances) / 1000))
+                sum_inv_distance = np.sum(1 / (1 + np.array(distances) / 1000))
+                overlap_rows.append(
+                    {
+                        "cell_1": cell_1,
+                        "cell_2": cell_2,
+                        "n_overlap": n_overlap,
+                        "mean_inv_distance": mean_inv_distance,
+                        "sum_inv_distance": sum_inv_distance,
+                    }
+                )
+    # DataFrame.append was removed in pandas 2.0, so collect the rows in a
+    # list and construct the DataFrame in one step
+    overlap = pd.DataFrame(
+        overlap_rows,
+        columns=[
+            "cell_1",
+            "cell_2",
+            "n_overlap",
+            "mean_inv_distance",
+            "sum_inv_distance",
+        ],
+    )
+    if min_sum_inv_distance:
+        overlap = overlap[(overlap["sum_inv_distance"] >= min_sum_inv_distance)]
+    if min_mean_inv_distance:
+        overlap = overlap[(overlap["mean_inv_distance"] >= min_mean_inv_distance)]
+
+    return overlap
diff --git a/tobac/analysis/feature_analysis.py b/tobac/analysis/feature_analysis.py
new file mode 100644
index 00000000..7366cbdd
--- /dev/null
+++ b/tobac/analysis/feature_analysis.py
@@ -0,0 +1,212 @@
+"""
+Perform analysis on the properties of detected features
+"""
+
+import logging
+import numpy as np
+
+from tobac.analysis.spatial import (
+    calculate_nearestneighbordistance,
+    calculate_area,
+)
+
+__all__ = (
+    "nearestneighbordistance_histogram",
+    "area_histogram",
+    "histogram_featurewise",
+)
+
+
+def nearestneighbordistance_histogram(
+    features,
+    bin_edges=np.arange(0, 30000, 500),
+    density=False,
+    method_distance=None,
+    return_values=False,
+):
+    """Create a nearest neighbor distance histogram of the features.
+    If the DataFrame does not contain a 'min_distance' column, the
+    distances are calculated.
+
+    Parameters
+    ----------
+    features : pandas.DataFrame
+        DataFrame of the detected features.
+
+    bin_edges : int or ndarray, optional
+        If bin_edges is an int, it defines the number of equal-width
+        bins in the given range. If bin_edges is an ndarray, it defines
+        a monotonically increasing array of bin edges, including the
+        rightmost edge. Default is np.arange(0, 30000, 500).
+
+    density : bool, optional
+        If False, the result will contain the number of samples in
+        each bin. If True, the result is the value of the probability
+        density function at the bin, normalized such that the integral
+        over the range is 1. Default is False.
+
+    method_distance : {None, 'xy', 'latlon'}, optional
+        Method of distance calculation. 'xy' uses the length of the
+        vector between the two features, 'latlon' uses the haversine
+        distance. None checks whether the required coordinates are
+        present and starts with 'xy'. Default is None.
+
+    return_values : bool, optional
+        Bool determining whether the nearest neighbor distances of the
+        features are returned from this function. Default is False.
+
+    Returns
+    -------
+    hist : ndarray
+        The values of the histogram.
+
+    bin_edges : ndarray
+        The edges of the histogram.
+
+    distances, optional : ndarray
+        A numpy array with the nearest neighbor distances of each
+        feature. Returned if return_values is True.
+
+    """
+
+    if "min_distance" not in features.columns:
+        logging.debug("calculate nearest neighbor distances")
+        features = calculate_nearestneighbordistance(
+            features, method_distance=method_distance
+        )
+    distances = features["min_distance"].values
+    hist, bin_edges = np.histogram(
+        distances[~np.isnan(distances)], bin_edges, density=density
+    )
+    if return_values:
+        return hist, bin_edges, distances
+    else:
+        return hist, bin_edges
+
+
+def area_histogram(
+    features,
+    mask,
+    bin_edges=np.arange(0, 30000, 500),
+    density=False,
+    method_area=None,
+    return_values=False,
+    representative_area=False,
+):
+    """Create an area histogram of the features. If the DataFrame
+    does not contain an area column, the areas are calculated.
+
+    Parameters
+    ----------
+    features : pandas.DataFrame
+        DataFrame of the features.
+
+    mask : iris.cube.Cube
+        Cube containing mask (int for tracked volumes, 0
+        everywhere else). Needs to contain either
+        projection_x_coordinate and projection_y_coordinate or
+        latitude and longitude coordinates. The output of a
+        segmentation should be used here.
+
+    bin_edges : int or ndarray, optional
+        If bin_edges is an int, it defines the number of
+        equal-width bins in the given range. If bin_edges is an
+        ndarray, it defines a monotonically increasing array of
+        bin edges, including the rightmost edge.
+        Default is np.arange(0, 30000, 500).
+
+    density : bool, optional
+        If False, the result will contain the number of samples
+        in each bin. If True, the result is the value of the
+        probability density function at the bin, normalized such
+        that the integral over the range is 1. Default is False.
+
+    method_area : {None, 'xy', 'latlon'}, optional
+        Flag determining how the area is calculated, passed on to
+        calculate_area. Default is None.
+
+    return_values : bool, optional
+        Bool determining whether the areas of the features are
+        returned from this function. Default is False.
+
+    representative_area : bool, optional
+        If False, no weights will be associated with the values.
+        If True, the weights for each area will be the areas
+        themselves, i.e. each bin count will have the value of
+        the sum of all areas within the edges of the bin.
+        Default is False.
+
+    Returns
+    -------
+    hist : ndarray
+        The values of the histogram.
+
+    bin_edges : ndarray
+        The edges of the histogram.
+
+    bin_centers : ndarray
+        The centers of the histogram intervals.
+
+    areas : ndarray, optional
+        A numpy array approximating the area of each feature.
+        Returned if return_values is True.
+
+    """
+
+    if "area" not in features.columns:
+        logging.info("calculate area")
+        features = calculate_area(features, mask, method_area)
+    areas = features["area"].values
+    # restrict to non-NaN values:
+    areas = areas[~np.isnan(areas)]
+    if representative_area:
+        weights = areas
+    else:
+        weights = None
+    hist, bin_edges = np.histogram(areas, bin_edges, density=density, weights=weights)
+    bin_centers = bin_edges[:-1] + 0.5 * np.diff(bin_edges)
+
+    if return_values:
+        return hist, bin_edges, bin_centers, areas
+    else:
+        return hist, bin_edges, bin_centers
+
+
+def histogram_featurewise(Track, variable=None, bin_edges=None, density=False):
+    """Create a histogram of a variable from the features
+    (detected objects at a single time step) of a track.
+    Essentially a wrapper of the numpy.histogram() method.
+
+    Parameters
+    ----------
+    Track : pandas.DataFrame
+        The track containing the variable to create the
+        histogram from.
+
+    variable : string, optional
+        Column of the DataFrame with the variable the histogram
+        is based on. Default is None.
+
+    bin_edges : int or ndarray, optional
+        If bin_edges is an int, it defines the number of
+        equal-width bins in the given range. If bin_edges is
+        a sequence, it defines a monotonically increasing
+        array of bin edges, including the rightmost edge.
+
+    density : bool, optional
+        If False, the result will contain the number of
+        samples in each bin. If True, the result is the
+        value of the probability density function at the
+        bin, normalized such that the integral over the
+        range is 1. Default is False.
+
+    Returns
+    -------
+    hist : ndarray
+        The values of the histogram.
+
+    bin_edges : ndarray
+        The edges of the histogram.
+
+    bin_centers : ndarray
+        The centers of the histogram intervals.
+
+    """
+
+    hist, bin_edges = np.histogram(Track[variable].values, bin_edges, density=density)
+    bin_centers = bin_edges[:-1] + 0.5 * np.diff(bin_edges)
+
+    return hist, bin_edges, bin_centers
diff --git a/tobac/analysis/spatial.py b/tobac/analysis/spatial.py
new file mode 100644
index 00000000..8f1caa6d
--- /dev/null
+++ b/tobac/analysis/spatial.py
@@ -0,0 +1,449 @@
+"""
+Calculate spatial properties (distances, velocities, areas, volumes) of tracked objects
+"""
+
+import logging
+from itertools import combinations
+
+import numpy as np
+import pandas as pd
+import xarray as xr
+from iris.analysis.cartography import area_weights
+
+from tobac.utils.bulk_statistics import get_statistics_from_mask
+from tobac.utils.internal.basic import find_vertical_axis_from_coord
+from tobac.utils import decorators
+
+__all__ = (
+    "haversine",
+    "calculate_distance",
+    "calculate_velocity",
+    "calculate_velocity_individual",
+    "calculate_areas_2Dlatlon",
+    "calculate_area",
+)
+
+
+def haversine(lat1, lon1, lat2, lon2):
+    """Computes the Haversine distance in kilometers.
+
+    Calculates the Haversine distance between two points
+    (based on the implementation in CIS, https://github.com/cedadev/cis).
+
+    Parameters
+    ----------
+    lat1, lon1 : array of latitude, longitude
+        First point or points as array in degrees.
+
+    lat2, lon2 : array of latitude, longitude
+        Second point or points as array in degrees.
+
+    Returns
+    -------
+    arclen * RADIUS_EARTH : array
+        Array of distance(s) between the two points (or point
+        arrays) in kilometers.
+
+    """
+
+    RADIUS_EARTH = 6378.0
+    lat1 = np.radians(lat1)
+    lat2 = np.radians(lat2)
+    lon1 = np.radians(lon1)
+    lon2 = np.radians(lon2)
+    arclen = 2 * np.arcsin(
+        np.sqrt(
+            (np.sin((lat2 - lat1) / 2)) ** 2
+            + np.cos(lat1) * np.cos(lat2) * (np.sin((lon2 - lon1) / 2)) ** 2
+        )
+    )
+    return arclen * RADIUS_EARTH
+
+
+def calculate_distance(feature_1, feature_2, method_distance=None):
+    """Compute the distance between two features. It is based on
+    either lat/lon coordinates or x/y coordinates.
+
+    Parameters
+    ----------
+    feature_1, feature_2 : pandas.DataFrame or pandas.Series
+        Dataframes containing multiple features or pandas.Series
+        of one feature. Need to contain either projection_x_coordinate
+        and projection_y_coordinate or latitude and longitude
+        coordinates.
+
+    method_distance : {None, 'xy', 'latlon'}, optional
+        Method of distance calculation. 'xy' uses the length of the
+        vector between the two features, 'latlon' uses the haversine
+        distance. None checks whether the required coordinates are
+        present and starts with 'xy'. Default is None.
+
+    Returns
+    -------
+    distance : float or pandas.Series
+        Float with the distance between the two features in meters if
+        the input are two pandas.Series containing one feature,
+        pandas.Series of the distances if one of the inputs contains
+        multiple features.
+
+    """
+    if method_distance is None:
+        if (
+            ("projection_x_coordinate" in feature_1)
+            and ("projection_y_coordinate" in feature_1)
+            and ("projection_x_coordinate" in feature_2)
+            and ("projection_y_coordinate" in feature_2)
+        ):
+            method_distance = "xy"
+        elif (
+            ("latitude" in feature_1)
+            and ("longitude" in feature_1)
+            and ("latitude" in feature_2)
+            and ("longitude" in feature_2)
+        ):
+            method_distance = "latlon"
+        else:
+            raise ValueError(
+                "either latitude/longitude or projection_x_coordinate/projection_y_coordinate have to be present to calculate distances"
+            )
+
+    if method_distance == "xy":
+        distance = np.sqrt(
+            (
+                feature_1["projection_x_coordinate"]
+                - feature_2["projection_x_coordinate"]
+            )
+            ** 2
+            + (
+                feature_1["projection_y_coordinate"]
+                - feature_2["projection_y_coordinate"]
+            )
+            ** 2
+        )
+    elif method_distance == "latlon":
+        distance = 1000 * haversine(
+            feature_1["latitude"],
+            feature_1["longitude"],
+            feature_2["latitude"],
+            feature_2["longitude"],
+        )
+    else:
+        raise ValueError("method undefined")
+    return distance
+
+
+def calculate_velocity_individual(feature_old, feature_new, method_distance=None):
+    """Calculate the mean velocity of a feature between two timeframes.
+
+    Parameters
+    ----------
+    feature_old : pandas.Series
+        pandas.Series of a feature at a certain timeframe. Needs to
+        contain a 'time' column and either projection_x_coordinate
+        and projection_y_coordinate or latitude and longitude coordinates.
+
+    feature_new : pandas.Series
+        pandas.Series of the same feature at a later timeframe. Needs
+        to contain a 'time' column and either projection_x_coordinate
+        and projection_y_coordinate or latitude and longitude coordinates.
+
+    method_distance : {None, 'xy', 'latlon'}, optional
+        Method of distance calculation, used to calculate the velocity.
+        'xy' uses the length of the vector between the two features,
+        'latlon' uses the haversine distance. None checks whether the
+        required coordinates are present and starts with 'xy'.
+        Default is None.
+
+    Returns
+    -------
+    velocity : float
+        Value of the approximate velocity.
+
+    """
+
+    distance = calculate_distance(
+        feature_old, feature_new, method_distance=method_distance
+    )
+    diff_time = (feature_new["time"] - feature_old["time"]).total_seconds()
+    velocity = distance / diff_time
+    return velocity
+
+
+def calculate_velocity(track, method_distance=None):
+    """Calculate the velocities of a set of linked features.
+
+    Parameters
+    ----------
+    track : pandas.DataFrame
+        Dataframe of linked features, containing the columns 'cell',
+        'time' and either 'projection_x_coordinate' and
+        'projection_y_coordinate' or 'latitude' and 'longitude'.
+
+    method_distance : {None, 'xy', 'latlon'}, optional
+        Method of distance calculation, used to calculate the
+        velocity. 'xy' uses the length of the vector between the
+        two features, 'latlon' uses the haversine distance. None
+        checks whether the required coordinates are present and
+        starts with 'xy'. Default is None.
+
+    Returns
+    -------
+    track : pandas.DataFrame
+        DataFrame from the input, with an additional column 'v'
+        containing the velocity of each feature at every possible
+        timestep.
+    """
+
+    for cell_i, track_i in track.groupby("cell"):
+        index = track_i.index.values
+        for i, index_i in enumerate(index[:-1]):
+            velocity = calculate_velocity_individual(
+                track_i.loc[index[i]],
+                track_i.loc[index[i + 1]],
+                method_distance=method_distance,
+            )
+            track.at[index_i, "v"] = velocity
+    return track
+
+
+def calculate_nearestneighbordistance(features, method_distance=None):
+    """Calculate the distance between a feature and the nearest other
+    feature in the same timeframe.
+
+    Parameters
+    ----------
+    features : pandas.DataFrame
+        DataFrame of the features whose nearest neighbor distance is to
+        be calculated. Needs to contain either projection_x_coordinate
+        and projection_y_coordinate or latitude and longitude coordinates.
+
+    method_distance : {None, 'xy', 'latlon'}, optional
+        Method of distance calculation. 'xy' uses the length of the vector
+        between the two features, 'latlon' uses the haversine distance.
+        None checks whether the required coordinates are present and starts
+        with 'xy'. Default is None.
+
+    Returns
+    -------
+    features : pandas.DataFrame
+        DataFrame of the features with a new column 'min_distance',
+        containing the calculated minimal distance to other features.
+
+    """
+
+    features["min_distance"] = np.nan
+    for time_i, features_i in features.groupby("time"):
+        logging.debug(str(time_i))
+        indices = combinations(features_i.index.values, 2)
+        # Calculate the distance between every pair of features present at this timestep
+        distances = []
+        for index_1, index_2 in indices:
+            distance = calculate_distance(
+                features_i.loc[index_1],
+                features_i.loc[index_2],
+                method_distance=method_distance,
+            )
+            distances.append(
+                pd.DataFrame(
+                    {"index_1": index_1, "index_2": index_2, "distance": distance},
+                    index=[0],
+                )
+            )
+        if distances:
+            distances = pd.concat(distances, ignore_index=True)
+            for i in features_i.index:
+                min_distance = distances.loc[
+                    (distances["index_1"] == i) | (distances["index_2"] == i),
+                    "distance",
+                ].min()
+                features.at[i, "min_distance"] = min_distance
+    return features
+
+
+def calculate_areas_2Dlatlon(_2Dlat_coord, _2Dlon_coord):
+    """Calculate an array of cell areas when given two 2D arrays
+    of latitude and longitude values
+
+    NOTE: This currently assumes that the lat/lon grid is orthogonal,
+    which is not strictly true! It's close enough for most cases, but
+    should be updated in future to use the cross product of the
+    distances to the neighbouring cells. This will require the use
+    of a more advanced calculation. I would advise using pyproj
+    at some point in the future to solve this issue and replace
+    haversine distance.
+
+    Parameters
+    ----------
+    _2Dlat_coord : AuxCoord
+        Iris auxiliary coordinate containing a 2d grid of latitudes
+        for each point.
+
+    _2Dlon_coord : AuxCoord
+        Iris auxiliary coordinate containing a 2d grid of longitudes
+        for each point.
+
+    Returns
+    -------
+    area : ndarray
+        A numpy array approximating the area of each cell.
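+
+    Examples
+    --------
+    A minimal sketch on a small regular 1-degree grid (only the shape
+    of the result is checked here; the values follow the haversine
+    approximation described above):
+
+    >>> import numpy as np
+    >>> from iris.coords import AuxCoord
+    >>> lat = AuxCoord(np.stack([np.arange(3)] * 3, axis=1), units="degrees")
+    >>> lon = AuxCoord(np.stack([np.arange(3)] * 3, axis=0), units="degrees")
+    >>> calculate_areas_2Dlatlon(lat, lon).shape
+    (3, 3)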
+
+    """
+
+    hdist1 = (
+        haversine(
+            _2Dlat_coord.points[:-1],
+            _2Dlon_coord.points[:-1],
+            _2Dlat_coord.points[1:],
+            _2Dlon_coord.points[1:],
+        )
+        * 1000
+    )
+
+    dists1 = np.zeros(_2Dlat_coord.points.shape)
+    dists1[0] = hdist1[0]
+    dists1[-1] = hdist1[-1]
+    dists1[1:-1] = (hdist1[0:-1] + hdist1[1:]) * 0.5
+
+    hdist2 = (
+        haversine(
+            _2Dlat_coord.points[:, :-1],
+            _2Dlon_coord.points[:, :-1],
+            _2Dlat_coord.points[:, 1:],
+            _2Dlon_coord.points[:, 1:],
+        )
+        * 1000
+    )
+
+    dists2 = np.zeros(_2Dlat_coord.points.shape)
+    dists2[:, 0] = hdist2[:, 0]
+    dists2[:, -1] = hdist2[:, -1]
+    dists2[:, 1:-1] = (hdist2[:, 0:-1] + hdist2[:, 1:]) * 0.5
+
+    area = dists1 * dists2
+
+    return area
+
+
+@decorators.xarray_to_iris()
+def calculate_area(features, mask, method_area=None, vertical_coord=None):
+    """Calculate the area of the segments for each feature.
+
+    Parameters
+    ----------
+    features : pandas.DataFrame
+        DataFrame of the features whose area is to be calculated.
+
+    mask : iris.cube.Cube
+        Cube containing mask (int for tracked volumes, 0 everywhere
+        else). Needs to contain either projection_x_coordinate and
+        projection_y_coordinate or latitude and longitude
+        coordinates.
+
+    method_area : {None, 'xy', 'latlon'}, optional
+        Flag determining how the area is calculated. 'xy' uses the
+        areas of the individual pixels, 'latlon' uses the
+        area_weights method of iris.analysis.cartography, None
+        checks whether the required coordinates are present and
+        starts with 'xy'. Default is None.
+
+    vertical_coord : None | str, optional (default: None)
+        Name of the vertical coordinate. If None, tries to auto-detect.
+        It looks for the coordinate or the dimension name corresponding
+        to the string.
+
+    Returns
+    -------
+    features : pandas.DataFrame
+        DataFrame of the features with a new column 'area',
+        containing the calculated areas.
+
+    Raises
+    ------
+    ValueError
+        If neither latitude/longitude nor
+        projection_x_coordinate/projection_y_coordinate are
+        present in mask_coords.
+
+        If latitude/longitude coordinate shapes are not supported.
+
+        If method is undefined, i.e. method is neither None,
+        'xy' nor 'latlon'.
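+
+    Examples
+    --------
+    A minimal sketch (``features`` stands in for a feature detection
+    output and ``mask`` for the matching segmentation mask; the calls
+    are not executed here):
+
+    >>> features = calculate_area(features, mask)  # doctest: +SKIP
+    >>> features["area"]  # doctest: +SKIP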
+
+    """
+
+    features["area"] = np.nan
+
+    # Get the first time step of mask to remove time dimension of calculated areas
+    mask_slice = next(mask.slices_over("time"))
+    is_3d = len(mask_slice.core_data().shape) == 3
+    if is_3d:
+        vertical_coord_name = find_vertical_axis_from_coord(mask_slice, vertical_coord)
+        # Need to get var_name as xarray uses this to label dims
+        collapse_dim = mask_slice.coords(vertical_coord_name)[0].var_name
+    else:
+        collapse_dim = None
+
+    mask_coords = [coord.name() for coord in mask_slice.coords()]
+    if method_area is None:
+        if ("projection_x_coordinate" in mask_coords) and (
+            "projection_y_coordinate" in mask_coords
+        ):
+            method_area = "xy"
+        elif ("latitude" in mask_coords) and ("longitude" in mask_coords):
+            method_area = "latlon"
+        else:
+            raise ValueError(
+                "either latitude/longitude or projection_x_coordinate/projection_y_coordinate have to be present to calculate area"
+            )
+    logging.debug("calculating area using method " + method_area)
+    if method_area == "xy":
+        if not (
+            mask_slice.coord("projection_x_coordinate").has_bounds()
+            and mask_slice.coord("projection_y_coordinate").has_bounds()
+        ):
+            mask_slice.coord("projection_x_coordinate").guess_bounds()
+            mask_slice.coord("projection_y_coordinate").guess_bounds()
+        area = np.outer(
+            np.diff(mask_slice.coord("projection_y_coordinate").bounds, axis=1),
+            np.diff(mask_slice.coord("projection_x_coordinate").bounds, axis=1),
+        )
+    elif method_area == "latlon":
+        if (mask_slice.coord("latitude").ndim == 1) and (
+            mask_slice.coord("longitude").ndim == 1
+        ):
+            if not (
+                mask_slice.coord("latitude").has_bounds()
+                and mask_slice.coord("longitude").has_bounds()
+            ):
+                mask_slice.coord("latitude").guess_bounds()
+                mask_slice.coord("longitude").guess_bounds()
+            area = area_weights(mask_slice, normalize=False)
+        elif (
+            mask_slice.coord("latitude").ndim == 2
+            and mask_slice.coord("longitude").ndim == 2
+        ):
+            area = calculate_areas_2Dlatlon(
+                mask_slice.coord("latitude"), mask_slice.coord("longitude")
+            )
+        else:
+            raise ValueError("latitude/longitude coordinate shape not supported")
+    else:
+        raise ValueError("method undefined")
+
+    # Area needs to be a DataArray for get_statistics_from_mask, but otherwise dims/coords don't actually matter
+    area = xr.DataArray(area, dims=("a", "b"))
+
+    features = get_statistics_from_mask(
+        features,
+        mask,
+        area,
+        statistic={"area": np.sum},
+        default=np.nan,
+        collapse_dim=collapse_dim,
+    )
+
+    return features
diff --git a/tobac/feature_detection.py b/tobac/feature_detection.py
index d98f9ed0..a21d8db1 100644
--- a/tobac/feature_detection.py
+++ b/tobac/feature_detection.py
@@ -16,6 +16,7 @@ diverse datasets. Geoscientific Model Development, 12(11), 4551-4570.
 """
+
 from __future__ import annotations
 from typing import Union, Callable
 import warnings
@@ -1127,7 +1128,7 @@ def feature_detection_multithreshold_timestep(
     return features_thresholds
 
 
-@decorators.xarray_to_iris
+@decorators.xarray_to_iris()
 def feature_detection_multithreshold(
     field_in: iris.cube.Cube,
     dxy: float = None,
diff --git a/tobac/plotting.py b/tobac/plotting.py
index d4e1d72e..63cc6c09 100644
--- a/tobac/plotting.py
+++ b/tobac/plotting.py
@@ -14,13 +14,17 @@ 12(11), 4551-4570.
""" -import matplotlib as mpl import warnings import logging -from .analysis import lifetime_histogram -from .analysis import histogram_cellwise, histogram_featurewise import numpy as np +import matplotlib as mpl + +from tobac.analysis.cell_analysis import ( + lifetime_histogram, + histogram_cellwise, +) +from tobac.analysis.feature_analysis import histogram_featurewise def plot_tracks_mask_field_loop( diff --git a/tobac/segmentation.py b/tobac/segmentation.py index cfb3d8cd..486100df 100644 --- a/tobac/segmentation.py +++ b/tobac/segmentation.py @@ -29,6 +29,7 @@ diverse datasets. Geoscientific Model Development, 12(11), 4551-4570. """ + import copy import logging @@ -330,7 +331,7 @@ def segmentation_2D( ) -@decorators.xarray_to_iris +@decorators.xarray_to_iris() def segmentation_timestep( field_in: iris.cube.Cube, features_in: pd.DataFrame, @@ -1117,7 +1118,7 @@ def check_add_unseeded_across_bdrys( return markers_out -@decorators.xarray_to_iris +@decorators.xarray_to_iris() def segmentation( features: pd.DataFrame, field: iris.cube.Cube, diff --git a/tobac/tests/test_analysis_spatial.py b/tobac/tests/test_analysis_spatial.py new file mode 100644 index 00000000..0ed2c16a --- /dev/null +++ b/tobac/tests/test_analysis_spatial.py @@ -0,0 +1,588 @@ +""" +Test spatial analysis functions +""" + +from datetime import datetime +import pytest +import numpy as np +import pandas as pd +import xarray as xr +from iris.analysis.cartography import area_weights + +from tobac.analysis.spatial import ( + calculate_distance, + calculate_velocity_individual, + calculate_velocity, + calculate_nearestneighbordistance, + calculate_area, + calculate_areas_2Dlatlon, +) + + +def test_calculate_distance(): + test_features = pd.DataFrame( + { + "feature": [1, 2], + "frame": [0, 0], + "time": [ + datetime(2000, 1, 1), + datetime(2000, 1, 1), + ], + } + ) + + with pytest.raises(ValueError): + calculate_distance(test_features.iloc[0], test_features.iloc[1]) + + test_features = pd.DataFrame( + { + "feature": [1, 2], + "frame": [0, 0], + "time": [ + datetime(2000, 1, 1), + datetime(2000, 1, 1), + ], + "projection_x_coordinate": [0, 1000], + "projection_y_coordinate": [0, 0], + } + ) + + assert calculate_distance(test_features.iloc[0], test_features.iloc[1]) == 1000 + + test_features = pd.DataFrame( + { + "feature": [1, 2], + "frame": [0, 0], + "time": [ + datetime(2000, 1, 1), + datetime(2000, 1, 1), + ], + "longitude": [0, 1], + "latitude": [0, 0], + } + ) + + assert calculate_distance( + test_features.iloc[0], test_features.iloc[1] + ) == pytest.approx(1.11e5, rel=1e4) + + with pytest.raises(ValueError): + calculate_distance( + test_features.iloc[0], + test_features.iloc[1], + method_distance="invalid_method", + ) + + +def test_calculate_velocity_individual(): + test_features = pd.DataFrame( + { + "feature": [1, 2], + "frame": [0, 1], + "time": [ + datetime(2000, 1, 1, 0, 0), + datetime(2000, 1, 1, 0, 10), + ], + "projection_x_coordinate": [0, 6000], + "projection_y_coordinate": [0, 0], + } + ) + + assert ( + calculate_velocity_individual(test_features.iloc[0], test_features.iloc[1]) + == 10 + ) + + +def test_calculate_velocity(): + test_features = pd.DataFrame( + { + "feature": [1, 2], + "frame": [0, 1], + "time": [ + datetime(2000, 1, 1, 0, 0), + datetime(2000, 1, 1, 0, 10), + ], + "projection_x_coordinate": [0, 6000], + "projection_y_coordinate": [0, 0], + "cell": [1, 1], + } + ) + + assert calculate_velocity(test_features).at[0, "v"] == 10 + + +def test_calculate_nearestneighbordistance(): + test_features = 
pd.DataFrame( + { + "feature": [1, 2, 3, 4], + "frame": [0, 0, 1, 1], + "time": [ + datetime(2000, 1, 1, 0, 0), + datetime(2000, 1, 1, 0, 0), + datetime(2000, 1, 1, 0, 10), + datetime(2000, 1, 1, 0, 10), + ], + "projection_x_coordinate": [0, 1000, 0, 2000], + "projection_y_coordinate": [0, 0, 0, 0], + "cell": [1, 2, 1, 2], + } + ) + + assert calculate_nearestneighbordistance(test_features)[ + "min_distance" + ].to_list() == [1000, 1000, 2000, 2000] + + test_features = pd.DataFrame( + { + "feature": [1, 2], + "frame": [0, 1], + "time": [ + datetime(2000, 1, 1, 0, 0), + datetime(2000, 1, 1, 0, 10), + ], + "projection_x_coordinate": [0, 6000], + "projection_y_coordinate": [0, 0], + "cell": [1, 1], + } + ) + + assert np.all( + np.isnan(calculate_nearestneighbordistance(test_features)["min_distance"]) + ) + + +def test_calculate_area(): + """ + Test the calculate_area function for 2D and 3D masks + """ + + test_labels = np.array( + [ + [ + [0, 0, 0, 0, 0], + [0, 1, 0, 2, 0], + [0, 1, 0, 2, 0], + [0, 1, 0, 0, 0], + [0, 0, 0, 0, 0], + ], + ], + dtype=int, + ) + + test_labels = xr.DataArray( + test_labels, + dims=("time", "projection_y_coordinate", "projection_x_coordinate"), + coords={ + "time": [datetime(2000, 1, 1)], + "projection_y_coordinate": np.arange(5), + "projection_x_coordinate": np.arange(5), + }, + ) + + # We need to do this to avoid round trip bug with xarray to iris conversion + test_cube = test_labels.to_iris() + test_cube = test_cube.copy(test_cube.core_data().filled()) + + test_features = pd.DataFrame( + { + "feature": [1, 2], + "frame": [0, 0], + "time": [ + datetime(2000, 1, 1), + datetime(2000, 1, 1), + ], + } + ) + + expected_areas = np.array([3, 2]) + + area = calculate_area(test_features, test_cube) + + assert np.all(area["area"] == expected_areas) + + test_labels = np.array( + [ + [ + [ + [0, 0, 0, 0, 0], + [0, 1, 0, 2, 0], + [0, 1, 0, 2, 0], + [0, 1, 0, 0, 0], + [0, 0, 0, 0, 0], + ], + [ + [0, 0, 0, 0, 0], + [0, 1, 0, 0, 0], + [0, 1, 0, 3, 0], + [0, 1, 0, 3, 0], + [0, 0, 0, 0, 0], + ], + ], + ], + dtype=int, + ) + + test_labels = xr.DataArray( + test_labels, + dims=( + "time", + "model_level_number", + "projection_y_coordinate", + "projection_x_coordinate", + ), + coords={ + "time": [datetime(2000, 1, 1)], + "model_level_number": np.arange(2), + "projection_y_coordinate": np.arange(5), + "projection_x_coordinate": np.arange(5), + }, + ) + + # We need to do this to avoid round trip bug with xarray to iris conversion + test_cube = test_labels.to_iris() + test_cube = test_cube.copy(test_cube.core_data().filled()) + + test_features = pd.DataFrame( + { + "feature": [1, 2, 3], + "frame": [0, 0, 0], + "time": [ + datetime(2000, 1, 1), + datetime(2000, 1, 1), + datetime(2000, 1, 1), + ], + } + ) + + expected_areas = np.array([3, 2, 2]) + + area = calculate_area(test_features, test_cube) + + assert np.all(area["area"] == expected_areas) + + test_labels = xr.DataArray( + test_labels, + dims=( + "time", + "model_level_number", + "hdim_0", + "hdim_1", + ), + coords={ + "time": [datetime(2000, 1, 1)], + "model_level_number": np.arange(2), + }, + ) + + # Test failure to find valid coordinates + with pytest.raises(ValueError): + calculate_area(test_features, test_labels) + + # Test failure for invalid method + with pytest.raises(ValueError): + calculate_area(test_features, test_labels, method_area="invalid_method") + + +def test_calculate_area_latlon(): + # Test with latitude/longitude + test_labels = np.array( + [ + [ + [0, 0, 0, 0, 0], + [0, 1, 0, 2, 0], + [0, 1, 0, 2, 0], + [0, 1, 0, 
0, 0], + [0, 0, 0, 0, 0], + ], + [ + [0, 0, 0, 0, 0], + [0, 4, 0, 0, 0], + [0, 4, 0, 3, 0], + [0, 4, 0, 3, 0], + [0, 0, 0, 0, 0], + ], + ], + dtype=int, + ) + + test_labels = xr.DataArray( + test_labels, + dims=( + "time", + "latitude", + "longitude", + ), + coords={ + "time": [datetime(2000, 1, 1), datetime(2000, 1, 1, 1)], + "latitude": xr.DataArray( + np.arange(5), dims="latitude", attrs={"units": "degrees"} + ), + "longitude": xr.DataArray( + np.arange(5), dims="longitude", attrs={"units": "degrees"} + ), + }, + ) + + test_features = pd.DataFrame( + { + "feature": [1, 2, 3, 4], + "frame": [0, 0, 1, 1], + "time": [ + datetime(2000, 1, 1, 0), + datetime(2000, 1, 1, 0), + datetime(2000, 1, 1, 1), + datetime(2000, 1, 1, 1), + ], + } + ) + + area = calculate_area(test_features, test_labels) + + expected_areas = np.array([3, 2, 2, 3]) * 1.11e5**2 + + assert np.all(np.isclose(area["area"], expected_areas, atol=1e8)) + + # Test invalid lat/lon dimensions + + # Test 1D lat but 2D lon + test_labels = xr.DataArray( + test_labels.values, + dims=( + "time", + "y_dim", + "x_dim", + ), + coords={ + "time": [datetime(2000, 1, 1), datetime(2000, 1, 1, 1)], + "latitude": xr.DataArray( + np.arange(5), dims="y_dim", attrs={"units": "degrees"} # 1D lat + ), + "longitude": xr.DataArray( + np.tile(np.arange(5), (5, 1)), + dims=("y_dim", "x_dim"), # 2D lon + attrs={"units": "degrees"}, + ), + }, + ) + + with pytest.raises(ValueError): + calculate_area(test_features, test_labels, method_area="latlon") + + # Test 3D lat/lon + test_labels = xr.DataArray( + np.tile(test_labels.values[:, np.newaxis, ...], (1, 2, 1, 1)), + dims=( + "time", + "z_dim", + "y_dim", + "x_dim", + ), + coords={ + "time": [datetime(2000, 1, 1), datetime(2000, 1, 1, 1)], + "latitude": xr.DataArray( + np.tile(np.arange(5)[:, np.newaxis], (2, 1, 5)), + dims=("z_dim", "y_dim", "x_dim"), + attrs={"units": "degrees"}, + ), + "longitude": xr.DataArray( + np.tile(np.arange(5), (2, 5, 1)), + dims=("z_dim", "y_dim", "x_dim"), + attrs={"units": "degrees"}, + ), + }, + ) + + with pytest.raises(ValueError): + calculate_area(test_features, test_labels, method_area="latlon") + + +def test_calculate_area_1D_latlon(): + """ + Test area calculation using 1D lat/lon coords + """ + test_labels = np.array( + [ + [ + [0, 0, 0, 0, 0], + [0, 1, 0, 2, 0], + [0, 1, 0, 2, 0], + [0, 1, 0, 0, 0], + [0, 0, 0, 0, 0], + ], + ], + dtype=int, + ) + + test_labels = xr.DataArray( + test_labels, + dims=("time", "latitude", "longitude"), + coords={ + "time": [datetime(2000, 1, 1)], + "latitude": xr.DataArray( + np.arange(5), dims=("latitude",), attrs={"units": "degrees"} + ), + "longitude": xr.DataArray( + np.arange(5), dims=("longitude",), attrs={"units": "degrees"} + ), + }, + ) + + # We need to do this to avoid round trip bug with xarray to iris conversion + test_cube = test_labels.to_iris() + test_cube = test_cube.copy(test_cube.core_data().filled()) + + test_features = pd.DataFrame( + { + "feature": [1, 2], + "frame": [0, 0], + "time": [ + datetime(2000, 1, 1), + datetime(2000, 1, 1), + ], + } + ) + + # Calculate expected areas + copy_of_test_cube = test_cube.copy() + copy_of_test_cube.coord("latitude").guess_bounds() + copy_of_test_cube.coord("longitude").guess_bounds() + area_array = area_weights(copy_of_test_cube, normalize=False) + + expected_areas = np.array( + [np.sum(area_array[test_labels.data == i]) for i in [1, 2]] + ) + + area = calculate_area(test_features, test_cube) + + assert np.all(area["area"] == expected_areas) + + +def test_calculate_areas_2Dlatlon(): + 
""" + Test calculation of area array from 2D lat/lon coords + Note, in future this needs to be updated to account for non-orthogonal lat/lon arrays + """ + + test_labels = np.ones([1, 5, 5], dtype=int) + + test_labels = xr.DataArray( + test_labels, + dims=("time", "latitude", "longitude"), + coords={ + "time": [datetime(2000, 1, 1)], + "latitude": xr.DataArray( + np.arange(5), dims=("latitude",), attrs={"units": "degrees"} + ), + "longitude": xr.DataArray( + np.arange(5), dims=("longitude",), attrs={"units": "degrees"} + ), + }, + ) + + test_cube = test_labels.to_iris() + test_cube = test_cube.copy(test_cube.core_data().filled()) + copy_of_test_cube = test_cube.copy() + copy_of_test_cube.coord("latitude").guess_bounds() + copy_of_test_cube.coord("longitude").guess_bounds() + area_array = area_weights(copy_of_test_cube, normalize=False) + + lat_2d = xr.DataArray( + np.stack([np.arange(5)] * 5, axis=1), + dims=("y", "x"), + attrs={"units": "degrees"}, + ) + + lon_2d = xr.DataArray( + np.stack([np.arange(5)] * 5, axis=0), + dims=("y", "x"), + attrs={"units": "degrees"}, + ) + + test_labels = xr.DataArray( + test_labels, + dims=("time", "y", "x"), + coords={ + "time": [datetime(2000, 1, 1)], + "latitude": lat_2d, + "longitude": lon_2d, + }, + ) + + test_cube = test_labels.to_iris() + test_cube = test_cube.copy(test_cube.core_data().filled()) + + assert np.allclose( + calculate_areas_2Dlatlon( + test_cube.coord("latitude"), test_cube.coord("longitude") + ), + area_array, + rtol=0.01, + ) + + +def test_calculate_area_2D_latlon(): + """ + Test area calculation using 2D lat/lon coords + """ + + test_labels = np.array( + [ + [ + [0, 0, 0, 0, 0], + [0, 1, 0, 2, 0], + [0, 1, 0, 2, 0], + [0, 1, 0, 0, 0], + [0, 0, 0, 0, 0], + ], + ], + dtype=int, + ) + + lat_2d = xr.DataArray( + np.stack([np.arange(5)] * 5, axis=1), + dims=("y", "x"), + attrs={"units": "degrees"}, + ) + + lon_2d = xr.DataArray( + np.stack([np.arange(5)] * 5, axis=0), + dims=("y", "x"), + attrs={"units": "degrees"}, + ) + + test_labels = xr.DataArray( + test_labels, + dims=("time", "y", "x"), + coords={ + "time": [datetime(2000, 1, 1)], + "latitude": lat_2d, + "longitude": lon_2d, + }, + ) + + test_cube = test_labels.to_iris() + test_cube = test_cube.copy(test_cube.core_data().filled()) + + area_array = calculate_areas_2Dlatlon( + test_cube.coord("latitude"), test_cube.coord("longitude") + ) + + expected_areas = np.array( + [np.sum(area_array[test_labels[0].data == i]) for i in [1, 2]] + ) + + test_features = pd.DataFrame( + { + "feature": [1, 2], + "frame": [0, 0], + "time": [ + datetime(2000, 1, 1), + datetime(2000, 1, 1), + ], + } + ) + + area = calculate_area(test_features, test_cube) + + assert np.all(area["area"] == expected_areas) diff --git a/tobac/tests/test_decorators.py b/tobac/tests/test_decorators.py new file mode 100644 index 00000000..01a3a0ad --- /dev/null +++ b/tobac/tests/test_decorators.py @@ -0,0 +1,147 @@ +""" +Tests for tobac.utils.decorators +""" +import numpy as np +import pandas as pd +import xarray as xr +import iris + +from tobac.utils import decorators + + +def test_convert_cube_to_dataarray(): + test_da_float = xr.DataArray(np.arange(15, dtype=float).reshape(3, 5) + 0.5) + test_da_int = xr.DataArray(np.arange(15, dtype=int).reshape(3, 5)) + + assert np.all( + decorators.convert_cube_to_dataarray(test_da_float.to_iris()) + == test_da_float.values + ) + assert np.all( + decorators.convert_cube_to_dataarray(test_da_int.to_iris()) + == test_da_int.values + ) + + +def test_conv_kwargs_iris_to_xarray(): + assert 
decorators._conv_kwargs_iris_to_xarray({}) == {} + assert decorators._conv_kwargs_iris_to_xarray(dict(test_int=1)) == dict(test_int=1) + + test_da = xr.DataArray(np.arange(5)) + + test_xr_kwarg = decorators._conv_kwargs_iris_to_xarray(dict(test_xr=test_da)) + assert isinstance(test_xr_kwarg["test_xr"], xr.DataArray) + + test_iris_kwarg = decorators._conv_kwargs_iris_to_xarray( + dict(test_iris=test_da.to_iris()) + ) + assert isinstance(test_iris_kwarg["test_iris"], xr.DataArray) + + +def test_conv_kwargs_irispandas_to_xarray(): + assert decorators._conv_kwargs_irispandas_to_xarray({}) == {} + assert decorators._conv_kwargs_irispandas_to_xarray(dict(test_int=1)) == dict( + test_int=1 + ) + + test_da = xr.DataArray(np.arange(5)) + + test_xr_kwarg = decorators._conv_kwargs_irispandas_to_xarray(dict(test_xr=test_da)) + assert isinstance(test_xr_kwarg["test_xr"], xr.DataArray) + + test_iris_kwarg = decorators._conv_kwargs_irispandas_to_xarray( + dict(test_iris=test_da.to_iris()) + ) + assert isinstance(test_iris_kwarg["test_iris"], xr.DataArray) + + test_ds = xr.Dataset({"test": test_da}) + test_ds_kwarg = decorators._conv_kwargs_irispandas_to_xarray(dict(test_xr=test_ds)) + assert isinstance(test_ds_kwarg["test_xr"], xr.Dataset) + + test_pd_kwarg = decorators._conv_kwargs_irispandas_to_xarray( + dict(test_pd=test_ds.to_pandas()) + ) + assert isinstance(test_pd_kwarg["test_pd"], xr.Dataset) + + +def test_conv_kwargs_xarray_to_iris(): + assert decorators._conv_kwargs_xarray_to_iris({}) == {} + assert decorators._conv_kwargs_xarray_to_iris(dict(test_int=1)) == dict(test_int=1) + + test_da = xr.DataArray(np.arange(5)) + + test_xr_kwarg = decorators._conv_kwargs_xarray_to_iris(dict(test_xr=test_da)) + assert isinstance(test_xr_kwarg["test_xr"], iris.cube.Cube) + + test_iris_kwarg = decorators._conv_kwargs_xarray_to_iris( + dict(test_iris=test_da.to_iris()) + ) + assert isinstance(test_iris_kwarg["test_iris"], iris.cube.Cube) + + +def test_conv_kwargs_xarray_to_irispandas(): + assert decorators._conv_kwargs_xarray_to_irispandas({}) == {} + assert decorators._conv_kwargs_xarray_to_irispandas(dict(test_int=1)) == dict( + test_int=1 + ) + + test_da = xr.DataArray(np.arange(5)) + + test_xr_kwarg = decorators._conv_kwargs_xarray_to_irispandas(dict(test_xr=test_da)) + assert isinstance(test_xr_kwarg["test_xr"], iris.cube.Cube) + + test_iris_kwarg = decorators._conv_kwargs_xarray_to_irispandas( + dict(test_iris=test_da.to_iris()) + ) + assert isinstance(test_iris_kwarg["test_iris"], iris.cube.Cube) + + test_ds = xr.Dataset({"test": test_da}) + test_ds_kwarg = decorators._conv_kwargs_xarray_to_irispandas(dict(test_xr=test_ds)) + assert isinstance(test_ds_kwarg["test_xr"], pd.DataFrame) + + test_pd_kwarg = decorators._conv_kwargs_xarray_to_irispandas( + dict(test_pd=test_ds.to_pandas()) + ) + assert isinstance(test_pd_kwarg["test_pd"], pd.DataFrame) + + +@decorators.iris_to_xarray(save_iris_info=True) +def _test_iris_to_xarray(*args, **kwargs): + return kwargs["converted_from_iris"] + + +def test_iris_to_xarray(): + test_da = xr.DataArray(np.arange(5)) + + assert _test_iris_to_xarray(test_da) == False + assert _test_iris_to_xarray(kwarg_xr=test_da) == False + + assert _test_iris_to_xarray(test_da.to_iris()) == True + assert _test_iris_to_xarray(kwarg_ir=test_da.to_iris()) == True + + +@decorators.irispandas_to_xarray(save_iris_info=True) +def _test_irispandas_to_xarray(*args, **kwargs): + return kwargs["converted_from_iris"] + + +def test_irispandas_to_xarray(): + test_da = xr.DataArray(np.arange(5)) + + 
assert _test_irispandas_to_xarray(test_da) == False + assert _test_irispandas_to_xarray(kwarg_xr=test_da) == False + + assert _test_irispandas_to_xarray(test_da.to_iris()) == True + assert _test_irispandas_to_xarray(kwarg_ir=test_da.to_iris()) == True + + +@decorators.xarray_to_irispandas() +def _test_xarray_to_irispandas(*args, **kwargs): + return args, kwargs + + +def test_xarray_to_irispandas(): + test_da = xr.DataArray(np.arange(5, dtype=float)) + + assert isinstance(_test_xarray_to_irispandas(test_da)[0][0], iris.cube.Cube) + assert _test_xarray_to_irispandas(test_da)[1] == {} diff --git a/tobac/tests/test_sample_data.py b/tobac/tests/test_sample_data.py index 42f60344..bd395742 100644 --- a/tobac/tests/test_sample_data.py +++ b/tobac/tests/test_sample_data.py @@ -1,6 +1,7 @@ """ Tests for tobac based on simple sample datasets with moving blobs. These tests should be adapted to be more modular in the future. """ + from tobac.testing import ( make_sample_data_2D_3blobs, make_sample_data_2D_3blobs_inv, diff --git a/tobac/tests/test_testing.py b/tobac/tests/test_testing.py index 2dd0577b..282c1d03 100644 --- a/tobac/tests/test_testing.py +++ b/tobac/tests/test_testing.py @@ -2,6 +2,7 @@ Audit of the testing functions that produce our test data. Who's watching the watchmen, basically. """ + import pytest from tobac.testing import ( generate_single_feature, diff --git a/tobac/tests/test_tracking.py b/tobac/tests/test_tracking.py index 1266b210..94f410c3 100644 --- a/tobac/tests/test_tracking.py +++ b/tobac/tests/test_tracking.py @@ -1,6 +1,7 @@ """ Test for the trackpy tracking functions """ + import datetime import pytest diff --git a/tobac/tests/test_utils_bulk_statistics.py b/tobac/tests/test_utils_bulk_statistics.py index 0c2ad190..4967a779 100644 --- a/tobac/tests/test_utils_bulk_statistics.py +++ b/tobac/tests/test_utils_bulk_statistics.py @@ -1,6 +1,7 @@ from datetime import datetime import numpy as np import pandas as pd +import pytest import xarray as xr import tobac import tobac.utils as tb_utils @@ -396,3 +397,214 @@ def test_bulk_statistics_broadcasting(): assert np.all( bulk_statistics_output["weighted_sum"] == expected_weighted_sum_result ) + + +def test_get_statistics_collapse_axis(): + """ + Test the collapse_axis keyword of get_statistics + """ + test_labels = np.array( + [ + [0, 0, 0, 0, 0], + [0, 1, 0, 0, 0], + [0, 1, 0, 2, 0], + [0, 1, 0, 2, 0], + [0, 0, 0, 0, 0], + ], + dtype=int, + ) + + test_values = np.array([0.25, 0.5, 0.75, 1, 1]) + + test_features = pd.DataFrame( + { + "feature": [1, 2], + "frame": [0, 0], + "time": [ + datetime(2000, 1, 1), + datetime(2000, 1, 1), + ], + } + ) + statistics_sum = {"sum": np.sum} + + expected_sum_result_axis0 = np.array([0.5, 1]) + output_collapse_axis0 = tb_utils.get_statistics( + test_features, + test_labels, + test_values, + statistic=statistics_sum, + collapse_axis=0, + ) + assert np.all(output_collapse_axis0["sum"] == expected_sum_result_axis0) + + expected_sum_result_axis1 = np.array([2.25, 1.75]) + output_collapse_axis1 = tb_utils.get_statistics( + test_features, + test_labels, + test_values, + statistic=statistics_sum, + collapse_axis=1, + ) + assert np.all(output_collapse_axis1["sum"] == expected_sum_result_axis1) + + # Check that attempting broadcast raises a ValueError + with pytest.raises(ValueError): + _ = tb_utils.get_statistics( + test_features, + test_labels, + test_values.reshape([5, 1]), + statistic=statistics_sum, + collapse_axis=0, + ) + + # Check that attempting to collapse all axes raises a ValueError: + with 
pytest.raises(ValueError):
+        _ = tb_utils.get_statistics(
+            test_features,
+            test_labels,
+            test_values,
+            statistic=statistics_sum,
+            collapse_axis=[0, 1],
+        )
+
+    # Test with collapsing multiple axes
+    test_labels = np.array(
+        [
+            [
+                [0, 0, 0, 0, 0],
+                [0, 1, 0, 0, 0],
+                [0, 1, 0, 2, 0],
+                [0, 1, 0, 2, 0],
+                [0, 0, 0, 0, 0],
+            ],
+            [
+                [0, 0, 0, 0, 0],
+                [0, 1, 0, 0, 0],
+                [0, 1, 0, 0, 0],
+                [0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0],
+            ],
+        ],
+        dtype=int,
+    )
+    test_values = np.array([0.5, 1])
+    expected_sum_result_axis12 = np.array([1.5, 0.5])
+    output_collapse_axis12 = tb_utils.get_statistics(
+        test_features,
+        test_labels,
+        test_values,
+        statistic=statistics_sum,
+        collapse_axis=[1, 2],
+    )
+    assert np.all(output_collapse_axis12["sum"] == expected_sum_result_axis12)
+
+
+def test_get_statistics_from_mask_collapse_dim():
+    """
+    Test the collapse_dim keyword of get_statistics_from_mask
+    """
+
+    test_labels = np.array(
+        [
+            [
+                [
+                    [0, 0, 0, 0, 0],
+                    [0, 1, 0, 2, 0],
+                    [0, 1, 0, 2, 0],
+                    [0, 1, 0, 0, 0],
+                    [0, 0, 0, 0, 0],
+                ],
+                [
+                    [0, 0, 0, 0, 0],
+                    [0, 1, 0, 0, 0],
+                    [0, 1, 0, 3, 0],
+                    [0, 1, 0, 3, 0],
+                    [0, 0, 0, 0, 0],
+                ],
+            ],
+        ],
+        dtype=int,
+    )
+
+    test_labels = xr.DataArray(
+        test_labels,
+        dims=("time", "z", "y", "x"),
+        coords={
+            "time": [datetime(2000, 1, 1)],
+            "z": np.arange(2),
+            "y": np.arange(5),
+            "x": np.arange(5),
+        },
+    )
+
+    test_values = np.ones([5, 5])
+
+    test_values = xr.DataArray(
+        test_values,
+        dims=("x", "y"),
+        coords={
+            "y": np.arange(5),
+            "x": np.arange(5),
+        },
+    )
+
+    test_features = pd.DataFrame(
+        {
+            "feature": [1, 2, 3],
+            "frame": [0, 0, 0],
+            "time": [
+                datetime(2000, 1, 1),
+                datetime(2000, 1, 1),
+                datetime(2000, 1, 1),
+            ],
+        }
+    )
+
+    statistics_sum = {"sum": np.sum}
+
+    expected_sum_result = np.array([3, 2, 2])
+
+    # Test over a single dim
+    statistics_output = tb_utils.get_statistics_from_mask(
+        test_features,
+        test_labels,
+        test_values,
+        statistic=statistics_sum,
+        collapse_dim="z",
+    )
+
+    assert np.all(statistics_output["sum"] == expected_sum_result)
+
+    test_values = np.ones([2])
+
+    test_values = xr.DataArray(
+        test_values,
+        dims=("z",),
+        coords={
+            "z": np.arange(2),
+        },
+    )
+
+    expected_sum_result = np.array([2, 1, 1])
+
+    # Test over multiple dims
+    statistics_output = tb_utils.get_statistics_from_mask(
+        test_features,
+        test_labels,
+        test_values,
+        statistic=statistics_sum,
+        collapse_dim=("x", "y"),
+    )
+
+    assert np.all(statistics_output["sum"] == expected_sum_result)
+
+    # Test that collapse_dim not in labels raises an error
+    with pytest.raises(ValueError):
+        _ = tb_utils.get_statistics_from_mask(
+            test_features,
+            test_labels,
+            test_values,
+            statistic=statistics_sum,
+            collapse_dim="not_a_dim",
+        )
diff --git a/tobac/tracking.py b/tobac/tracking.py
index 362ead05..558dd85e 100644
--- a/tobac/tracking.py
+++ b/tobac/tracking.py
@@ -385,7 +385,7 @@ def linking_trackpy(
         link_strategy="auto",
         adaptive_step=adaptive_step,
         adaptive_stop=adaptive_stop,
-        dist_func=dist_func
+        dist_func=dist_func,
         # copy_features=False, diagnostics=False,
         # hash_size=None, box_size=None, verify_integrity=True,
         # retain_index=False
diff --git a/tobac/utils/bulk_statistics.py b/tobac/utils/bulk_statistics.py
index e933f373..cb9ce0cf 100644
--- a/tobac/utils/bulk_statistics.py
+++ b/tobac/utils/bulk_statistics.py
@@ -3,16 +3,24 @@
 or within feature detection or segmentation.
 
 """
+
 import logging
 import warnings
-from . import internal as internal_utils
-from . 
import decorators -from typing import Callable, Union from functools import partial +from typing import Callable, Union + import numpy as np + +# numpy renamed core to _core recently +try: + from numpy._core import multiarray as mu +except ModuleNotFoundError: + from numpy.core import multiarray as mu import pandas as pd import xarray as xr +from tobac.utils import decorators + def get_statistics( features: pd.DataFrame, @@ -24,6 +32,7 @@ def get_statistics( index: Union[None, list[int]] = None, default: Union[None, float] = None, id_column: str = "feature", + collapse_axis: Union[None, int, list[int]] = None, ) -> pd.DataFrame: """Get bulk statistics for objects (e.g. features or segmented features) given a labelled mask of the objects and any input field with the same @@ -66,6 +75,12 @@ def get_statistics( Name of the column in feature dataframe that contains IDs that match with the labels in mask. The default is the column "feature". + collapse_axis: None | int | list[int], optional (default: None): + Index or indices of axes of labels to collapse. This will reduce the dimensionality of labels + while allowing labelled features to overlap. This can be used, for example, to calculate the + footprint area (2D) of 3D labels + + Returns ------- features: pd.DataFrame @@ -74,16 +89,38 @@ def get_statistics( """ # if mask and input data dimensions do not match we can broadcast using numpy broadcasting rules - for field in fields: - if labels.shape != field.shape: + if collapse_axis is not None: + # Test if iterable and if not make a list + try: + collapse_axis = list(iter(collapse_axis)) + except TypeError: + collapse_axis = [collapse_axis] + + # Normalise axes to handle negative axis number conventions + ndim = len(labels.shape) + collapse_axis = [mu.normalize_axis_index(axis, ndim) for axis in collapse_axis] + uncollapsed_axes = [ + i for i, _ in enumerate(labels.shape) if i not in collapse_axis + ] + if not len(uncollapsed_axes): + raise ValueError("Cannot collapse all axes of labels") + collapsed_shape = tuple( + [s for i, s in enumerate(labels.shape) if i not in collapse_axis] + ) + broadcast_flag = any([collapsed_shape != field.shape for field in fields]) + if broadcast_flag: + raise ValueError("Broadcasting not supported with collapse_axis") + + else: + broadcast_flag = any([labels.shape != field.shape for field in fields]) + if broadcast_flag: # Broadcast input labels and fields to ensure they work according to numpy broadcasting rules broadcast_fields = np.broadcast_arrays(labels, *fields) labels = broadcast_fields[0] fields = broadcast_fields[1:] - break # mask must contain positive values to calculate statistics - if labels[labels > 0].size > 0: + if np.any(labels > 0): if index is None: index = features.feature.to_numpy() else: @@ -98,6 +135,20 @@ def get_statistics( bins = np.cumsum(np.bincount(np.maximum(labels.ravel(), 0))) argsorted = np.argsort(labels.ravel()) + # Create lambdas to get (ravelled) label locations using argsorted and bins + if collapse_axis is None: + label_locs = lambda i: argsorted[bins[i - 1] : bins[i]] + else: + # Collapse ravelled locations to the remaining axes + label_locs = lambda i: np.unique( + np.ravel_multi_index( + np.array( + np.unravel_index(argsorted[bins[i - 1] : bins[i]], labels.shape) + )[uncollapsed_axes], + collapsed_shape, + ) + ) + # apply each function given per statistic parameter for the labeled regions sorted in ascending order for stats_name in statistic.keys(): # if function is given as a tuple, take the input parameters provided @@ 
-119,14 +170,11 @@ def get_statistics(
 
         stats = np.array(
             [
-            func(
-                *(
-                    field.ravel()[argsorted[bins[i - 1] : bins[i]]]
-                    for field in fields
-                )
+                (
+                    func(*(field.ravel()[label_locs(i)] for field in fields))
+                    if i < bins.size and bins[i] > bins[i - 1]
+                    else default
                 )
-                if i < bins.size and bins[i] > bins[i - 1]
-                else default
                 for i in index
             ]
         )
@@ -172,6 +220,7 @@ def get_statistics_from_mask(
     index: Union[None, list[int]] = None,
     default: Union[None, float] = None,
     id_column: str = "feature",
+    collapse_dim: Union[None, str, list[str]] = None,
 ) -> pd.DataFrame:
     """Derives bulk statistics for each object in the segmentation mask, and returns a features
     Dataframe with these properties for each feature.
@@ -206,28 +255,36 @@ def get_statistics_from_mask(
         default value to return in a region that has no values
 
     id_column: str, optional (default: "feature")
-        Name of the column in feature dataframe that contains IDs that match
-        with the labels in mask. The default is the column "feature".
-
-    Returns
-    -------
-    features: pd.DataFrame
-        Updated feature dataframe with bulk statistics for each feature saved in a new column
+        Name of the column in feature dataframe that contains IDs that match with the labels in mask. The default is the column "feature".
 
+    collapse_dim: None | str | list[str], optional (default: None)
+        Dimension names of labels to collapse, allowing, e.g., calculation of statistics on 2D
+        fields for the footprint of 3D objects.
+
+    Returns
+    -------
+    features: pd.DataFrame
+        Updated feature dataframe with bulk statistics for each feature saved in a new column
 
     """
-
-    # check that mask and input data have the same dimensions
-    for field in fields:
-        if segmentation_mask.shape != field.shape:
-            warnings.warn(
-                "One or more field does not have the same shape as segmentation_mask. Numpy broadcasting rules will be applied"
-            )
-
     # warning when feature labels are not unique in dataframe
     if not features.feature.is_unique:
         raise logging.warning(
             "Feature labels are not unique which may cause unexpected results for the computation of bulk statistics."
        )
 
+    if collapse_dim is not None:
+        if isinstance(collapse_dim, str):
+            collapse_dim = [collapse_dim]
+        non_time_dims = [dim for dim in segmentation_mask.dims if dim != "time"]
+        collapse_axis = [
+            i for i, dim in enumerate(non_time_dims) if dim in collapse_dim
+        ]
+        if len(collapse_dim) != len(collapse_axis):
+            raise ValueError(
+                "One or more of collapse_dim not found in dimensions of segmentation_mask"
+            )
+    else:
+        collapse_axis = None
+
     # get bulk statistics for each timestep
     step_statistics = []
@@ -259,6 +316,7 @@ def get_statistics_from_mask(
             default=default,
             index=index,
             id_column=id_column,
+            collapse_axis=collapse_axis,
         )
     )
diff --git a/tobac/utils/decorators.py b/tobac/utils/decorators.py
index afe90d65..8bc6657f 100644
--- a/tobac/utils/decorators.py
+++ b/tobac/utils/decorators.py
@@ -3,9 +3,37 @@
 import functools
 import warnings
-import iris.cube
+
+import numpy as np
+from numpy import ma
 import pandas as pd
 import xarray as xr
+import iris.cube
+
+
+def convert_cube_to_dataarray(cube):
+    """
+    Convert an iris cube to an xarray dataarray, avoiding an error for integer dtype cubes in xarray